mirror of https://git.busybox.net/uClibc (synced 2025-05-08 23:02:28 +08:00)

commit a032a65870, parent 70f1d42b13

    mass sync with glibc nptl

    Signed-off-by: Austin Foxley <austinf@cetoncorp.com>

.gitignore (vendored): 1 line changed
@@ -17,6 +17,7 @@ install_dir/
 .config*
 .*.dep
+/*.log
 cscope.*

 #
 # Debugging files

include/atomic.h: 396 lines changed
@@ -1,5 +1,5 @@
 /* Internal macros for atomic operations for GNU C Library.
-   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2002-2006, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -21,6 +21,31 @@
 #ifndef _ATOMIC_H
 #define _ATOMIC_H 1

+/* This header defines three types of macros:
+
+   - atomic arithmetic and logic operations on memory.  They all
+     have the prefix "atomic_".
+
+   - conditionally atomic operations of the same kinds.  These
+     always behave identically but can be faster when atomicity
+     is not really needed since only one thread has access to
+     the memory location.  In that case the code is slower in
+     the multi-thread case.  The interfaces have the prefix
+     "catomic_".
+
+   - support functions like barriers.  They also have the prefix
+     "atomic_".
+
+   Architectures must provide a few lowlevel macros (the compare
+   and exchange definitions).  All others are optional.  They
+   should only be provided if the architecture has specific
+   support for the operation.
+
+   As <atomic.h> macros are usually heavily nested and often use local
+   variables to make sure side-effects are evaluated properly, use for
+   macro local variables a per-macro unique prefix.  This file uses
+   __atgN_ prefix where N is different in each macro.  */
+
+#include <stdlib.h>

 #include <bits/atomic.h>
@@ -30,33 +55,33 @@
    and following args.  */
 #define __atomic_val_bysize(pre, post, mem, ...) \
   ({ \
-    __typeof (*mem) __result; \
+    __typeof (*mem) __atg1_result; \
     if (sizeof (*mem) == 1) \
-      __result = pre##_8_##post (mem, __VA_ARGS__); \
+      __atg1_result = pre##_8_##post (mem, __VA_ARGS__); \
     else if (sizeof (*mem) == 2) \
-      __result = pre##_16_##post (mem, __VA_ARGS__); \
+      __atg1_result = pre##_16_##post (mem, __VA_ARGS__); \
     else if (sizeof (*mem) == 4) \
-      __result = pre##_32_##post (mem, __VA_ARGS__); \
+      __atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
     else if (sizeof (*mem) == 8) \
-      __result = pre##_64_##post (mem, __VA_ARGS__); \
+      __atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
     else \
      abort (); \
-    __result; \
+    __atg1_result; \
  })

 #define __atomic_bool_bysize(pre, post, mem, ...) \
   ({ \
-    int __result; \
+    int __atg2_result; \
     if (sizeof (*mem) == 1) \
-      __result = pre##_8_##post (mem, __VA_ARGS__); \
+      __atg2_result = pre##_8_##post (mem, __VA_ARGS__); \
     else if (sizeof (*mem) == 2) \
-      __result = pre##_16_##post (mem, __VA_ARGS__); \
+      __atg2_result = pre##_16_##post (mem, __VA_ARGS__); \
     else if (sizeof (*mem) == 4) \
-      __result = pre##_32_##post (mem, __VA_ARGS__); \
+      __atg2_result = pre##_32_##post (mem, __VA_ARGS__); \
     else if (sizeof (*mem) == 8) \
-      __result = pre##_64_##post (mem, __VA_ARGS__); \
+      __atg2_result = pre##_64_##post (mem, __VA_ARGS__); \
     else \
      abort (); \
-    __result; \
+    __atg2_result; \
  })

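The pre##_SIZE_##post token pasting above is what routes a call like atomic_compare_and_exchange_val_acq through __arch_compare_and_exchange_val_32_acq for a 4-byte object. Here is a hedged, self-contained analogue of that dispatch; the demo_* names are invented for the example, and GCC's __sync builtin stands in for the per-size functions that bits/atomic.h really supplies:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy per-size implementations; the real ones live in bits/atomic.h.  */
#define demo_32_acq(mem, new, old) \
  __sync_val_compare_and_swap ((uint32_t *) (mem), (old), (new))
#define demo_64_acq(mem, new, old) \
  __sync_val_compare_and_swap ((uint64_t *) (mem), (old), (new))

/* Same shape as __atomic_val_bysize: paste prefix, size, suffix.  */
#define demo_val_bysize(pre, post, mem, ...) \
  ({ __typeof (*(mem)) __res; \
     if (sizeof (*(mem)) == 4) \
       __res = pre##_32_##post (mem, __VA_ARGS__); \
     else if (sizeof (*(mem)) == 8) \
       __res = pre##_64_##post (mem, __VA_ARGS__); \
     else \
       abort (); \
     __res; })

int main (void)
{
  uint32_t v = 5;
  /* Expands to demo_32_acq (&v, 6, 5): CAS 5 -> 6, returns the old value.  */
  uint32_t old = demo_val_bysize (demo, acq, &v, 6, 5);
  printf ("old=%u new=%u\n", old, v);   /* prints old=5 new=6 */
  return 0;
}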
@@ -70,6 +95,29 @@
 #endif


+#ifndef catomic_compare_and_exchange_val_acq
+# ifdef __arch_c_compare_and_exchange_val_32_acq
+#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+  __atomic_val_bysize (__arch_c_compare_and_exchange_val,acq, \
+                       mem, newval, oldval)
+# else
+#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
+# endif
+#endif
+
+
+#ifndef catomic_compare_and_exchange_val_rel
+# ifndef atomic_compare_and_exchange_val_rel
+#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
+  catomic_compare_and_exchange_val_acq (mem, newval, oldval)
+# else
+#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
+  atomic_compare_and_exchange_val_rel (mem, newval, oldval)
+# endif
+#endif
+
+
 #ifndef atomic_compare_and_exchange_val_rel
 # define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
   atomic_compare_and_exchange_val_acq (mem, newval, oldval)
@@ -83,17 +131,46 @@
 # define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool,acq, \
                        mem, newval, oldval)
 # else
 #  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
        call this macro with __oldval argument.  */ \
-     __typeof (oldval) __old = (oldval); \
-     atomic_compare_and_exchange_val_acq (mem, newval, __old) != __old; \
+     __typeof (oldval) __atg3_old = (oldval); \
+     atomic_compare_and_exchange_val_acq (mem, newval, __atg3_old) \
+       != __atg3_old; \
  })
 # endif
 #endif


+#ifndef catomic_compare_and_exchange_bool_acq
+# ifdef __arch_c_compare_and_exchange_bool_32_acq
+#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+  __atomic_bool_bysize (__arch_c_compare_and_exchange_bool,acq, \
+                        mem, newval, oldval)
+# else
+#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+  ({ /* Cannot use __oldval here, because macros later in this file might \
+        call this macro with __oldval argument.  */ \
+     __typeof (oldval) __atg4_old = (oldval); \
+     catomic_compare_and_exchange_val_acq (mem, newval, __atg4_old) \
+       != __atg4_old; \
+  })
+# endif
+#endif
+
+
+#ifndef catomic_compare_and_exchange_bool_rel
+# ifndef atomic_compare_and_exchange_bool_rel
+#  define catomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
+  catomic_compare_and_exchange_bool_acq (mem, newval, oldval)
+# else
+#  define catomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
+  atomic_compare_and_exchange_bool_rel (mem, newval, oldval)
+# endif
+#endif
+
+
 #ifndef atomic_compare_and_exchange_bool_rel
 # define atomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
   atomic_compare_and_exchange_bool_acq (mem, newval, oldval)
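Note the contract encoded by the fallback above: the bool form is the val form compared against the expected old value, so it returns nonzero exactly when the exchange did not happen. Restated with C11 atomics as a compilable sketch (cas_val and cas_bool are illustrative names, not part of the header):

#include <stdatomic.h>
#include <assert.h>

/* Value form: returns the value *mem held before the attempt.  */
static int cas_val (atomic_int *mem, int newval, int oldval)
{
  atomic_compare_exchange_strong (mem, &oldval, newval);
  return oldval;   /* on failure this holds the observed value */
}

/* Bool form, built exactly like the header's fallback: nonzero iff the
   observed value differed from OLDVAL, i.e. the exchange did NOT occur.  */
static int cas_bool (atomic_int *mem, int newval, int oldval)
{
  return cas_val (mem, newval, oldval) != oldval;
}

int main (void)
{
  atomic_int v = 1;
  assert (cas_bool (&v, 2, 1) == 0 && v == 2);  /* exchange happened */
  assert (cas_bool (&v, 9, 1) != 0 && v == 2);  /* value was 2, not 1 */
  return 0;
}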
@@ -103,18 +180,17 @@
 /* Store NEWVALUE in *MEM and return the old value.  */
 #ifndef atomic_exchange_acq
 # define atomic_exchange_acq(mem, newvalue) \
-  ({ __typeof (*(mem)) __oldval; \
-     __typeof (mem) __memp = (mem); \
-     __typeof (*(mem)) __value = (newvalue); \
+  ({ __typeof (*(mem)) __atg5_oldval; \
+     __typeof (mem) __atg5_memp = (mem); \
+     __typeof (*(mem)) __atg5_value = (newvalue); \
 \
     do \
-      __oldval = (*__memp); \
-    while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp, \
-                                                                   __value, \
-                                                                   __oldval), 0)); \
+      __atg5_oldval = *__atg5_memp; \
+    while (__builtin_expect \
+           (atomic_compare_and_exchange_bool_acq (__atg5_memp, __atg5_value, \
+                                                  __atg5_oldval), 0)); \
 \
-     __oldval; })
+     __atg5_oldval; })
 #endif

 #ifndef atomic_exchange_rel
@@ -125,19 +201,90 @@
 /* Add VALUE to *MEM and return the old value of *MEM.  */
 #ifndef atomic_exchange_and_add
 # define atomic_exchange_and_add(mem, value) \
-  ({ __typeof (*(mem)) __oldval; \
-     __typeof (mem) __memp = (mem); \
-     __typeof (*(mem)) __value = (value); \
+  ({ __typeof (*(mem)) __atg6_oldval; \
+     __typeof (mem) __atg6_memp = (mem); \
+     __typeof (*(mem)) __atg6_value = (value); \
 \
     do \
-      __oldval = (*__memp); \
-    while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp, \
-                                                                   __oldval \
-                                                                   + __value, \
-                                                                   __oldval), 0)); \
+      __atg6_oldval = *__atg6_memp; \
+    while (__builtin_expect \
+           (atomic_compare_and_exchange_bool_acq (__atg6_memp, \
+                                                  __atg6_oldval + __atg6_value, \
+                                                  __atg6_oldval), 0)); \
 \
-     __oldval; })
+     __atg6_oldval; })
 #endif

+
+#ifndef catomic_exchange_and_add
+# define catomic_exchange_and_add(mem, value) \
+  ({ __typeof (*(mem)) __atg7_oldv; \
+     __typeof (mem) __atg7_memp = (mem); \
+     __typeof (*(mem)) __atg7_value = (value); \
+ \
+     do \
+       __atg7_oldv = *__atg7_memp; \
+     while (__builtin_expect \
+            (catomic_compare_and_exchange_bool_acq (__atg7_memp, \
+                                                    __atg7_oldv + __atg7_value, \
+                                                    __atg7_oldv), 0)); \
+ \
+     __atg7_oldv; })
+#endif
+
+
+#ifndef atomic_max
+# define atomic_max(mem, value) \
+  do { \
+    __typeof (*(mem)) __atg8_oldval; \
+    __typeof (mem) __atg8_memp = (mem); \
+    __typeof (*(mem)) __atg8_value = (value); \
+    do { \
+      __atg8_oldval = *__atg8_memp; \
+      if (__atg8_oldval >= __atg8_value) \
+        break; \
+    } while (__builtin_expect \
+             (atomic_compare_and_exchange_bool_acq (__atg8_memp, __atg8_value, \
+                                                    __atg8_oldval), 0)); \
+  } while (0)
+#endif
+
+
+#ifndef catomic_max
+# define catomic_max(mem, value) \
+  do { \
+    __typeof (*(mem)) __atg9_oldv; \
+    __typeof (mem) __atg9_memp = (mem); \
+    __typeof (*(mem)) __atg9_value = (value); \
+    do { \
+      __atg9_oldv = *__atg9_memp; \
+      if (__atg9_oldv >= __atg9_value) \
+        break; \
+    } while (__builtin_expect \
+             (catomic_compare_and_exchange_bool_acq (__atg9_memp, \
+                                                     __atg9_value, \
+                                                     __atg9_oldv), 0)); \
+  } while (0)
+#endif
+
+
+#ifndef atomic_min
+# define atomic_min(mem, value) \
+  do { \
+    __typeof (*(mem)) __atg10_oldval; \
+    __typeof (mem) __atg10_memp = (mem); \
+    __typeof (*(mem)) __atg10_value = (value); \
+    do { \
+      __atg10_oldval = *__atg10_memp; \
+      if (__atg10_oldval <= __atg10_value) \
+        break; \
+    } while (__builtin_expect \
+             (atomic_compare_and_exchange_bool_acq (__atg10_memp, \
+                                                    __atg10_value, \
+                                                    __atg10_oldval), 0)); \
+  } while (0)
+#endif
+
+
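The generic atomic_exchange_and_add fallback above is the classic compare-and-swap retry loop: load the old value, try to install old + value, and retry if another thread won the race. The same pattern as a standalone function over C11 atomics (a sketch of the technique, not the header's code):

#include <stdatomic.h>
#include <stdio.h>

/* Returns the old value of *mem, like atomic_exchange_and_add.  */
static int exchange_and_add (atomic_int *mem, int value)
{
  int old = atomic_load (mem);
  /* If another thread changed *mem between the load and the CAS, the
     CAS fails, OLD is refreshed with the observed value, and we retry.  */
  while (!atomic_compare_exchange_weak (mem, &old, old + value))
    ;
  return old;
}

int main (void)
{
  atomic_int counter = 40;
  int old = exchange_and_add (&counter, 2);
  printf ("old=%d new=%d\n", old, atomic_load (&counter)); /* old=40 new=42 */
  return 0;
}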
@@ -146,16 +293,32 @@
 #endif


+#ifndef catomic_add
+# define catomic_add(mem, value) \
+  (void) catomic_exchange_and_add ((mem), (value))
+#endif
+
+
 #ifndef atomic_increment
 # define atomic_increment(mem) atomic_add ((mem), 1)
 #endif


+#ifndef catomic_increment
+# define catomic_increment(mem) catomic_add ((mem), 1)
+#endif
+
+
 #ifndef atomic_increment_val
 # define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
 #endif


+#ifndef catomic_increment_val
+# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
+#endif
+
+
 /* Add one to *MEM and return true iff it's now zero.  */
 #ifndef atomic_increment_and_test
 # define atomic_increment_and_test(mem) \
@@ -168,11 +331,21 @@
 #endif


+#ifndef catomic_decrement
+# define catomic_decrement(mem) catomic_add ((mem), -1)
+#endif
+
+
 #ifndef atomic_decrement_val
 # define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
 #endif


+#ifndef catomic_decrement_val
+# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
+#endif
+
+
 /* Subtract 1 from *MEM and return true iff it's now zero.  */
 #ifndef atomic_decrement_and_test
 # define atomic_decrement_and_test(mem) \
@@ -183,35 +356,34 @@
 /* Decrement *MEM if it is > 0, and return the old value.  */
 #ifndef atomic_decrement_if_positive
 # define atomic_decrement_if_positive(mem) \
-  ({ __typeof (*(mem)) __oldval; \
-     __typeof (mem) __memp = (mem); \
+  ({ __typeof (*(mem)) __atg11_oldval; \
+     __typeof (mem) __atg11_memp = (mem); \
 \
     do \
       { \
-        __oldval = *__memp; \
-        if (__builtin_expect (__oldval <= 0, 0)) \
+        __atg11_oldval = *__atg11_memp; \
+        if (__builtin_expect (__atg11_oldval <= 0, 0)) \
          break; \
       } \
-    while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp, \
-                                                                   __oldval \
-                                                                   - 1, \
-                                                                   __oldval), 0)); \
-     __oldval; })
+    while (__builtin_expect \
+           (atomic_compare_and_exchange_bool_acq (__atg11_memp, \
+                                                  __atg11_oldval - 1, \
+                                                  __atg11_oldval), 0)); \
+     __atg11_oldval; })
 #endif


 #ifndef atomic_add_negative
 # define atomic_add_negative(mem, value) \
-  ({ __typeof (value) __aan_value = (value); \
-     atomic_exchange_and_add (mem, __aan_value) < -__aan_value; })
+  ({ __typeof (value) __atg12_value = (value); \
+     atomic_exchange_and_add (mem, __atg12_value) < -__atg12_value; })
 #endif


 #ifndef atomic_add_zero
 # define atomic_add_zero(mem, value) \
-  ({ __typeof (value) __aaz_value = (value); \
-     atomic_exchange_and_add (mem, __aaz_value) == -__aaz_value; })
+  ({ __typeof (value) __atg13_value = (value); \
+     atomic_exchange_and_add (mem, __atg13_value) == -__atg13_value; })
 #endif


@@ -223,21 +395,119 @@

 #ifndef atomic_bit_test_set
 # define atomic_bit_test_set(mem, bit) \
-  ({ __typeof (*(mem)) __oldval; \
-     __typeof (mem) __memp = (mem); \
-     __typeof (*(mem)) __mask = ((__typeof (*(mem))) 1 << (bit)); \
+  ({ __typeof (*(mem)) __atg14_old; \
+     __typeof (mem) __atg14_memp = (mem); \
+     __typeof (*(mem)) __atg14_mask = ((__typeof (*(mem))) 1 << (bit)); \
 \
     do \
-      __oldval = (*__memp); \
-    while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp, \
-                                                                   __oldval \
-                                                                   | __mask, \
-                                                                   __oldval), 0)); \
+      __atg14_old = (*__atg14_memp); \
+    while (__builtin_expect \
+           (atomic_compare_and_exchange_bool_acq (__atg14_memp, \
+                                                  __atg14_old | __atg14_mask, \
+                                                  __atg14_old), 0)); \
 \
-     __oldval & __mask; })
+     __atg14_old & __atg14_mask; })
 #endif

+/* Atomically *mem &= mask.  */
+#ifndef atomic_and
+# define atomic_and(mem, mask) \
+  do { \
+    __typeof (*(mem)) __atg15_old; \
+    __typeof (mem) __atg15_memp = (mem); \
+    __typeof (*(mem)) __atg15_mask = (mask); \
+ \
+    do \
+      __atg15_old = (*__atg15_memp); \
+    while (__builtin_expect \
+           (atomic_compare_and_exchange_bool_acq (__atg15_memp, \
+                                                  __atg15_old & __atg15_mask, \
+                                                  __atg15_old), 0)); \
+  } while (0)
+#endif
+
+#ifndef catomic_and
+# define catomic_and(mem, mask) \
+  do { \
+    __typeof (*(mem)) __atg20_old; \
+    __typeof (mem) __atg20_memp = (mem); \
+    __typeof (*(mem)) __atg20_mask = (mask); \
+ \
+    do \
+      __atg20_old = (*__atg20_memp); \
+    while (__builtin_expect \
+           (catomic_compare_and_exchange_bool_acq (__atg20_memp, \
+                                                   __atg20_old & __atg20_mask, \
+                                                   __atg20_old), 0)); \
+  } while (0)
+#endif
+
+/* Atomically *mem &= mask and return the old value of *mem.  */
+#ifndef atomic_and_val
+# define atomic_and_val(mem, mask) \
+  ({ __typeof (*(mem)) __atg16_old; \
+     __typeof (mem) __atg16_memp = (mem); \
+     __typeof (*(mem)) __atg16_mask = (mask); \
+ \
+     do \
+       __atg16_old = (*__atg16_memp); \
+     while (__builtin_expect \
+            (atomic_compare_and_exchange_bool_acq (__atg16_memp, \
+                                                   __atg16_old & __atg16_mask, \
+                                                   __atg16_old), 0)); \
+ \
+     __atg16_old; })
+#endif
+
+/* Atomically *mem |= mask.  */
+#ifndef atomic_or
+# define atomic_or(mem, mask) \
+  do { \
+    __typeof (*(mem)) __atg17_old; \
+    __typeof (mem) __atg17_memp = (mem); \
+    __typeof (*(mem)) __atg17_mask = (mask); \
+ \
+    do \
+      __atg17_old = (*__atg17_memp); \
+    while (__builtin_expect \
+           (atomic_compare_and_exchange_bool_acq (__atg17_memp, \
+                                                  __atg17_old | __atg17_mask, \
+                                                  __atg17_old), 0)); \
+  } while (0)
+#endif
+
+#ifndef catomic_or
+# define catomic_or(mem, mask) \
+  do { \
+    __typeof (*(mem)) __atg18_old; \
+    __typeof (mem) __atg18_memp = (mem); \
+    __typeof (*(mem)) __atg18_mask = (mask); \
+ \
+    do \
+      __atg18_old = (*__atg18_memp); \
+    while (__builtin_expect \
+           (catomic_compare_and_exchange_bool_acq (__atg18_memp, \
+                                                   __atg18_old | __atg18_mask, \
+                                                   __atg18_old), 0)); \
+  } while (0)
+#endif
+
+/* Atomically *mem |= mask and return the old value of *mem.  */
+#ifndef atomic_or_val
+# define atomic_or_val(mem, mask) \
+  ({ __typeof (*(mem)) __atg19_old; \
+     __typeof (mem) __atg19_memp = (mem); \
+     __typeof (*(mem)) __atg19_mask = (mask); \
+ \
+     do \
+       __atg19_old = (*__atg19_memp); \
+     while (__builtin_expect \
+            (atomic_compare_and_exchange_bool_acq (__atg19_memp, \
+                                                   __atg19_old | __atg19_mask, \
+                                                   __atg19_old), 0)); \
+ \
+     __atg19_old; })
+#endif
+
 #ifndef atomic_full_barrier
 # define atomic_full_barrier() __asm__ ("" ::: "memory")

@@ -254,6 +524,12 @@
 #endif


+#ifndef atomic_forced_read
+# define atomic_forced_read(x) \
+  ({ __typeof (x) __x; __asm__ ("" : "=r" (__x) : "0" (x)); __x; })
+#endif
+
+
 #ifndef atomic_delay
 # define atomic_delay() do { /* nothing */ } while (0)
 #endif

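The new atomic_forced_read exists to defeat common-subexpression elimination: the empty asm with an "=r"/"0" constraint pair launders the value through a register, so the compiler cannot prove a repeated load is redundant. A hedged, compilable illustration (the polling scenario is invented for the example):

#include <stdio.h>

#define atomic_forced_read(x) \
  ({ __typeof (x) __x; __asm__ ("" : "=r" (__x) : "0" (x)); __x; })

static int flag;

int main (void)
{
  /* Without the forced read, the compiler may assume FLAG cannot change
     and hoist the load out of a polling loop; the empty asm makes each
     read opaque to the optimizer.  */
  int snapshot = atomic_forced_read (flag);
  printf ("%d\n", snapshot);
  return 0;
}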
libpthread/nptl/.gitignore (vendored): 3 lines changed
@@ -11,7 +11,10 @@ tcb-offsets.[hcs]
 lowlevelbarrier.[hcs]
 lowlevelcond.[hcs]
 lowlevelrwlock.[hcs]
+lowlevelrobustlock.[hcs]
 unwindbuf.[hcs]
+structsem.[hcs]
+pthread-pi-defines.[hcs]
 sysdeps/pthread/pt-sigaction.c
 sysdeps/pthread/pt-sigfillset.c
 sysdeps/pthread/pt-sigprocmask.c

(One file's diff was suppressed by the viewer because it is too large.)
libpthread/nptl/DESIGN-barrier.txt: new file, 44 lines

Barriers pseudocode
===================

	int pthread_barrier_wait(barrier_t *barrier);

struct barrier_t {

   unsigned int lock:
	- internal mutex

   unsigned int left;
	- current barrier count, # of threads still needed.

   unsigned int init_count;
	- number of threads needed for the barrier to continue.

   unsigned int curr_event;
	- generation count
}

pthread_barrier_wait(barrier_t *barrier)
{
  unsigned int event;
  result = 0;

  lll_lock(barrier->lock);
  if (!--barrier->left) {
    barrier->curr_event++;
    futex_wake(&barrier->curr_event, INT_MAX)

    result = BARRIER_SERIAL_THREAD;
  } else {
    event = barrier->curr_event;
    lll_unlock(barrier->lock);
    do {
      futex_wait(&barrier->curr_event, event)
    } while (event == barrier->curr_event);
  }

  if (atomic_increment_val (barrier->left) == barrier->init_count)
    lll_unlock(barrier->lock);

  return result;
}
libpthread/nptl/DESIGN-condvar.txt: new file, 134 lines

Conditional Variable pseudocode.
================================

      int pthread_cond_timedwait (pthread_cond_t *cv, pthread_mutex_t *mutex,
				  const struct timespec *abstime);
      int pthread_cond_signal    (pthread_cond_t *cv);
      int pthread_cond_broadcast (pthread_cond_t *cv);

struct pthread_cond_t {

   unsigned int cond_lock;

	 internal mutex

   uint64_t total_seq;

     Total number of threads using the conditional variable.

   uint64_t wakeup_seq;

     sequence number for next wakeup.

   uint64_t woken_seq;

     sequence number of last woken thread.

   uint32_t broadcast_seq;

}


struct cv_data {

   pthread_cond_t *cv;

   uint32_t bc_seq;

}


cleanup_handler(cv_data)
{
  cv = cv_data->cv;
  lll_lock(cv->lock);

  if (cv_data->bc_seq == cv->broadcast_seq) {
    ++cv->wakeup_seq;
    ++cv->woken_seq;
  }

  /* make sure no signal gets lost.  */
  FUTEX_WAKE(cv->wakeup_seq, ALL);

  lll_unlock(cv->lock);
}


cond_timedwait(cv, mutex, timeout)
{
  lll_lock(cv->lock);
  mutex_unlock(mutex);

  cleanup_push

    ++cv->total_seq;
    val = seq = cv->wakeup_seq;
    cv_data.bc_seq = cv->broadcast_seq;
    cv_data.cv = cv;

    while (1) {

      lll_unlock(cv->lock);

      enable_async(&cv_data);

      ret = FUTEX_WAIT(cv->wakeup_seq, val, timeout);

      restore_async

      lll_lock(cv->lock);

      if (cv_data.bc_seq != cv->broadcast_seq)
	goto bc_out;

      val = cv->wakeup_seq;

      if (val != seq && cv->woken_seq != val) {
	ret = 0;
	break;
      }

      if (ret == TIMEDOUT) {
	++cv->wakeup_seq;
	break;
      }
    }

    ++cv->woken_seq;

  bc_out:
    lll_unlock(cv->lock);

  cleanup_pop

  mutex_lock(mutex);

  return ret;
}

cond_signal(cv)
{
  lll_lock(cv->lock);

  if (cv->total_seq > cv->wakeup_seq) {
    ++cv->wakeup_seq;
    FUTEX_WAKE(cv->wakeup_seq, 1);
  }

  lll_unlock(cv->lock);
}

cond_broadcast(cv)
{
  lll_lock(cv->lock);

  if (cv->total_seq > cv->wakeup_seq) {
    cv->wakeup_seq = cv->total_seq;
    cv->woken_seq = cv->total_seq;
    ++cv->broadcast_seq;
    FUTEX_WAKE(cv->wakeup_seq, ALL);
  }

  lll_unlock(cv->lock);
}
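The sequence counters above implement the usual contract: a waiter may wake spuriously or lose its wakeup to a broadcast, so callers must re-check their predicate in a loop. A minimal sketch of the calling convention, with error handling omitted:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;   /* the predicate guarded by LOCK */

static void *producer (void *arg)
{
  pthread_mutex_lock (&lock);
  ready = 1;
  pthread_cond_signal (&cond);   /* bumps wakeup_seq, wakes one waiter */
  pthread_mutex_unlock (&lock);
  return NULL;
}

int main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, producer, NULL);

  pthread_mutex_lock (&lock);
  /* The loop matters: the counters guarantee no lost wakeup, not that
     this thread's predicate is true whenever it wakes.  */
  while (!ready)
    pthread_cond_wait (&cond, &lock);
  pthread_mutex_unlock (&lock);

  pthread_join (t, NULL);
  puts ("done");
  return 0;
}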
libpthread/nptl/DESIGN-rwlock.txt: new file, 113 lines

Reader Writer Locks pseudocode
==============================

	pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
	pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
	pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);

struct pthread_rwlock_t {

   unsigned int lock:
	- internal mutex

   unsigned int writers_preferred;
	- locking mode: 0 recursive, readers preferred
	                1 nonrecursive, writers preferred

   unsigned int readers;
	- number of read-only references various threads have

   pthread_t writer;
	- descriptor of the writer or 0

   unsigned int readers_wakeup;
	- 'all readers should wake up' futex.

   unsigned int writer_wakeup;
	- 'one writer should wake up' futex.

   unsigned int nr_readers_queued;
	- number of readers queued up.

   unsigned int nr_writers_queued;
	- number of writers queued up.
}

pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
  lll_lock(rwlock->lock);
  for (;;) {
    if (!rwlock->writer && (!rwlock->nr_writers_queued ||
                            !rwlock->writers_preferred))
      break;

    rwlock->nr_readers_queued++;
    val = rwlock->readers_wakeup;
    lll_unlock(rwlock->lock);

    futex_wait(&rwlock->readers_wakeup, val)

    lll_lock(rwlock->lock);
    rwlock->nr_readers_queued--;
  }
  rwlock->readers++;
  lll_unlock(rwlock->lock);
}

pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
  int result = EBUSY;
  lll_lock(rwlock->lock);
  if (!rwlock->writer && (!rwlock->nr_writers_queued ||
                          !rwlock->writers_preferred)) {
    rwlock->readers++;
    result = 0;
  }
  lll_unlock(rwlock->lock);
  return result;
}

pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
  lll_lock(rwlock->lock);
  for (;;) {
    if (!rwlock->writer && !rwlock->readers)
      break;

    rwlock->nr_writers_queued++;
    val = rwlock->writer_wakeup;
    lll_unlock(rwlock->lock);

    futex_wait(&rwlock->writer_wakeup, val);

    lll_lock(rwlock->lock);
    rwlock->nr_writers_queued--;
  }
  rwlock->writer = pthread_self();
  lll_unlock(rwlock->lock);
}

pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
  lll_lock(rwlock->lock);

  if (rwlock->writer)
    rwlock->writer = 0;
  else
    rwlock->readers--;

  if (!rwlock->readers) {
    if (rwlock->nr_writers_queued) {
      ++rwlock->writer_wakeup;
      lll_unlock(rwlock->lock);
      futex_wake(&rwlock->writer_wakeup, 1);
      return;
    } else
      if (rwlock->nr_readers_queued) {
        ++rwlock->readers_wakeup;
        lll_unlock(rwlock->lock);
        futex_wake(&rwlock->readers_wakeup, INT_MAX);
        return;
      }
  }

  lll_unlock(rwlock->lock);
}
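A minimal caller of the three entry points, showing the invariants the pseudocode maintains (many concurrent readers, a single writer; error handling omitted):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;
static int shared_value;

static void *reader (void *arg)
{
  pthread_rwlock_rdlock (&rw);      /* many readers may hold this at once */
  printf ("read %d\n", shared_value);
  pthread_rwlock_unlock (&rw);
  return NULL;
}

static void *writer (void *arg)
{
  pthread_rwlock_wrlock (&rw);      /* waits until readers == 0, writer == 0 */
  ++shared_value;
  pthread_rwlock_unlock (&rw);
  return NULL;
}

int main (void)
{
  pthread_t r1, r2, w;
  pthread_create (&w, NULL, writer, NULL);
  pthread_create (&r1, NULL, reader, NULL);
  pthread_create (&r2, NULL, reader, NULL);
  pthread_join (r1, NULL);
  pthread_join (r2, NULL);
  pthread_join (w, NULL);
  return 0;
}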
libpthread/nptl/DESIGN-sem.txt: new file, 46 lines

Semaphores pseudocode
=====================

	int sem_wait(sem_t * sem);
	int sem_trywait(sem_t * sem);
	int sem_post(sem_t * sem);
	int sem_getvalue(sem_t * sem, int * sval);

struct sem_t {

   unsigned int count;
	- current semaphore count, also used as a futex
}

sem_wait(sem_t *sem)
{
  for (;;) {

    if (atomic_decrement_if_positive(sem->count))
      break;

    futex_wait(&sem->count, 0)
  }
}

sem_post(sem_t *sem)
{
  n = atomic_increment(sem->count);
  // Pass the new value of sem->count
  futex_wake(&sem->count, n + 1);
}

sem_trywait(sem_t *sem)
{
  if (atomic_decrement_if_positive(sem->count)) {
    return 0;
  } else {
    return EAGAIN;
  }
}

sem_getvalue(sem_t *sem, int *sval)
{
  *sval = sem->count;
  read_barrier();
}
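sem_wait maps directly onto atomic_decrement_if_positive plus futex_wait, and sem_post onto atomic_increment plus futex_wake. A minimal caller, with error handling omitted:

#include <semaphore.h>
#include <pthread.h>
#include <stdio.h>

static sem_t sem;

static void *worker (void *arg)
{
  sem_wait (&sem);   /* atomic_decrement_if_positive, else futex_wait */
  puts ("got a slot");
  return NULL;
}

int main (void)
{
  pthread_t t1, t2;
  sem_init (&sem, 0, 0);           /* count starts at 0: workers block */
  pthread_create (&t1, NULL, worker, NULL);
  pthread_create (&t2, NULL, worker, NULL);
  sem_post (&sem);                 /* count 0 -> 1, futex_wake */
  sem_post (&sem);
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);
  sem_destroy (&sem);
  return 0;
}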
Hunk from the nptl Makefile (the exact file name is not shown in the capture; this is the libpthread-routines list):
@@ -31,9 +31,16 @@ libpthread-routines = init vars events version \
			pthread_mutex_init pthread_mutex_destroy \
			pthread_mutex_lock pthread_mutex_trylock \
			pthread_mutex_timedlock pthread_mutex_unlock \
+			pthread_mutex_consistent \
			pthread_mutexattr_init pthread_mutexattr_destroy \
			pthread_mutexattr_getpshared \
			pthread_mutexattr_setpshared \
+			pthread_mutexattr_getrobust \
+			pthread_mutexattr_setrobust \
+			pthread_mutexattr_getprotocol \
+			pthread_mutexattr_setprotocol \
+			pthread_mutexattr_getprioceiling \
+			pthread_mutexattr_setprioceiling \
			pthread_mutexattr_gettype pthread_mutexattr_settype \
			pthread_rwlock_init pthread_rwlock_destroy \
			pthread_rwlock_rdlock pthread_rwlock_timedrdlock \
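This commit adds the robust-mutex entry points to the build (pthread_mutex_consistent, pthread_mutexattr_{get,set}robust). For orientation, a minimal sketch of how callers use them, written against the POSIX names (this tree may also provide the older _np-suffixed spellings):

#include <pthread.h>
#include <errno.h>
#include <stdio.h>

int lock_robust (pthread_mutex_t *m)
{
  int rc = pthread_mutex_lock (m);
  if (rc == EOWNERDEAD)
    {
      /* The previous owner died while holding the mutex.  Repair the
         protected state here, then mark the mutex usable again.  */
      pthread_mutex_consistent (m);
      rc = 0;
    }
  return rc;
}

int main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t m;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setrobust (&attr, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &attr);

  if (lock_robust (&m) == 0)
    {
      puts ("locked");
      pthread_mutex_unlock (&m);
    }

  pthread_mutex_destroy (&m);
  pthread_mutexattr_destroy (&attr);
  return 0;
}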
libpthread/nptl/TODO: new file, 31 lines

- we should probably extend pthread_mutexattr_t with a field to create a
  singly linked list of all instances.  This requires changing the
  pthread_mutexattr_* functions.


- a new attribute for mutexes: number of times we spin before calling
  sys_futex

- for adaptive mutexes: when releasing, determine whether somebody spins.
  If yes, release the lock for a short time.  If someone else locks it,
  no wakeup syscall is needed.



- test with threaded process terminating and semadj (?) being applied
  only after all threads are gone



- semaphore changes:

  - sem_post should only wake one thread and only when the state of
    the semaphore changed from 0 to 1

    this also requires that sem_wait and sem_timedwait don't drop the
    post if they get canceled.

  - possibly add counter field.  This requires reviving the
    differences between old and new semaphore functions.  The old ones
    stay as they are now.  The new ones can use an additional field
    which is the counter for the number of waiters
libpthread/nptl/TODO-kernel: new file, 20 lines

- setuid/setgid must affect the whole process
  + test syscalls (getuid) afterwards
  + test core file content

  + use UID/GID in access(2), chmod(2), chown(2), link(2)

- nice level is a process property

- rlimit should be process-wide and SIGXCPU should be sent if all threads
  together exceed the limit

- getrusage() must return resource utilization for the process



The following are possible optimizations and in no way required:


- the scheduler should be thread group-aware, i.e., it has to give time to
  the thread group as a whole, not proportional to the number of threads.
libpthread/nptl/TODO-testing: new file, 20 lines

pthread_attr_setguardsize

  test effectiveness

pthread_attr_[sg]etschedparam

  what to test?

pthread_attr_[sg]etstack

  some more tests needed

pthread_getcpuclockid

  check that value is reset -> rt subdir

pthread_getschedparam
pthread_setschedparam

  what to test?
libpthread/nptl/allocatestack.c (file name not shown in the capture; inferred from the function names in the hunks below):

@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -28,13 +28,13 @@
 #include <tls.h>
 #include <lowlevellock.h>
 #include <link.h>
+#include <bits/kernel-features.h>

 #define __getpagesize getpagesize

 #ifndef NEED_SEPARATE_REGISTER_STACK

 /* Most architectures have exactly one stack pointer.  Some have more.  */
-# define STACK_VARIABLES void *stackaddr = 0
+# define STACK_VARIABLES void *stackaddr = NULL

 /* How to pass the values to the 'create_thread' function.  */
 # define STACK_VARIABLES_ARGS stackaddr

@@ -53,7 +53,7 @@

 /* We need two stacks.  The kernel will place them but we have to tell
    the kernel about the size of the reserved address space.  */
-# define STACK_VARIABLES void *stackaddr = 0; size_t stacksize
+# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

 /* How to pass the values to the 'create_thread' function.  */
 # define STACK_VARIABLES_ARGS stackaddr, stacksize
@@ -84,10 +84,10 @@
 #endif


-/* Let the architecture add some flags to the mmap() call used to
-   allocate stacks.  */
-#ifndef ARCH_MAP_FLAGS
-# define ARCH_MAP_FLAGS 0
+/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
+   a stack.  Use it when possible.  */
+#ifndef MAP_STACK
+# define MAP_STACK 0
 #endif

 /* This yields the pointer that TLS support code calls the thread pointer.  */

@@ -104,7 +104,7 @@ static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40MiBi by default.  */
 static size_t stack_cache_actsize;

 /* Mutex protecting this variable.  */
-static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER;
+static int stack_cache_lock = LLL_LOCK_INITIALIZER;

 /* List of queued stack frames.  */
 static LIST_HEAD (stack_cache);
@@ -112,10 +112,15 @@ static LIST_HEAD (stack_cache);
 /* List of the stacks in use.  */
 static LIST_HEAD (stack_used);

+/* We need to record what list operations we are going to do so that,
+   in case of an asynchronous interruption due to a fork() call, we
+   can correct for the work.  */
+static uintptr_t in_flight_stack;
+
 /* List of the threads with user provided stacks in use.  No need to
    initialize this, since it's done in __pthread_initialize_minimal.  */
 list_t __stack_user __attribute__ ((nocommon));
-hidden_def (__stack_user)
+hidden_data_def (__stack_user)

 #if COLORING_INCREMENT != 0
 /* Number of threads created.  */
@@ -127,6 +132,36 @@ static unsigned int nptl_ncreated;
 #define FREE_P(descr) ((descr)->tid <= 0)


+static void
+stack_list_del (list_t *elem)
+{
+  in_flight_stack = (uintptr_t) elem;
+
+  atomic_write_barrier ();
+
+  list_del (elem);
+
+  atomic_write_barrier ();
+
+  in_flight_stack = 0;
+}
+
+
+static void
+stack_list_add (list_t *elem, list_t *list)
+{
+  in_flight_stack = (uintptr_t) elem | 1;
+
+  atomic_write_barrier ();
+
+  list_add (elem, list);
+
+  atomic_write_barrier ();
+
+  in_flight_stack = 0;
+}
+
+
 /* We create a double linked list of all cache entries.  Double linked
    because this allows removing entries from the end.  */

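stack_list_add and stack_list_del record the element being spliced in in_flight_stack before touching the list, so that a fork() arriving mid-operation leaves enough state for the child to repair the lists in __reclaim_stacks. The low bit distinguishes an interrupted add from an interrupted delete, which is safe because list nodes are at least pointer-aligned. A hedged, self-contained sketch of just that encoding (the journal_* names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static volatile uintptr_t in_flight;  /* 0 = no list operation pending */

static void journal_begin (struct list_head *elem, int is_add)
{
  /* Low bit tags the kind of operation; the pointer survives because
     list nodes are at least pointer-aligned.  */
  in_flight = (uintptr_t) elem | (is_add ? 1 : 0);
}

static void journal_end (void)
{
  in_flight = 0;
}

/* What a fork child would inspect, as __reclaim_stacks does.  */
static void report (void)
{
  if (in_flight == 0)
    {
      puts ("no operation was interrupted");
      return;
    }
  struct list_head *elem = (struct list_head *) (in_flight & ~(uintptr_t) 1);
  printf ("interrupted %s of node %p\n",
          (in_flight & 1) ? "add" : "delete", (void *) elem);
}

int main (void)
{
  struct list_head node = { &node, &node };
  journal_begin (&node, 1);
  report ();        /* interrupted add of node ...  */
  journal_end ();
  report ();        /* no operation was interrupted */
  return 0;
}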
@@ -140,7 +175,7 @@ get_cached_stack (size_t *sizep, void **memp)
   struct pthread *result = NULL;
   list_t *entry;

-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);

   /* Search the cache for a matching entry.  We search for the
      smallest stack which has at least the required size.  Note that

@@ -173,22 +208,22 @@ get_cached_stack (size_t *sizep, void **memp)
       || __builtin_expect (result->stackblock_size > 4 * size, 0))
     {
       /* Release the lock.  */
-      lll_unlock (stack_cache_lock);
+      lll_unlock (stack_cache_lock, LLL_PRIVATE);

       return NULL;
     }

   /* Dequeue the entry.  */
-  list_del (&result->list);
+  stack_list_del (&result->list);

   /* And add to the list of stacks in use.  */
-  list_add (&result->list, &stack_used);
+  stack_list_add (&result->list, &stack_used);

   /* And decrease the cache size.  */
   stack_cache_actsize -= result->stackblock_size;

   /* Release the lock early.  */
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);

   /* Report size and location of the stack to the caller.  */
   *sizep = result->stackblock_size;
@@ -212,6 +247,45 @@ get_cached_stack (size_t *sizep, void **memp)
 }


+/* Free stacks until cache size is lower than LIMIT.  */
+void
+__free_stacks (size_t limit)
+{
+  /* We reduce the size of the cache.  Remove the last entries until
+     the size is below the limit.  */
+  list_t *entry;
+  list_t *prev;
+
+  /* Search from the end of the list.  */
+  list_for_each_prev_safe (entry, prev, &stack_cache)
+    {
+      struct pthread *curr;
+
+      curr = list_entry (entry, struct pthread, list);
+      if (FREE_P (curr))
+        {
+          /* Unlink the block.  */
+          stack_list_del (entry);
+
+          /* Account for the freed memory.  */
+          stack_cache_actsize -= curr->stackblock_size;
+
+          /* Free the memory associated with the ELF TLS.  */
+          _dl_deallocate_tls (TLS_TPADJ (curr), false);
+
+          /* Remove this block.  This should never fail.  If it does
+             something is really wrong.  */
+          if (munmap (curr->stackblock, curr->stackblock_size) != 0)
+            abort ();
+
+          /* Maybe we have freed enough.  */
+          if (stack_cache_actsize <= limit)
+            break;
+        }
+    }
+}
+
+
 /* Add a stack frame which is not used anymore to the stack.  Must be
    called with the cache lock held.  */
 static inline void
@@ -221,44 +295,11 @@ queue_stack (struct pthread *stack)
   /* We unconditionally add the stack to the list.  The memory may
      still be in use but it will not be reused until the kernel marks
      the stack as not used anymore.  */
-  list_add (&stack->list, &stack_cache);
+  stack_list_add (&stack->list, &stack_cache);

   stack_cache_actsize += stack->stackblock_size;
   if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
-    {
-      /* We reduce the size of the cache.  Remove the last entries
-         until the size is below the limit.  */
-      list_t *entry;
-      list_t *prev;
-
-      /* Search from the end of the list.  */
-      list_for_each_prev_safe (entry, prev, &stack_cache)
-        {
-          struct pthread *curr;
-
-          curr = list_entry (entry, struct pthread, list);
-          if (FREE_P (curr))
-            {
-              /* Unlink the block.  */
-              list_del (entry);
-
-              /* Account for the freed memory.  */
-              stack_cache_actsize -= curr->stackblock_size;
-
-              /* Free the memory associated with the ELF TLS.  */
-              _dl_deallocate_tls (TLS_TPADJ (curr), false);
-
-              /* Remove this block.  This should never fail.  If it
-                 does something is really wrong.  */
-              if (munmap (curr->stackblock, curr->stackblock_size) != 0)
-                abort ();
-
-              /* Maybe we have freed enough.  */
-              if (stack_cache_actsize <= stack_cache_maxsize)
-                break;
-            }
-        }
-    }
+    __free_stacks (stack_cache_maxsize);
 }


@@ -275,9 +316,14 @@ change_stack_perm (struct pthread *pd
                   + (((((pd->stackblock_size - pd->guardsize) / 2)
                        & pagemask) + pd->guardsize) & pagemask));
   size_t len = pd->stackblock + pd->stackblock_size - stack;
-#else
+#elif _STACK_GROWS_DOWN
   void *stack = pd->stackblock + pd->guardsize;
   size_t len = pd->stackblock_size - pd->guardsize;
+#elif _STACK_GROWS_UP
+  void *stack = pd->stackblock;
+  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
+#else
+# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
 #endif
   if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
     return errno;
@@ -358,6 +404,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
 #endif

+#ifndef __ASSUME_PRIVATE_FUTEX
+          /* The thread must know when private futexes are supported.  */
+          pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
+                                                    header.private_futex);
+#endif
+
 #ifdef NEED_DL_SYSINFO
          /* Copy the sysinfo value from the parent.  */
          THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;

@@ -376,12 +428,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,


          /* Prepare to modify global data.  */
-         lll_lock (stack_cache_lock);
+         lll_lock (stack_cache_lock, LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          list_add (&pd->list, &__stack_user);

-         lll_unlock (stack_cache_lock);
+         lll_unlock (stack_cache_lock, LLL_PRIVATE);
        }
      else
        {
@@ -406,8 +458,9 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
       /* Make sure the size of the stack is enough for the guard and
          eventually the thread descriptor.  */
       guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
-      if (__builtin_expect (size < (guardsize + __static_tls_size
-                                    + MINIMAL_REST_STACK + pagesize_m1 + 1),
+      if (__builtin_expect (size < ((guardsize + __static_tls_size
+                                     + MINIMAL_REST_STACK + pagesize_m1)
+                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;

@@ -427,15 +480,14 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 #endif

          mem = mmap (NULL, size, prot,
-                     MAP_PRIVATE | MAP_ANONYMOUS | ARCH_MAP_FLAGS, -1, 0);
+                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__builtin_expect (mem == MAP_FAILED, 0))
            {
-#ifdef ARCH_RETRY_MMAP
-             mem = ARCH_RETRY_MMAP (size);
-             if (__builtin_expect (mem == MAP_FAILED, 0))
-#endif
-             return errno;
+             if (errno == ENOMEM)
+               __set_errno (EAGAIN);
+
+             return errno;
            }

          /* SIZE is guaranteed to be greater than zero.
@@ -490,6 +542,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
              __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
 #endif

+#ifndef __ASSUME_PRIVATE_FUTEX
+             /* The thread must know when private futexes are supported.  */
+             pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
+                                                       header.private_futex);
+#endif
+
 #ifdef NEED_DL_SYSINFO
              /* Copy the sysinfo value from the parent.  */
              THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;

@@ -512,12 +570,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,


              /* Prepare to modify global data.  */
-             lll_lock (stack_cache_lock);
+             lll_lock (stack_cache_lock, LLL_PRIVATE);

              /* And add to the list of stacks in use.  */
-             list_add (&pd->list, &stack_used);
+             stack_list_add (&pd->list, &stack_used);

-             lll_unlock (stack_cache_lock);
+             lll_unlock (stack_cache_lock, LLL_PRIVATE);


              /* Note that all of the stack and the thread descriptor is
@@ -533,8 +591,10 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
        {
 #ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
-#else
+#elif _STACK_GROWS_DOWN
          char *guard = mem;
+#elif _STACK_GROWS_UP
+         char *guard = (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
 #endif
          if (mprotect (guard, guardsize, PROT_NONE) != 0)
            {

@@ -542,12 +602,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
            mprot_error:
              err = errno;

-             lll_lock (stack_cache_lock);
+             lll_lock (stack_cache_lock, LLL_PRIVATE);

              /* Remove the thread from the list.  */
-             list_del (&pd->list);
+             stack_list_del (&pd->list);

-             lll_unlock (stack_cache_lock);
+             lll_unlock (stack_cache_lock, LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);
@@ -581,10 +641,14 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                            oldguard + pd->guardsize - guard - guardsize,
                            prot) != 0)
                goto mprot_error;
-#else
+#elif _STACK_GROWS_DOWN
              if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                            prot) != 0)
                goto mprot_error;
+#elif _STACK_GROWS_UP
+             if (mprotect ((char *) pd - pd->guardsize,
+                           pd->guardsize - guardsize, prot) != 0)
+               goto mprot_error;
 #endif

              pd->guardsize = guardsize;

@@ -599,6 +663,18 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

+  /* The robust mutex lists also need to be initialized
+     unconditionally because the cleanup for the previous stack owner
+     might have happened in the kernel.  */
+  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
+                                  - offsetof (pthread_mutex_t,
+                                              __data.__list.__next));
+  pd->robust_head.list_op_pending = NULL;
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+  pd->robust_prev = &pd->robust_head;
+#endif
+  pd->robust_head.list = &pd->robust_head;
+
  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

@@ -612,8 +688,11 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
 #ifdef NEED_SEPARATE_REGISTER_STACK
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
-#else
+#elif _STACK_GROWS_DOWN
  *stack = stacktop;
+#elif _STACK_GROWS_UP
+  *stack = pd->stackblock;
+  assert (*stack > 0);
 #endif

  return 0;

@@ -624,11 +703,11 @@ void
 internal_function
 __deallocate_stack (struct pthread *pd)
 {
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);

   /* Remove the thread from the list of threads with user defined
      stacks.  */
-  list_del (&pd->list);
+  stack_list_del (&pd->list);

   /* Not much to do.  Just free the mmap()ed memory.  Note that we do
      not reset the 'used' flag in the 'tid' field.  This is done by
@@ -640,7 +719,7 @@ __deallocate_stack (struct pthread *pd)
   /* Free the memory associated with the ELF TLS.  */
   _dl_deallocate_tls (TLS_TPADJ (pd), false);

-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);
 }


@@ -657,7 +736,7 @@ __make_stacks_executable (void **stack_endp)
   const size_t pagemask = ~(__getpagesize () - 1);
 #endif

-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);

   list_t *runp;
   list_for_each (runp, &stack_used)

@@ -686,7 +765,7 @@ __make_stacks_executable (void **stack_endp)
        break;
     }

-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);

   return err;
 }
@@ -701,15 +780,51 @@ __reclaim_stacks (void)
 {
   struct pthread *self = (struct pthread *) THREAD_SELF;

-  /* No locking necessary.  The caller is the only stack in use.  */
+  /* No locking necessary.  The caller is the only stack in use.  But
+     we have to be aware that we might have interrupted a list
+     operation.  */
+
+  if (in_flight_stack != 0)
+    {
+      bool add_p = in_flight_stack & 1;
+      list_t *elem = (list_t *) (in_flight_stack & ~UINTMAX_C (1));
+
+      if (add_p)
+        {
+          /* We always add at the beginning of the list.  So in this
+             case we only need to check the beginning of these lists.  */
+          int check_list (list_t *l)
+          {
+            if (l->next->prev != l)
+              {
+                assert (l->next->prev == elem);
+
+                elem->next = l->next;
+                elem->prev = l;
+                l->next = elem;
+
+                return 1;
+              }
+
+            return 0;
+          }
+
+          if (check_list (&stack_used) == 0)
+            (void) check_list (&stack_cache);
+        }
+      else
+        {
+          /* We can simply always replay the delete operation.  */
+          elem->next->prev = elem->prev;
+          elem->prev->next = elem->next;
+        }
+    }

   /* Mark all stacks except the still running one as free.  */
   list_t *runp;
   list_for_each (runp, &stack_used)
     {
-      struct pthread *curp;
-
-      curp = list_entry (runp, struct pthread, list);
+      struct pthread *curp = list_entry (runp, struct pthread, list);
       if (curp != self)
        {
          /* This marks the stack as free.  */
@@ -720,16 +835,43 @@ __reclaim_stacks (void)

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;
+
+         if (curp->specific_used)
+           {
+             /* Clear the thread-specific data.  */
+             memset (curp->specific_1stblock, '\0',
+                     sizeof (curp->specific_1stblock));
+
+             curp->specific_used = false;
+
+             for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
+               if (curp->specific[cnt] != NULL)
+                 {
+                   memset (curp->specific[cnt], '\0',
+                           sizeof (curp->specific_1stblock));
+
+                   /* We have allocated the block which we do not
+                      free here so re-set the bit.  */
+                   curp->specific_used = true;
+                 }
+           }
        }
     }

+  /* Reset the PIDs in any cached stacks.  */
+  list_for_each (runp, &stack_cache)
+    {
+      struct pthread *curp = list_entry (runp, struct pthread, list);
+      curp->pid = self->pid;
+    }
+
   /* Add the stack of all running threads to the cache.  */
   list_splice (&stack_used, &stack_cache);

   /* Remove the entry for the current thread from the cache list
      and add it to the list of running threads.  Which of the two
      lists is decided by the user_stack flag.  */
-  list_del (&self->list);
+  stack_list_del (&self->list);

   /* Re-initialize the lists for all the threads.  */
   INIT_LIST_HEAD (&stack_used);
@@ -743,6 +885,8 @@ __reclaim_stacks (void)
   /* There is one thread running.  */
   __nptl_nthreads = 1;

+  in_flight_stack = 0;
+
   /* Initialize the lock.  */
   stack_cache_lock = LLL_LOCK_INITIALIZER;
 }
@@ -757,7 +901,7 @@ __find_thread_by_id (pid_t tid)
 {
   struct pthread *result = NULL;

-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);

   /* Iterate over the list with system-allocated threads first.  */
   list_t *runp;
@@ -789,24 +933,100 @@ __find_thread_by_id (pid_t tid)
 }

 out:
-  lll_unlock (stack_cache_lock);
+  lll_unlock (stack_cache_lock, LLL_PRIVATE);

   return result;
 }
 #endif


+static void
+internal_function
+setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
+{
+  int ch;
+
+  /* Don't let the thread exit before the setxid handler runs.  */
+  t->setxid_futex = 0;
+
+  do
+    {
+      ch = t->cancelhandling;
+
+      /* If the thread is exiting right now, ignore it.  */
+      if ((ch & EXITING_BITMASK) != 0)
+        return;
+    }
+  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
+                                               ch | SETXID_BITMASK, ch));
+}
+
+
+static void
+internal_function
+setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
+{
+  int ch;
+
+  do
+    {
+      ch = t->cancelhandling;
+      if ((ch & SETXID_BITMASK) == 0)
+        return;
+    }
+  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
+                                               ch & ~SETXID_BITMASK, ch));
+
+  /* Release the futex just in case.  */
+  t->setxid_futex = 1;
+  lll_futex_wake (&t->setxid_futex, 1, LLL_PRIVATE);
+}
+
+
+static int
+internal_function
+setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
+{
+  if ((t->cancelhandling & SETXID_BITMASK) == 0)
+    return 0;
+
+  int val;
+  INTERNAL_SYSCALL_DECL (err);
+#if __ASSUME_TGKILL
+  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
+                          t->tid, SIGSETXID);
+#else
+# ifdef __NR_tgkill
+  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
+                          t->tid, SIGSETXID);
+  if (INTERNAL_SYSCALL_ERROR_P (val, err)
+      && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
+# endif
+    val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
+#endif
+
+  /* If this failed, the thread must not have started yet or has
+     already exited.  */
+  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
+    {
+      atomic_increment (&cmdp->cntr);
+      return 1;
+    }
+  else
+    return 0;
+}
+
+
 int
 attribute_hidden
 __nptl_setxid (struct xid_command *cmdp)
 {
+  int signalled;
   int result;
-  lll_lock (stack_cache_lock);
+  lll_lock (stack_cache_lock, LLL_PRIVATE);

   __xidcmd = cmdp;
   cmdp->cntr = 0;

-  INTERNAL_SYSCALL_DECL (err);
-
   struct pthread *self = THREAD_SELF;

   /* Iterate over the list with system-allocated threads first.  */
@@ -814,65 +1034,79 @@ __nptl_setxid (struct xid_command *cmdp)
   list_for_each (runp, &stack_used)
     {
       struct pthread *t = list_entry (runp, struct pthread, list);
-      if (t != self)
-        {
-          int val;
-#if __ASSUME_TGKILL
-          val = INTERNAL_SYSCALL (tgkill, err, 3,
-                                  THREAD_GETMEM (THREAD_SELF, pid),
-                                  t->tid, SIGSETXID);
-#else
-# ifdef __NR_tgkill
-          val = INTERNAL_SYSCALL (tgkill, err, 3,
-                                  THREAD_GETMEM (THREAD_SELF, pid),
-                                  t->tid, SIGSETXID);
-          if (INTERNAL_SYSCALL_ERROR_P (val, err)
-              && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
-# endif
-          val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
-#endif
-
-          if (!INTERNAL_SYSCALL_ERROR_P (val, err))
-            atomic_increment (&cmdp->cntr);
-        }
+      if (t == self)
+        continue;
+
+      setxid_mark_thread (cmdp, t);
     }

   /* Now the list with threads using user-allocated stacks.  */
   list_for_each (runp, &__stack_user)
     {
       struct pthread *t = list_entry (runp, struct pthread, list);
-      if (t != self)
-        {
-          int val;
-#if __ASSUME_TGKILL
-          val = INTERNAL_SYSCALL (tgkill, err, 3,
-                                  THREAD_GETMEM (THREAD_SELF, pid),
-                                  t->tid, SIGSETXID);
-#else
-# ifdef __NR_tgkill
-          val = INTERNAL_SYSCALL (tgkill, err, 3,
-                                  THREAD_GETMEM (THREAD_SELF, pid),
-                                  t->tid, SIGSETXID);
-          if (INTERNAL_SYSCALL_ERROR_P (val, err)
-              && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
-# endif
-          val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
-#endif
-
-          if (!INTERNAL_SYSCALL_ERROR_P (val, err))
-            atomic_increment (&cmdp->cntr);
-        }
+      if (t == self)
+        continue;
+
+      setxid_mark_thread (cmdp, t);
     }

-  int cur = cmdp->cntr;
-  while (cur != 0)
+  /* Iterate until we don't succeed in signalling anyone.  That means
+     we have gotten all running threads, and their children will be
+     automatically correct once started.  */
+  do
     {
-      lll_futex_wait (&cmdp->cntr, cur);
-      cur = cmdp->cntr;
+      signalled = 0;
+
+      list_for_each (runp, &stack_used)
+        {
+          struct pthread *t = list_entry (runp, struct pthread, list);
+          if (t == self)
+            continue;
+
+          signalled += setxid_signal_thread (cmdp, t);
+        }
+
+      list_for_each (runp, &__stack_user)
+        {
+          struct pthread *t = list_entry (runp, struct pthread, list);
+          if (t == self)
+            continue;
+
+          signalled += setxid_signal_thread (cmdp, t);
+        }
+
+      int cur = cmdp->cntr;
+      while (cur != 0)
+        {
+          lll_futex_wait (&cmdp->cntr, cur, LLL_PRIVATE);
+          cur = cmdp->cntr;
+        }
     }
+  while (signalled != 0);
+
+  /* Clean up flags, so that no thread blocks during exit waiting
+     for a signal which will never come.  */
+  list_for_each (runp, &stack_used)
+    {
+      struct pthread *t = list_entry (runp, struct pthread, list);
+      if (t == self)
+        continue;
+
+      setxid_unmark_thread (cmdp, t);
+    }
+
+  list_for_each (runp, &__stack_user)
+    {
+      struct pthread *t = list_entry (runp, struct pthread, list);
+      if (t == self)
+        continue;
+
+      setxid_unmark_thread (cmdp, t);
+    }

   /* This must be last, otherwise the current thread might not have
      permissions to send SIGSETXID syscall to the other threads.  */
+  INTERNAL_SYSCALL_DECL (err);
   result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                                  cmdp->id[0], cmdp->id[1], cmdp->id[2]);
   if (INTERNAL_SYSCALL_ERROR_P (result, err))
@ -881,7 +1115,7 @@ __nptl_setxid (struct xid_command *cmdp)
|
||||
result = -1;
|
||||
}
|
||||
|
||||
lll_unlock (stack_cache_lock);
|
||||
lll_unlock (stack_cache_lock, LLL_PRIVATE);
|
||||
return result;
|
||||
}
|
||||
|
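The function above is the initiator side of a counted broadcast: every other thread is marked and signalled, each signal handler acknowledges by decrementing a shared counter, and the initiator sleeps on that counter with a futex until it drains to zero. A minimal sketch of the same pattern, assuming Linux and GCC atomic builtins; the helper names (ack_counter, futex_wait) are illustrative and not part of this tree:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static int ack_counter;  /* acknowledgements still outstanding */

static long futex_wait (int *addr, int val)
{
  /* Sleep only while *addr still equals VAL; spurious wakeups are fine
     because the caller re-reads the counter in a loop.  */
  return syscall (SYS_futex, addr, FUTEX_WAIT, val, NULL, NULL, 0);
}

/* Responder side: acknowledge, and wake the initiator on the last ack.  */
static void acknowledge (void)
{
  if (__sync_sub_and_fetch (&ack_counter, 1) == 0)
    syscall (SYS_futex, &ack_counter, FUTEX_WAKE, 1, NULL, NULL, 0);
}

/* Initiator side: wait until every signalled thread has acknowledged.  */
static void wait_for_acks (void)
{
  int cur;
  while ((cur = ack_counter) != 0)
    futex_wait (&ack_counter, cur);
}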
@ -910,7 +1144,7 @@ void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock);
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
@ -921,5 +1155,62 @@ __pthread_init_static_tls (struct link_map *map)
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock);
  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}


void
attribute_hidden
__wait_lookup_done (void)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}

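__wait_lookup_done relies on a small three-state handshake on each thread's gscope_flag (UNUSED, USED, WAIT): the waiter CASes USED to WAIT so the owner knows a wakeup is required, then sleeps until the owner clears the flag. A self-contained sketch of both sides, assuming Linux and GCC builtins; the enum values and helper names are illustrative:

#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

enum { GSCOPE_UNUSED, GSCOPE_USED, GSCOPE_WAIT };

static void futex_wait (int *addr, int val)
{ syscall (SYS_futex, addr, FUTEX_WAIT, val, NULL, NULL, 0); }

static void futex_wake (int *addr, int n)
{ syscall (SYS_futex, addr, FUTEX_WAKE, n, NULL, NULL, 0); }

/* Waiter side: called for every thread still inside the global scope.  */
static void wait_for_gscope (int *flagp)
{
  /* Announce that a wakeup is needed; if the CAS fails the thread
     already left the scope and there is nothing to wait for.  */
  if (!__sync_bool_compare_and_swap (flagp, GSCOPE_USED, GSCOPE_WAIT))
    return;
  do
    futex_wait (flagp, GSCOPE_WAIT);
  while (*flagp == GSCOPE_WAIT);
}

/* Owner side: on leaving the scope, only the WAIT state needs a wake.  */
static void gscope_reset (int *flagp)
{
  /* Atomic exchange; returns the previous flag value.  */
  if (__sync_lock_test_and_set (flagp, GSCOPE_UNUSED) == GSCOPE_WAIT)
    futex_wake (flagp, INT_MAX);
}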
@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -70,14 +70,13 @@ __pthread_disable_asynccancel (int oldtype)
    return;

  struct pthread *self = THREAD_SELF;
  int newval;

  int oldval = THREAD_GETMEM (self, cancelhandling);

  while (1)
    {
      int newval = oldval & ~CANCELTYPE_BITMASK;

      if (newval == oldval)
        break;
      newval = oldval & ~CANCELTYPE_BITMASK;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
@ -87,4 +86,15 @@ __pthread_disable_asynccancel (int oldtype)
      /* Prepare the next round.  */
      oldval = curval;
    }

  /* We cannot return when we are being canceled.  Upon return the
     thread might do things which would have to be undone.  The
     following loop should loop until the cancellation signal is
     delivered.  */
  while (__builtin_expect ((newval & (CANCELING_BITMASK | CANCELED_BITMASK))
                           == CANCELING_BITMASK, 0))
    {
      lll_futex_wait (&self->cancelhandling, newval, LLL_PRIVATE);
      newval = THREAD_GETMEM (self, cancelhandling);
    }
}

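The loop in __pthread_disable_asynccancel is the standard compare-and-exchange retry idiom: compute the new word from a snapshot, try to install it, and restart from whatever value won if another thread raced. A hedged, generic sketch of that idiom (an illustrative helper, not code from this patch):

/* Atomically clear MASK in *WORD and return the value observed when
   the update succeeded (or when the bits were already clear).  */
static int atomic_clear_bits (volatile int *word, int mask)
{
  int oldval = *word;
  while (1)
    {
      int newval = oldval & ~mask;
      if (newval == oldval)
        break;                      /* bits already clear, nothing to do */
      int curval = __sync_val_compare_and_swap (word, oldval, newval);
      if (curval == oldval)
        break;                      /* our update won */
      oldval = curval;              /* lost the race; retry from new value */
    }
  return oldval;
}

The new tail loop then closes a race the old code left open: if CANCELING is set but CANCELED is not yet, a cancellation signal is in flight, so the thread parks on its own cancelhandling futex instead of returning and doing work that would have to be undone.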
@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002-2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -36,6 +36,7 @@
#endif
#define __need_res_state
#include <resolv.h>
#include <bits/kernel-features.h>

#ifndef TCB_ALIGNMENT
# define TCB_ALIGNMENT sizeof (double)
@ -101,6 +102,23 @@ struct xid_command
};


/* Data structure used by the kernel to find robust futexes.  */
struct robust_list_head
{
  void *list;
  long int futex_offset;
  void *list_op_pending;
};


/* Data structure used to handle thread priority protection.  */
struct priority_protection_data
{
  int priomax;
  unsigned int priomap[];
};


/* Thread descriptor data structure.  */
struct pthread
{
@ -113,6 +131,10 @@ struct pthread
  struct
  {
    int multiple_threads;
    int gscope_flag;
# ifndef __ASSUME_PRIVATE_FUTEX
    int private_futex;
# endif
  } header;
#endif

@ -120,7 +142,7 @@ struct pthread
     is private and subject to change without affecting the official ABI.
     We just have it here in case it might be convenient for some
     implementation-specific instrumentation hack or suchlike.  */
  void *__padding[16];
  void *__padding[24];
};

  /* This descriptor's link on the `stack_used' or `__stack_user' list.  */
@ -133,6 +155,82 @@ struct pthread
  /* Process ID - thread group ID in kernel speak.  */
  pid_t pid;

  /* List of robust mutexes the thread is holding.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust_prev;
  struct robust_list_head robust_head;

  /* The list above is strange.  It is basically a doubly linked list
     but the pointer to the next/previous element of the list points
     in the middle of the object, the __next element.  Whenever
     casting to __pthread_list_t we need to adjust the pointer
     first.  */
# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))

# define ENQUEUE_MUTEX_BOTH(mutex, val) \
  do { \
    __pthread_list_t *next = (__pthread_list_t *) \
      ((((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_head.list)) & ~1ul) \
       - QUEUE_PTR_ADJUST); \
    next->__prev = (void *) &mutex->__data.__list.__next; \
    mutex->__data.__list.__next = THREAD_GETMEM (THREAD_SELF, \
                                                 robust_head.list); \
    mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \
    THREAD_SETMEM (THREAD_SELF, robust_head.list, \
                   (void *) (((uintptr_t) &mutex->__data.__list.__next) \
                             | val)); \
  } while (0)
# define DEQUEUE_MUTEX(mutex) \
  do { \
    __pthread_list_t *next = (__pthread_list_t *) \
      ((char *) (((uintptr_t) mutex->__data.__list.__next) & ~1ul) \
       - QUEUE_PTR_ADJUST); \
    next->__prev = mutex->__data.__list.__prev; \
    __pthread_list_t *prev = (__pthread_list_t *) \
      ((char *) (((uintptr_t) mutex->__data.__list.__prev) & ~1ul) \
       - QUEUE_PTR_ADJUST); \
    prev->__next = mutex->__data.__list.__next; \
    mutex->__data.__list.__prev = NULL; \
    mutex->__data.__list.__next = NULL; \
  } while (0)
#else
  union
  {
    __pthread_slist_t robust_list;
    struct robust_list_head robust_head;
  };

# define ENQUEUE_MUTEX_BOTH(mutex, val) \
  do { \
    mutex->__data.__list.__next \
      = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
    THREAD_SETMEM (THREAD_SELF, robust_list.__next, \
                   (void *) (((uintptr_t) &mutex->__data.__list) | val)); \
  } while (0)
# define DEQUEUE_MUTEX(mutex) \
  do { \
    __pthread_slist_t *runp = (__pthread_slist_t *) \
      (((uintptr_t) THREAD_GETMEM (THREAD_SELF, robust_list.__next)) & ~1ul); \
    if (runp == &mutex->__data.__list) \
      THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next); \
    else \
      { \
        __pthread_slist_t *next = (__pthread_slist_t *) \
          (((uintptr_t) runp->__next) & ~1ul); \
        while (next != &mutex->__data.__list) \
          { \
            runp = next; \
            next = (__pthread_slist_t *) (((uintptr_t) runp->__next) & ~1ul); \
          } \
 \
        runp->__next = next->__next; \
        mutex->__data.__list.__next = NULL; \
      } \
  } while (0)
#endif
#define ENQUEUE_MUTEX(mutex) ENQUEUE_MUTEX_BOTH (mutex, 0)
#define ENQUEUE_MUTEX_PI(mutex) ENQUEUE_MUTEX_BOTH (mutex, 1)
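robust_list_head is the structure the kernel walks when a thread dies: list points at a chain of lock words inside held robust mutexes (each reached via futex_offset), and list_op_pending covers the window where a lock or unlock was interrupted mid-flight. Registration is one syscall per thread. A hedged sketch, assuming Linux; the struct name and helper are illustrative stand-ins for the real ones above:

#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

struct robust_list_head_sketch
{
  void *list;             /* circular list of held robust mutexes */
  long int futex_offset;  /* offset from a list entry to its futex word */
  void *list_op_pending;  /* entry being (un)locked when we died, if any */
};

/* Each thread tells the kernel where its robust list lives; on thread
   death the kernel walks the list, sets FUTEX_OWNER_DIED in each lock
   word and wakes one waiter per mutex.  */
static int register_robust_list (struct robust_list_head_sketch *head)
{
  head->list = head;              /* empty list points at itself */
  head->list_op_pending = NULL;
  return syscall (SYS_set_robust_list, head, sizeof *head);
}

The low bit ENQUEUE_MUTEX_BOTH ors into the pointer (val = 1 for ENQUEUE_MUTEX_PI) tags priority-inheritance mutexes in-band, which is why every traversal masks with ~1ul first.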

  /* List of cleanup buffers.  */
  struct _pthread_cleanup_buffer *cleanup;

@ -144,25 +242,25 @@ struct pthread
  int cancelhandling;
  /* Bit set if cancellation is disabled.  */
#define CANCELSTATE_BIT 0
#define CANCELSTATE_BITMASK 0x01
#define CANCELSTATE_BITMASK (0x01 << CANCELSTATE_BIT)
  /* Bit set if asynchronous cancellation mode is selected.  */
#define CANCELTYPE_BIT 1
#define CANCELTYPE_BITMASK 0x02
#define CANCELTYPE_BITMASK (0x01 << CANCELTYPE_BIT)
  /* Bit set if canceling has been initiated.  */
#define CANCELING_BIT 2
#define CANCELING_BITMASK 0x04
#define CANCELING_BITMASK (0x01 << CANCELING_BIT)
  /* Bit set if canceled.  */
#define CANCELED_BIT 3
#define CANCELED_BITMASK 0x08
#define CANCELED_BITMASK (0x01 << CANCELED_BIT)
  /* Bit set if thread is exiting.  */
#define EXITING_BIT 4
#define EXITING_BITMASK 0x10
#define EXITING_BITMASK (0x01 << EXITING_BIT)
  /* Bit set if thread terminated and TCB is freed.  */
#define TERMINATED_BIT 5
#define TERMINATED_BITMASK 0x20
#define TERMINATED_BITMASK (0x01 << TERMINATED_BIT)
  /* Bit set if thread is supposed to change XID.  */
#define SETXID_BIT 6
#define SETXID_BITMASK 0x40
#define SETXID_BITMASK (0x01 << SETXID_BIT)
  /* Mask for the rest.  Helps the compiler to optimize.  */
#define CANCEL_RESTMASK 0xffffff80

@ -174,6 +272,9 @@ struct pthread
               | EXITING_BITMASK | CANCEL_RESTMASK | TERMINATED_BITMASK)) \
   == (CANCELTYPE_BITMASK | CANCELED_BITMASK))
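The predicate just shown masks every bit that matters and compares against the one pattern that means "cancellation enabled, asynchronous type, canceled, not exiting or terminated". A self-checking demo built only from the values defined above (the macro name here is a shortened stand-in):

#include <assert.h>

#define CANCELSTATE_BITMASK 0x01
#define CANCELTYPE_BITMASK  0x02
#define CANCELING_BITMASK   0x04
#define CANCELED_BITMASK    0x08
#define EXITING_BITMASK     0x10
#define TERMINATED_BITMASK  0x20
#define CANCEL_RESTMASK     0xffffff80

#define ENABLED_AND_CANCELED_AND_ASYNC(value) \
  (((value) & (CANCELSTATE_BITMASK | CANCELTYPE_BITMASK | CANCELED_BITMASK \
               | EXITING_BITMASK | CANCEL_RESTMASK | TERMINATED_BITMASK))  \
   == (CANCELTYPE_BITMASK | CANCELED_BITMASK))

int main (void)
{
  /* Async type + canceled, everything else clear: predicate holds.  */
  assert (ENABLED_AND_CANCELED_AND_ASYNC (CANCELTYPE_BITMASK
                                          | CANCELED_BITMASK));
  /* Same bits but cancellation disabled (state bit set): must fail.  */
  assert (!ENABLED_AND_CANCELED_AND_ASYNC (CANCELSTATE_BITMASK
                                           | CANCELTYPE_BITMASK
                                           | CANCELED_BITMASK));
  /* CANCELING alone is not enough; the handler still has to set
     CANCELED, and CANCELING is deliberately outside the mask.  */
  assert (!ENABLED_AND_CANCELED_AND_ASYNC (CANCELING_BITMASK));
  return 0;
}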

  /* Flags.  Including those copied from the thread attribute.  */
  int flags;

  /* We allocate one block of references here.  This should be enough
     to avoid allocating any memory dynamically for most applications.  */
  struct pthread_key_data
@ -187,12 +288,12 @@ struct pthread
    void *data;
  } specific_1stblock[PTHREAD_KEY_2NDLEVEL_SIZE];

  /* Flag which is set when specific data is set.  */
  bool specific_used;

  /* Two-level array for the thread-specific data.  */
  struct pthread_key_data *specific[PTHREAD_KEY_1STLEVEL_SIZE];

  /* Flag which is set when specific data is set.  */
  bool specific_used;

  /* True if events must be reported.  */
  bool report_events;

@ -202,11 +303,15 @@ struct pthread
  /* True if thread must stop at startup time.  */
  bool stopped_start;

  /* The parent's cancel handling at the time of the pthread_create
     call.  This might be needed to undo the effects of a cancellation.  */
  int parent_cancelhandling;

  /* Lock to synchronize access to the descriptor.  */
  lll_lock_t lock;
  int lock;

  /* Lock for synchronizing setxid calls.  */
  lll_lock_t setxid_futex;
  int setxid_futex;

#if HP_TIMING_AVAIL
  /* Offset of the CPU clock at thread start time.  */
@ -223,9 +328,6 @@ struct pthread
/* Check whether a thread is detached.  */
#define IS_DETACHED(pd) ((pd)->joinid == (pd))

  /* Flags.  Including those copied from the thread attribute.  */
  int flags;

  /* The result of the thread function.  */
  void *result;

@ -257,6 +359,9 @@ struct pthread
  /* This is what the user specified and what we will report.  */
  size_t reported_guardsize;

  /* Thread Priority Protection data.  */
  struct priority_protection_data *tpp;

  /* Resolver state.  */
  struct __res_state res;


@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -21,22 +21,24 @@
#include <pthreadP.h>
#include <signal.h>
#include <stdlib.h>

#include <atomic.h>
#include <sysdep.h>


/* Pointers to the libc functions.  */
struct pthread_functions __libc_pthread_functions attribute_hidden;
int __libc_pthread_functions_init attribute_hidden;


#define FORWARD2(name, rettype, decl, params, defaction) \
rettype \
name decl \
{ \
  if (__libc_pthread_functions.ptr_##name == NULL) \
  if (!__libc_pthread_functions_init) \
    defaction; \
 \
  return __libc_pthread_functions.ptr_##name params; \
  return PTHFCT_CALL (ptr_##name, params); \
}
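FORWARD2 generates stubs in libc that either run a cheap default action (single-threaded case) or jump through a function-pointer table that the threads library fills in at load time; the rewrite keys the check on one __libc_pthread_functions_init flag instead of testing each pointer. A simplified sketch of the mechanism (the struct, flag and FWD_CALL names are illustrative; in the real tree PTHFCT_CALL may additionally demangle a protected pointer):

/* Table of entry points, filled in when the threads library loads.  */
struct fwd_functions
{
  int (*ptr_pthread_mutex_lock) (void *);
};

static struct fwd_functions fwd_table;
static int fwd_table_init;          /* set once the table is valid */

#define FWD_CALL(member, args) (fwd_table.member args)

int fwd_pthread_mutex_lock (void *mutex)
{
  if (!fwd_table_init)
    return 0;                       /* single-threaded: locking is a no-op */
  return FWD_CALL (ptr_pthread_mutex_lock, (mutex));
}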

#define FORWARD(name, decl, params, defretval) \
@ -123,34 +125,13 @@ FORWARD (pthread_setschedparam,

FORWARD (pthread_mutex_destroy, (pthread_mutex_t *mutex), (mutex), 0)

libc_hidden_proto(pthread_mutex_init)
FORWARD (pthread_mutex_init,
         (pthread_mutex_t *mutex, const pthread_mutexattr_t *mutexattr),
         (mutex, mutexattr), 0)
strong_alias(pthread_mutex_init, __pthread_mutex_init)
libc_hidden_def(pthread_mutex_init)

libc_hidden_proto(pthread_mutex_trylock)
FORWARD (pthread_mutex_trylock, (pthread_mutex_t *mutex), (mutex), 0)
strong_alias(pthread_mutex_trylock, __pthread_mutex_trylock)
libc_hidden_def(pthread_mutex_trylock)

libc_hidden_proto(pthread_mutex_lock)
FORWARD (pthread_mutex_lock, (pthread_mutex_t *mutex), (mutex), 0)
strong_alias(pthread_mutex_lock, __pthread_mutex_lock)
libc_hidden_def(pthread_mutex_lock)

libc_hidden_proto(pthread_mutex_unlock)
FORWARD (pthread_mutex_unlock, (pthread_mutex_t *mutex), (mutex), 0)
strong_alias(pthread_mutex_unlock, __pthread_mutex_unlock)
libc_hidden_def(pthread_mutex_unlock)

FORWARD (pthread_mutexattr_init, (pthread_mutexattr_t *attr), (attr), 0)

FORWARD (pthread_mutexattr_destroy, (pthread_mutexattr_t *attr), (attr), 0)

FORWARD (pthread_mutexattr_settype, (pthread_mutexattr_t *attr, int kind),
         (attr, kind), 0)


FORWARD2 (pthread_self, pthread_t, (void), (), return 0)
@ -163,7 +144,8 @@ FORWARD (pthread_setcanceltype, (int type, int *oldtype), (type, oldtype), 0)

#define return /* value is void */
FORWARD2(__pthread_unwind,
         void attribute_hidden __attribute ((noreturn)) __cleanup_fct_attribute,
         void attribute_hidden __attribute ((noreturn)) __cleanup_fct_attribute
         attribute_compat_text_section,
         (__pthread_unwind_buf_t *buf), (buf), {
  /* We cannot call abort() here.  */
  INTERNAL_SYSCALL_DECL (err);

@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -18,6 +18,7 @@
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
@ -29,49 +30,46 @@
#include <ldsodefs.h>
#include <tls.h>
#include <fork.h>
#include <version.h>
#include <smp.h>
#include <lowlevellock.h>
#include <version.h>


#ifndef __NR_set_tid_address
/* XXX For the time being...  Once we can rely on the kernel headers
   having the definition remove these lines.  */
#if defined __s390__
# define __NR_set_tid_address 252
#elif defined __ia64__
# define __NR_set_tid_address 1233
#elif defined __i386__
# define __NR_set_tid_address 258
#elif defined __x86_64__
# define __NR_set_tid_address 218
#elif defined __powerpc__
# define __NR_set_tid_address 232
#elif defined __sparc__
# define __NR_set_tid_address 166
#else
# error "define __NR_set_tid_address"
#endif
#endif
#include <bits/kernel-features.h>


/* Size and alignment of static TLS block.  */
size_t __static_tls_size;
size_t __static_tls_align_m1;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the system call and so cannot use it.  */
int __set_robust_list_avail;
# define set_robust_list_not_avail() \
  __set_robust_list_avail = -1
#else
# define set_robust_list_not_avail() do { } while (0)
#endif

#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
/* Nonzero if we have FUTEX_CLOCK_REALTIME.  */
int __have_futex_clock_realtime;
# define __set_futex_clock_realtime() \
  __have_futex_clock_realtime = 1
#else
#define __set_futex_clock_realtime() do { } while (0)
#endif

/* Version of the library, used in libthread_db to detect mismatches.  */
static const char nptl_version[] __attribute_used__ = VERSION;


#if defined USE_TLS && !defined SHARED
#ifndef SHARED
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
#endif

int
__libc_sigaction (int sig, const struct sigaction *act, struct sigaction *oact);


#ifdef SHARED
static void nptl_freeres (void);


static const struct pthread_functions pthread_functions =
  {
    .ptr_pthread_attr_destroy = __pthread_attr_destroy,
@ -98,10 +96,10 @@ static const struct pthread_functions pthread_functions =
    .ptr___pthread_exit = __pthread_exit,
    .ptr_pthread_getschedparam = __pthread_getschedparam,
    .ptr_pthread_setschedparam = __pthread_setschedparam,
    .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
    .ptr_pthread_mutex_init = __pthread_mutex_init,
    .ptr_pthread_mutex_lock = __pthread_mutex_lock,
    .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
    .ptr_pthread_mutex_destroy = INTUSE(__pthread_mutex_destroy),
    .ptr_pthread_mutex_init = INTUSE(__pthread_mutex_init),
    .ptr_pthread_mutex_lock = INTUSE(__pthread_mutex_lock),
    .ptr_pthread_mutex_unlock = INTUSE(__pthread_mutex_unlock),
    .ptr_pthread_self = __pthread_self,
    .ptr_pthread_setcancelstate = __pthread_setcancelstate,
    .ptr_pthread_setcanceltype = __pthread_setcanceltype,
@ -118,7 +116,9 @@ static const struct pthread_functions pthread_functions =
    .ptr_nthreads = &__nptl_nthreads,
    .ptr___pthread_unwind = &__pthread_unwind,
    .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
    .ptr__nptl_setxid = __nptl_setxid
    .ptr__nptl_setxid = __nptl_setxid,
    /* For now only the stack cache needs to be freed.  */
    .ptr_freeres = nptl_freeres
  };
# define ptr_pthread_functions &pthread_functions
#else
@ -126,10 +126,30 @@ static const struct pthread_functions pthread_functions =
#endif


#ifdef SHARED
/* This function is called indirectly from the freeres code in libc.  */
static void
__libc_freeres_fn_section
nptl_freeres (void)
{
  __unwind_freeres ();
  __free_stacks (0);
}
#endif


/* For asynchronous cancellation we use a signal.  This is the handler.  */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
#ifdef __ASSUME_CORRECT_SI_PID
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;
#endif

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
@ -138,7 +158,7 @@ sigcancel_handler (int sig, siginfo_t *si, void *ctx)
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
         ID in si_pid so we skip this test.  */
      || si->si_pid != THREAD_GETMEM (THREAD_SELF, pid)
      || si->si_pid != pid
#endif
      || si->si_code != SI_TKILL)
    return;
@ -183,6 +203,14 @@ struct xid_command *__xidcmd attribute_hidden;
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
#ifdef __ASSUME_CORRECT_SI_PID
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;
#endif

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
@ -191,7 +219,7 @@ sighandler_setxid (int sig, siginfo_t *si, void *ctx)
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
         ID in si_pid so we skip this test.  */
      || si->si_pid != THREAD_GETMEM (THREAD_SELF, pid)
      || si->si_pid != pid
#endif
      || si->si_code != SI_TKILL)
    return;
@ -200,8 +228,23 @@ sighandler_setxid (int sig, siginfo_t *si, void *ctx)
  INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                        __xidcmd->id[1], __xidcmd->id[2]);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  lll_futex_wake (&self->setxid_futex, 1, LLL_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    lll_futex_wake (&__xidcmd->cntr, 1);
    lll_futex_wake (&__xidcmd->cntr, 1, LLL_PRIVATE);
}
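This is the responder side of the setxid broadcast sketched earlier: the handler publishes its acknowledgement in two steps, first releasing its own per-thread futex, then decrementing the shared counter and waking the initiator only on the last acknowledgement. A hedged stand-alone sketch (names illustrative):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void futex_wake (int *addr, int n)
{ syscall (SYS_futex, addr, FUTEX_WAKE, n, NULL, NULL, 0); }

static void ack_setxid (int *own_futex, int *counter)
{
  *own_futex = 1;                   /* let a blocked pthread_create go */
  futex_wake (own_futex, 1);

  /* Last ack wakes the thread sleeping in __nptl_setxid.  */
  if (__sync_sub_and_fetch (counter, 1) == 0)
    futex_wake (counter, 1);
}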


@ -210,6 +253,9 @@ sighandler_setxid (int sig, siginfo_t *si, void *ctx)
extern void **__libc_dl_error_tsd (void) __attribute__ ((const));


/* This can be set by the debugger before initialization is complete.  */
static bool __nptl_initial_report_events __attribute_used__;

void
__pthread_initialize_minimal_internal (void)
{
@ -237,6 +283,55 @@ __pthread_initialize_minimal_internal (void)
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                              sizeof (struct robust_list_head));
  if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
    set_robust_list_not_avail ();

#ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally) so that
     doing the test once this early is beneficial.  */
  {
    int word = 0;
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
                             FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes have been introduced earlier than the
     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
     know the former are not supported.  This also means we know the
     kernel will return ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
#endif
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
         bit mask.  But since we will not actually wait at all the value
         is irrelevant.  Given that passing six parameters is difficult
         on some architectures we just pass whatever random value the
         calling convention calls for to the kernel.  It causes no harm.  */
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
                               FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
                               | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
        __set_futex_clock_realtime ();
    }
#endif
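Both probes use the same trick: issue a futex operation that can never block (a wake on a private word, or a wait on a word whose value already differs) and classify kernel support purely by the error code. A hedged sketch, assuming Linux; the helper name is illustrative and the fallback defines cover old headers:

#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef FUTEX_WAIT_BITSET
# define FUTEX_WAIT_BITSET 9
#endif
#ifndef FUTEX_CLOCK_REALTIME
# define FUTEX_CLOCK_REALTIME 256
#endif

/* Returns 1 if the kernel accepts the operation, 0 on ENOSYS.  The wait
   variant cannot block: *word is 0 and we ask to wait only while it
   equals 1, so the kernel fails the value check immediately.  */
static int futex_op_supported (int op)
{
  int word = 0;
  long rc = syscall (SYS_futex, &word, op, 1, NULL, NULL, ~0u);
  return !(rc == -1 && errno == ENOSYS);
}

/* Usage mirroring the init code above:  */
/*   futex_op_supported (FUTEX_WAKE | FUTEX_PRIVATE_FLAG)              */
/*   futex_op_supported (FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME      */
/*                       | FUTEX_PRIVATE_FLAG)                         */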

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
@ -246,6 +341,9 @@ __pthread_initialize_minimal_internal (void)
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
@ -311,6 +409,15 @@ __pthread_initialize_minimal_internal (void)
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) INTUSE (__pthread_mutex_lock);
  GL(dl_rtld_unlock_recursive) = (void *) INTUSE (__pthread_mutex_unlock);
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    INTUSE (__pthread_mutex_lock) (&GL(dl_load_lock).mutex);
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2005, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -17,100 +17,9 @@
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <setjmp.h>
#include <stdlib.h>
#include "pthreadP.h"
#include "atomic.h"
#include <bits/libc-lock.h>


#ifndef NOT_IN_libc

/* The next two functions are similar to pthread_setcanceltype() but
   more specialized for the use in the cancelable functions like write().
   They do not need to check parameters etc.  */
int
attribute_hidden
__libc_enable_asynccancel (void)
{
  struct pthread *self = THREAD_SELF;
  int oldval = THREAD_GETMEM (self, cancelhandling);

  while (1)
    {
      int newval = oldval | CANCELTYPE_BITMASK;

      if (__builtin_expect ((oldval & CANCELED_BITMASK) != 0, 0))
        {
          /* If we are already exiting or if PTHREAD_CANCEL_DISABLED,
             stop right here.  */
          if ((oldval & (EXITING_BITMASK | CANCELSTATE_BITMASK)) != 0)
            break;

          int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                                  newval, oldval);
          if (__builtin_expect (curval != oldval, 0))
            {
              /* Somebody else modified the word, try again.  */
              oldval = curval;
              continue;
            }

          THREAD_SETMEM (self, result, PTHREAD_CANCELED);

          __do_cancel ();

          /* NOTREACHED */
        }

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (__builtin_expect (curval == oldval, 1))
        break;

      /* Prepare the next round.  */
      oldval = curval;
    }

  return oldval;
}


void
internal_function attribute_hidden
__libc_disable_asynccancel (int oldtype)
{
  /* If asynchronous cancellation was enabled before we do not have
     anything to do.  */
  if (oldtype & CANCELTYPE_BITMASK)
    return;

  struct pthread *self = THREAD_SELF;
  int oldval = THREAD_GETMEM (self, cancelhandling);

  while (1)
    {
      int newval = oldval & ~CANCELTYPE_BITMASK;

      if (newval == oldval)
        break;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
                                              oldval);
      if (__builtin_expect (curval == oldval, 1))
        break;

      /* Prepare the next round.  */
      oldval = curval;
    }
}


void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
{
  if (f->__do_it)
    f->__cancel_routine (f->__cancel_arg);
}

#endif
#define __pthread_enable_asynccancel __libc_enable_asynccancel
#define __pthread_disable_asynccancel __libc_disable_asynccancel
#include "cancellation.c"

@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -20,7 +20,7 @@
#include <setjmp.h>
#include <stdlib.h>
#include "pthreadP.h"
#include "jmpbuf-unwind.h"
#include <jmpbuf-unwind.h>

void
__pthread_cleanup_upto (__jmp_buf target, char *targetframe)

@ -22,10 +22,10 @@
#include <sysdep.h>
#include "pthreadP.h"


extern __typeof(system) __libc_system;
#include <system.c>


int
system (const char *line)
{

@ -8,5 +8,6 @@ EDEADLK EDEADLK
EINTR EINTR
EINVAL EINVAL
ENOSYS ENOSYS
EOVERFLOW EOVERFLOW
ETIMEDOUT ETIMEDOUT
EWOULDBLOCK EWOULDBLOCK

@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -31,6 +31,7 @@
#include <internaltypes.h>
#include <pthread-functions.h>
#include <atomic.h>
#include <bits/kernel-features.h>


/* Atomic operations on TLS memory.  */
@ -51,6 +52,99 @@
#endif


/* Magic cookie representing robust mutex with dead owner.  */
#define PTHREAD_MUTEX_INCONSISTENT INT_MAX
/* Magic cookie representing not recoverable robust mutex.  */
#define PTHREAD_MUTEX_NOTRECOVERABLE (INT_MAX - 1)


/* Internal mutex type value.  */
enum
{
  PTHREAD_MUTEX_KIND_MASK_NP = 3,
  PTHREAD_MUTEX_ROBUST_NORMAL_NP = 16,
  PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
    = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_RECURSIVE_NP,
  PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
    = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
  PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
    = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
  PTHREAD_MUTEX_PRIO_INHERIT_NP = 32,
  PTHREAD_MUTEX_PI_NORMAL_NP
    = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_NORMAL,
  PTHREAD_MUTEX_PI_RECURSIVE_NP
    = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_RECURSIVE_NP,
  PTHREAD_MUTEX_PI_ERRORCHECK_NP
    = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
  PTHREAD_MUTEX_PI_ADAPTIVE_NP
    = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
  PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP
    = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP,
  PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP
    = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_RECURSIVE_NP,
  PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP
    = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP,
  PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP
    = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP,
  PTHREAD_MUTEX_PRIO_PROTECT_NP = 64,
  PTHREAD_MUTEX_PP_NORMAL_NP
    = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_NORMAL,
  PTHREAD_MUTEX_PP_RECURSIVE_NP
    = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_RECURSIVE_NP,
  PTHREAD_MUTEX_PP_ERRORCHECK_NP
    = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
  PTHREAD_MUTEX_PP_ADAPTIVE_NP
    = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ADAPTIVE_NP
};
#define PTHREAD_MUTEX_PSHARED_BIT 128

#define PTHREAD_MUTEX_TYPE(m) \
  ((m)->__data.__kind & 127)

#if LLL_PRIVATE == 0 && LLL_SHARED == 128
# define PTHREAD_MUTEX_PSHARED(m) \
  ((m)->__data.__kind & 128)
#else
# define PTHREAD_MUTEX_PSHARED(m) \
  (((m)->__data.__kind & 128) ? LLL_SHARED : LLL_PRIVATE)
#endif

/* When waking robust mutexes on exit the kernel never uses
   FUTEX_PRIVATE_FLAG with FUTEX_WAKE.  */
#define PTHREAD_ROBUST_MUTEX_PSHARED(m) LLL_SHARED

/* Ceiling in __data.__lock.  __data.__lock is signed, so don't
   use the MSB bit in there, but in the mask also include that bit,
   so that the compiler can optimize & PTHREAD_MUTEX_PRIO_CEILING_MASK
   masking if the value is then shifted down by
   PTHREAD_MUTEX_PRIO_CEILING_SHIFT.  */
#define PTHREAD_MUTEX_PRIO_CEILING_SHIFT 19
#define PTHREAD_MUTEX_PRIO_CEILING_MASK 0xfff80000


/* Flags in mutex attr.  */
#define PTHREAD_MUTEXATTR_PROTOCOL_SHIFT 28
#define PTHREAD_MUTEXATTR_PROTOCOL_MASK 0x30000000
#define PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT 12
#define PTHREAD_MUTEXATTR_PRIO_CEILING_MASK 0x00fff000
#define PTHREAD_MUTEXATTR_FLAG_ROBUST 0x40000000
#define PTHREAD_MUTEXATTR_FLAG_PSHARED 0x80000000
#define PTHREAD_MUTEXATTR_FLAG_BITS \
  (PTHREAD_MUTEXATTR_FLAG_ROBUST | PTHREAD_MUTEXATTR_FLAG_PSHARED \
   | PTHREAD_MUTEXATTR_PROTOCOL_MASK | PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)


/* Check whether rwlock prefers readers.  */
#define PTHREAD_RWLOCK_PREFER_READER_P(rwlock) \
  ((rwlock)->__data.__flags == 0)


/* Bits used in robust mutex implementation.  */
#define FUTEX_WAITERS 0x80000000
#define FUTEX_OWNER_DIED 0x40000000
#define FUTEX_TID_MASK 0x3fffffff
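These three masks describe how a robust-mutex lock word packs everything into 32 bits: the owner TID in the low 30 bits, FUTEX_OWNER_DIED set by the kernel when the owner exits, and FUTEX_WAITERS when someone sleeps on the word. A small decoding demo (for illustration only; the real code tests these bits inline):

#include <stdint.h>
#include <stdio.h>

#define FUTEX_WAITERS    0x80000000u
#define FUTEX_OWNER_DIED 0x40000000u
#define FUTEX_TID_MASK   0x3fffffffu

/* Print the state encoded in a robust-mutex futex word.  */
static void describe_lock_word (uint32_t word)
{
  if ((word & FUTEX_TID_MASK) == 0)
    puts ("unlocked");
  else
    printf ("owned by TID %u%s%s\n", word & FUTEX_TID_MASK,
            (word & FUTEX_WAITERS) ? ", waiters queued" : "",
            (word & FUTEX_OWNER_DIED) ? ", owner died" : "");
}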


/* Internal variables.  */


@ -70,7 +164,7 @@ hidden_proto (__stack_user)

/* Attribute handling.  */
extern struct pthread_attr *__attr_list attribute_hidden;
extern lll_lock_t __attr_list_lock attribute_hidden;
extern int __attr_list_lock attribute_hidden;

/* First available RT signal.  */
extern int __current_sigrtmin attribute_hidden;
@ -87,6 +181,19 @@ hidden_proto (__pthread_keys)
/* Number of threads running.  */
extern unsigned int __nptl_nthreads attribute_hidden;

#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the system call and so cannot use it.  */
extern int __set_robust_list_avail attribute_hidden;
#endif

/* Thread Priority Protection.  */
extern int __sched_fifo_min_prio attribute_hidden;
extern int __sched_fifo_max_prio attribute_hidden;
extern void __init_sched_fifo_prio (void) attribute_hidden;
extern int __pthread_tpp_change_priority (int prev_prio, int new_prio)
  attribute_hidden;
extern int __pthread_current_priority (void) attribute_hidden;

/* The library can run in debugging mode where it performs a lot more
   tests.  */
extern int __pthread_debug attribute_hidden;
@ -108,8 +215,8 @@ extern int __pthread_debug attribute_hidden;
/* Cancellation test.  */
#define CANCELLATION_P(self) \
  do { \
    int _cancelhandling = THREAD_GETMEM (self, cancelhandling); \
    if (CANCEL_ENABLED_AND_CANCELED (_cancelhandling)) \
    int cancelhandling = THREAD_GETMEM (self, cancelhandling); \
    if (CANCEL_ENABLED_AND_CANCELED (cancelhandling)) \
      { \
        THREAD_SETMEM (self, result, PTHREAD_CANCELED); \
        __do_cancel (); \
@ -140,6 +247,7 @@ hidden_proto (__pthread_register_cancel)
hidden_proto (__pthread_unregister_cancel)
# ifdef SHARED
extern void attribute_hidden pthread_cancel_init (void);
extern void __unwind_freeres (void);
# endif
#endif

@ -174,22 +282,22 @@ __do_cancel (void)
# define LIBC_CANCEL_RESET(oldtype) \
  __libc_disable_asynccancel (oldtype)
# define LIBC_CANCEL_HANDLED() \
  __asm (".globl " __USER_LABEL_PREFIX__ "__libc_enable_asynccancel"); \
  __asm (".globl " __USER_LABEL_PREFIX__ "__libc_disable_asynccancel")
  __asm__ (".globl " __USER_LABEL_PREFIX__ "__libc_enable_asynccancel"); \
  __asm__ (".globl " __USER_LABEL_PREFIX__ "__libc_disable_asynccancel")
#elif defined NOT_IN_libc && defined IS_IN_libpthread
# define LIBC_CANCEL_ASYNC() CANCEL_ASYNC ()
# define LIBC_CANCEL_RESET(val) CANCEL_RESET (val)
# define LIBC_CANCEL_HANDLED() \
  __asm (".globl " __USER_LABEL_PREFIX__ "__pthread_enable_asynccancel"); \
  __asm (".globl " __USER_LABEL_PREFIX__ "__pthread_disable_asynccancel")
  __asm__ (".globl " __USER_LABEL_PREFIX__ "__pthread_enable_asynccancel"); \
  __asm__ (".globl " __USER_LABEL_PREFIX__ "__pthread_disable_asynccancel")
#elif defined NOT_IN_libc && defined IS_IN_librt
# define LIBC_CANCEL_ASYNC() \
  __librt_enable_asynccancel ()
# define LIBC_CANCEL_RESET(val) \
  __librt_disable_asynccancel (val)
# define LIBC_CANCEL_HANDLED() \
  __asm (".globl " __USER_LABEL_PREFIX__ "__librt_enable_asynccancel"); \
  __asm (".globl " __USER_LABEL_PREFIX__ "__librt_disable_asynccancel")
  __asm__ (".globl " __USER_LABEL_PREFIX__ "__librt_enable_asynccancel"); \
  __asm__ (".globl " __USER_LABEL_PREFIX__ "__librt_disable_asynccancel")
#else
# define LIBC_CANCEL_ASYNC() 0 /* Just a dummy value.  */
# define LIBC_CANCEL_RESET(val) ((void)(val)) /* Nothing, but evaluate it.  */
@ -263,11 +371,13 @@ hidden_proto (__nptl_death_event)
#ifdef TLS_MULTIPLE_THREADS_IN_TCB
extern void __libc_pthread_init (unsigned long int *ptr,
                                 void (*reclaim) (void),
                                 const struct pthread_functions *functions);
                                 const struct pthread_functions *functions)
  internal_function;
#else
extern int *__libc_pthread_init (unsigned long int *ptr,
                                 void (*reclaim) (void),
                                 const struct pthread_functions *functions);
                                 const struct pthread_functions *functions)
  internal_function;

/* Variable set to a nonzero value if more than one thread runs or ran.  */
extern int __pthread_multiple_threads attribute_hidden;
@ -307,6 +417,7 @@ extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);
extern int __pthread_mutex_lock_internal (pthread_mutex_t *__mutex)
  attribute_hidden;
extern int __pthread_mutex_cond_lock (pthread_mutex_t *__mutex);
extern void __pthread_mutex_cond_lock_adjust (pthread_mutex_t *__mutex);
extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);
extern int __pthread_mutex_unlock_internal (pthread_mutex_t *__mutex)
  attribute_hidden;
@ -454,10 +565,25 @@ extern void __nptl_deallocate_tsd (void) attribute_hidden;

extern int __nptl_setxid (struct xid_command *cmdp) attribute_hidden;

extern void __free_stacks (size_t limit) attribute_hidden;

extern void __wait_lookup_done (void) attribute_hidden;

#ifdef SHARED
# define PTHREAD_STATIC_FN_REQUIRE(name)
#else
# define PTHREAD_STATIC_FN_REQUIRE(name) __asm (".globl " #name);
# define PTHREAD_STATIC_FN_REQUIRE(name) __asm__ (".globl " #name);
#endif


#ifndef __NR_set_robust_list
/* XXX For the time being...  Once we can rely on the kernel headers
   having the definition remove these lines.  */
# if defined __i386__
#  define __NR_set_robust_list 311
# elif defined __x86_64__
#  define __NR_set_robust_list 273
# endif
#endif

#endif /* pthreadP.h */

@ -1,4 +1,4 @@
/* Copyright (C) 2002 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -38,7 +38,8 @@
#include <fork.h>

/* This is defined by newer gcc versions, unique for each module.  */
extern void *__dso_handle __attribute__ ((__weak__));
extern void *__dso_handle __attribute__ ((__weak__,
                                          __visibility__ ("hidden")));


/* Hide the symbol so that no definition but the one locally in the
@ -52,4 +53,4 @@ __pthread_atfork (
  return __register_atfork (prepare, parent, child,
                            &__dso_handle == NULL ? NULL : __dso_handle);
}
strong_alias(__pthread_atfork, pthread_atfork)
strong_alias (__pthread_atfork, pthread_atfork)

@ -24,7 +24,8 @@
#include "pthreadP.h"

int
__pthread_attr_destroy (pthread_attr_t *attr)
__pthread_attr_destroy (
     pthread_attr_t *attr)
{
  struct pthread_attr *iattr;


@ -22,7 +22,9 @@


int
__pthread_attr_getdetachstate (const pthread_attr_t *attr, int *detachstate)
__pthread_attr_getdetachstate (
     const pthread_attr_t *attr,
     int *detachstate)
{
  struct pthread_attr *iattr;


@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -25,11 +25,12 @@


struct pthread_attr *__attr_list;
lll_lock_t __attr_list_lock = LLL_LOCK_INITIALIZER;
int __attr_list_lock = LLL_LOCK_INITIALIZER;


int
__pthread_attr_init_2_1 (pthread_attr_t *attr)
__pthread_attr_init_2_1 (
     pthread_attr_t *attr)
{
  struct pthread_attr *iattr;


@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2004, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -31,6 +31,12 @@ __pthread_attr_setschedparam (
  assert (sizeof (*attr) >= sizeof (struct pthread_attr));
  struct pthread_attr *iattr = (struct pthread_attr *) attr;

  int min = sched_get_priority_min (iattr->schedpolicy);
  int max = sched_get_priority_max (iattr->schedpolicy);
  if (min == -1 || max == -1
      || param->sched_priority > max || param->sched_priority < min)
    return EINVAL;

  /* Copy the new values.  */
  memcpy (&iattr->schedparam, param, sizeof (struct sched_param));


@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.


@ -1,4 +1,4 @@
/* Copyright (C) 2002 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -23,21 +23,22 @@


int
pthread_barrier_destroy (pthread_barrier_t *barrier)
pthread_barrier_destroy (
     pthread_barrier_t *barrier)
{
  struct pthread_barrier *ibarrier;
  int result = EBUSY;

  ibarrier = (struct pthread_barrier *) barrier;

  lll_lock (ibarrier->lock);
  lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);

  if (__builtin_expect (ibarrier->left == ibarrier->init_count, 1))
    /* The barrier is not used anymore.  */
    result = 0;
  else
    /* Still used, return with an error.  */
  lll_unlock (ibarrier->lock);
  lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);

  return result;
}

@ -1,4 +1,4 @@
/* Copyright (C) 2002 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -20,6 +20,13 @@
#include <errno.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <bits/kernel-features.h>


static const struct pthread_barrierattr default_attr =
  {
    .pshared = PTHREAD_PROCESS_PRIVATE
  };


int
@ -33,17 +40,15 @@ pthread_barrier_init (
  if (__builtin_expect (count == 0, 0))
    return EINVAL;

  if (attr != NULL)
    {
      struct pthread_barrierattr *iattr;
  const struct pthread_barrierattr *iattr
    = (attr != NULL
       ? iattr = (struct pthread_barrierattr *) attr
       : &default_attr);

      iattr = (struct pthread_barrierattr *) attr;

      if (iattr->pshared != PTHREAD_PROCESS_PRIVATE
          && __builtin_expect (iattr->pshared != PTHREAD_PROCESS_SHARED, 0))
        /* Invalid attribute.  */
        return EINVAL;
    }
  if (iattr->pshared != PTHREAD_PROCESS_PRIVATE
      && __builtin_expect (iattr->pshared != PTHREAD_PROCESS_SHARED, 0))
    /* Invalid attribute.  */
    return EINVAL;

  ibarrier = (struct pthread_barrier *) barrier;

@ -53,5 +58,14 @@ pthread_barrier_init (
  ibarrier->init_count = count;
  ibarrier->curr_event = 0;

#ifdef __ASSUME_PRIVATE_FUTEX
  ibarrier->private = (iattr->pshared != PTHREAD_PROCESS_PRIVATE
                       ? 0 : FUTEX_PRIVATE_FLAG);
#else
  ibarrier->private = (iattr->pshared != PTHREAD_PROCESS_PRIVATE
                       ? 0 : THREAD_GETMEM (THREAD_SELF,
                                            header.private_futex));
#endif

  return 0;
}

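pthread_barrier_init stores, per object, the futex flag every later operation should use: process-shared barriers get 0, private ones get FUTEX_PRIVATE_FLAG (read back from the TCB field when kernel support had to be probed at runtime). The `private ^ FUTEX_PRIVATE_FLAG` in pthread_barrier_destroy then maps that field onto the lll_lock convention. A hedged sketch of the same selection (the variable is an illustrative stand-in for the TCB field):

#include <pthread.h>

#ifndef FUTEX_PRIVATE_FLAG
# define FUTEX_PRIVATE_FLAG 128
#endif

/* Stand-in for header.private_futex, probed at startup:
   0 if the kernel lacks private futexes.  */
static int have_private_futex = FUTEX_PRIVATE_FLAG;

/* Shared objects must use 0 so every process addresses the same futex;
   private ones may use the cheaper private variant when available.  */
static int object_futex_flag (int pshared)
{
  return pshared == PTHREAD_PROCESS_SHARED ? 0 : have_private_futex;
}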
@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -26,7 +26,8 @@


int
pthread_cancel (pthread_t th)
pthread_cancel (
     pthread_t th)
{
  volatile struct pthread *pd = (volatile struct pthread *) th;

@ -43,6 +44,7 @@ pthread_cancel (pthread_t th)
  int newval;
  do
    {
    again:
      oldval = pd->cancelhandling;
      newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

@ -58,7 +60,10 @@ pthread_cancel (pthread_t th)
      if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
        {
          /* Mark the cancellation as "in progress".  */
          atomic_bit_set (&pd->cancelhandling, CANCELING_BIT);
          if (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling,
                                                    oldval | CANCELING_BITMASK,
                                                    oldval))
            goto again;

          /* The cancellation handler will take care of marking the
             thread as canceled.  */

@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -22,16 +22,20 @@


int
__pthread_cond_destroy (pthread_cond_t *cond)
__pthread_cond_destroy (
     pthread_cond_t *cond)
{
  int pshared = (cond->__data.__mutex == (void *) ~0l)
                ? LLL_SHARED : LLL_PRIVATE;

  /* Make sure we are alone.  */
  lll_mutex_lock (cond->__data.__lock);
  lll_lock (cond->__data.__lock, pshared);

  if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
    {
      /* If there are still some waiters which have not been
         woken up, this is an application bug.  */
      lll_mutex_unlock (cond->__data.__lock);
      lll_unlock (cond->__data.__lock, pshared);
      return EBUSY;
    }

@ -42,15 +46,36 @@ __pthread_cond_destroy (pthread_cond_t *cond)
     broadcasted, but still are using the pthread_cond_t structure,
     pthread_cond_destroy needs to wait for them.  */
  unsigned int nwaiters = cond->__data.__nwaiters;
  while (nwaiters >= (1 << COND_CLOCK_BITS))

  if (nwaiters >= (1 << COND_NWAITERS_SHIFT))
    {
      lll_mutex_unlock (cond->__data.__lock);
      /* Wake everybody on the associated mutex in case there are
         threads that have been requeued to it.
         Without this, pthread_cond_destroy could block potentially
         for a long time or forever, as it would depend on other
         threads using the mutex.
         When all threads waiting on the mutex are woken up, pthread_cond_wait
         only waits for threads to acquire and release the internal
         condvar lock.  */
      if (cond->__data.__mutex != NULL
          && cond->__data.__mutex != (void *) ~0l)
        {
          pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
          lll_futex_wake (&mut->__data.__lock, INT_MAX,
                          PTHREAD_MUTEX_PSHARED (mut));
        }

      lll_futex_wait (&cond->__data.__nwaiters, nwaiters);
      do
        {
          lll_unlock (cond->__data.__lock, pshared);

      lll_mutex_lock (cond->__data.__lock);
          lll_futex_wait (&cond->__data.__nwaiters, nwaiters, pshared);

      nwaiters = cond->__data.__nwaiters;
          lll_lock (cond->__data.__lock, pshared);

          nwaiters = cond->__data.__nwaiters;
        }
      while (nwaiters >= (1 << COND_NWAITERS_SHIFT));
    }

  return 0;

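The __nwaiters field packs a waiter count above the low clock-id bits, so "waiters still present" is the test nwaiters >= (1 << COND_NWAITERS_SHIFT); destroy first wakes anything requeued to the mutex, then sleeps on __nwaiters until the count drains. A sketch of the counter packing, assuming the shift is 1 bit as the setclock assert further below suggests:

#define COND_NWAITERS_SHIFT 1   /* assumed; low bits hold the clock id */

/* The waiter count is adjusted in steps of (1 << COND_NWAITERS_SHIFT)
   so the clock id in the low bits is never disturbed.  */
static inline void nwaiters_inc (unsigned int *nw)
{ *nw += 1 << COND_NWAITERS_SHIFT; }

static inline void nwaiters_dec (unsigned int *nw)
{ *nw -= 1 << COND_NWAITERS_SHIFT; }

static inline int any_waiters (unsigned int nw)
{ return nw >= (1 << COND_NWAITERS_SHIFT); }

static inline int cond_clock (unsigned int nw)
{ return nw & ((1 << COND_NWAITERS_SHIFT) - 1); }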
@ -1,4 +1,5 @@
/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2005, 2007, 2008
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -27,11 +28,12 @@ __pthread_cond_init (
{
  struct pthread_condattr *icond_attr = (struct pthread_condattr *) cond_attr;

  cond->__data.__lock = LLL_MUTEX_LOCK_INITIALIZER;
  cond->__data.__lock = LLL_LOCK_INITIALIZER;
  cond->__data.__futex = 0;
  cond->__data.__nwaiters = (icond_attr != NULL
                             && ((icond_attr->value & (COND_CLOCK_BITS << 1))
                                 >> 1));
                             ? ((icond_attr->value >> 1)
                                & ((1 << COND_NWAITERS_SHIFT) - 1))
                             : CLOCK_REALTIME);
  cond->__data.__total_seq = 0;
  cond->__data.__wakeup_seq = 0;
  cond->__data.__woken_seq = 0;

@ -1,4 +1,4 @@
/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.

@ -26,6 +26,6 @@ pthread_condattr_getclock (
     clockid_t *clock_id)
{
  *clock_id = (((((const struct pthread_condattr *) attr)->value) >> 1)
               & ((1 << COND_CLOCK_BITS) - 1));
               & ((1 << COND_NWAITERS_SHIFT) - 1));
  return 0;
}

@ -1,4 +1,4 @@
/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.

@ -62,11 +62,12 @@ pthread_condattr_setclock (
    return EINVAL;

  /* Make sure the value fits in the bits we reserved.  */
  assert (clock_id < (1 << COND_CLOCK_BITS));
  assert (clock_id < (1 << COND_NWAITERS_SHIFT));

  int *valuep = &((struct pthread_condattr *) attr)->value;

  *valuep = (*valuep & ~(1 << (COND_CLOCK_BITS + 1)) & ~1) | (clock_id << 1);
  *valuep = ((*valuep & ~(((1 << COND_NWAITERS_SHIFT) - 1) << 1))
             | (clock_id << 1));

  return 0;
}
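The condattr value packs the pshared flag in bit 0 and the clock id starting at bit 1; the rewritten setclock clears exactly the clock field before or-ing in the new id, whereas the old expression also cleared bit 0. A small self-checking demo of the new packing (COND_NWAITERS_SHIFT assumed to be 1, per the assert above):

#include <assert.h>

#define COND_NWAITERS_SHIFT 1          /* assumed */
#define CLOCK_FIELD_MASK (((1 << COND_NWAITERS_SHIFT) - 1) << 1)

static int set_clock_bits (int value, int clock_id)
{
  /* Clear only the clock field, preserve pshared in bit 0.  */
  return (value & ~CLOCK_FIELD_MASK) | (clock_id << 1);
}

int main (void)
{
  int v = 0x1 | (1 << 1);              /* pshared set, clock id 1 */
  v = set_clock_bits (v, 0);           /* switch to clock id 0 */
  assert (v == 0x1);                   /* pshared bit survived */
  return 0;
}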
||||
|
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
/* Copyright (C) 2002-2007,2008,2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -27,6 +27,7 @@
#include <atomic.h>
#include <libc-internal.h>
#include <resolv.h>
#include <bits/kernel-features.h>


/* Local function to start thread and handle cleanup.  */
@@ -37,10 +38,10 @@ static int start_thread (void *arg);
int __pthread_debug;

/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events;
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event;
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;
@@ -50,17 +51,18 @@ unsigned int __nptl_nthreads = 1;
#include "allocatestack.c"

/* Code to create the thread.  */
#include "createthread.c"
#include <createthread.c>


struct pthread *
internal_function
__find_in_stack_list (struct pthread *pd)
__find_in_stack_list (
     struct pthread *pd)
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock);
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    {
@@ -87,7 +89,7 @@ __find_in_stack_list (struct pthread *pd)
	}
    }

  lll_unlock (stack_cache_lock);
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
@@ -203,6 +205,15 @@ __free_tcb (struct pthread *pd)
	 running thread is gone.  */
      abort ();

  /* Free TPP data.  */
  if (__builtin_expect (pd->tpp != NULL, 0))
    {
      struct priority_protection_data *tpp = pd->tpp;

      pd->tpp = NULL;
      free (tpp);
    }

  /* Queue the stack memory block for reuse and exit the process.  The
     kernel will signal via writing to the address returned by
     QUEUE-STACK when the stack is available.  */
@@ -226,6 +237,32 @@ start_thread (void *arg)
  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
	 succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
			sizeof (struct robust_list_head));
    }
#endif

  /* If the parent was running cancellation handlers while creating
     the thread the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
			       NULL, _NSIG / 8);
    }

  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;
@@ -246,9 +283,9 @@ start_thread (void *arg)
      int oldtype = CANCEL_ASYNC ();

      /* Get the lock the parent locked to force synchronization.  */
      lll_lock (pd->lock);
      lll_lock (pd->lock, LLL_PRIVATE);
      /* And give it up right away.  */
      lll_unlock (pd->lock);
      lll_unlock (pd->lock, LLL_PRIVATE);

      CANCEL_RESET (oldtype);
    }
@@ -264,6 +301,9 @@ start_thread (void *arg)
  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
@@ -304,10 +344,65 @@ start_thread (void *arg)
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# if __WORDSIZE == 64
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here there for sure are no PI mutexes involved
     since the kernel support for them is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
	{
	  struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
	    ((char *) robust - offsetof (struct __pthread_mutex_s,
					 __list.__next));
	  robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
	  this->__list.__prev = NULL;
# endif
	  this->__list.__next = NULL;

	  lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
	}
      while (robust != (void *) &pd->robust_head);
    }
#endif

  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#else
# error "to do"
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);

  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
    {
      /* Some other thread might call any of the setXid functions and expect
	 us to reply.  In this case wait until we did that.  */
      do
	lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }

  /* We cannot call '_exit' here.  '_exit' will terminate the process.
@@ -348,7 +443,7 @@ __pthread_create_2_1 (
       accessing far-away memory.  */
    iattr = &default_attr;

  struct pthread *pd = 0;
  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
@@ -398,6 +493,11 @@ __pthread_create_2_1 (
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif

  /* Determine scheduling parameters for the thread.  */
  if (attr != NULL
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
@@ -468,12 +568,14 @@ weak_alias(__pthread_create_2_1, pthread_create)
/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some other POSIX thread
   functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)
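The robust-list walk added to start_thread above recovers each mutex from a pointer to its embedded __list.__next field with offsetof — the classic container-of idiom. A minimal self-contained sketch with stand-in structures (the real layouts live in pthreadP.h and bits/pthreadtypes.h):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the nptl structures (assumption for
   illustration only).  */
struct list_node { struct list_node *next; };
struct mutex_s { int lock; struct list_node list; };

/* Recover the enclosing mutex from a pointer to its embedded list
   node, as the cleanup loop does when a thread with locked robust
   mutexes dies.  */
static struct mutex_s *
mutex_from_node (struct list_node *node)
{
  return (struct mutex_s *) ((char *) node
			     - offsetof (struct mutex_s, list));
}

int
main (void)
{
  struct mutex_s m = { 42, { NULL } };
  /* Walking from the list node back to the owning mutex.  */
  printf ("recovered lock field: %d\n", mutex_from_node (&m.list)->lock);
  return 0;
}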
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -39,7 +39,7 @@ pthread_getattr_np (
  struct pthread_attr *iattr = (struct pthread_attr *) attr;
  int ret = 0;

  lll_lock (thread->lock);
  lll_lock (thread->lock, LLL_PRIVATE);

  /* The thread library is responsible for keeping the values in the
     thread descriptor up-to-date in case the user changes them.  */
@@ -79,51 +79,55 @@ pthread_getattr_np (
      if (fp == NULL)
	ret = errno;
      /* We need the limit of the stack in any case.  */
      else if (getrlimit (RLIMIT_STACK, &rl) != 0)
	ret = errno;
      else
	{
	  /* We need no locking.  */
	  __fsetlocking (fp, FSETLOCKING_BYCALLER);

	  /* Until we found an entry (which should always be the case)
	     mark the result as a failure.  */
	  ret = ENOENT;

	  char *line = NULL;
	  size_t linelen = 0;
	  uintptr_t last_to = 0;

	  while (! feof_unlocked (fp))
      if (getrlimit (RLIMIT_STACK, &rl) != 0)
	ret = errno;
      else
	{
	  if (getdelim (&line, &linelen, '\n', fp) <= 0)
	    break;
	  /* We need no locking.  */
	  __fsetlocking (fp, FSETLOCKING_BYCALLER);

	  uintptr_t from;
	  uintptr_t to;
	  if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2)
	    continue;
	  if (from <= (uintptr_t) __libc_stack_end
	      && (uintptr_t) __libc_stack_end < to)
	  /* Until we found an entry (which should always be the case)
	     mark the result as a failure.  */
	  ret = ENOENT;

	  char *line = NULL;
	  size_t linelen = 0;
	  uintptr_t last_to = 0;

	  while (! feof_unlocked (fp))
	    {
	      /* Found the entry.  Now we have the info we need.  */
	      iattr->stacksize = rl.rlim_cur;
	      iattr->stackaddr = (void *) to;
	      if (__getdelim (&line, &linelen, '\n', fp) <= 0)
		break;

	      /* The limit might be too high.  */
	      if ((size_t) iattr->stacksize
		  > (size_t) iattr->stackaddr - last_to)
		iattr->stacksize = (size_t) iattr->stackaddr - last_to;
	      uintptr_t from;
	      uintptr_t to;
	      if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2)
		continue;
	      if (from <= (uintptr_t) __libc_stack_end
		  && (uintptr_t) __libc_stack_end < to)
		{
		  /* Found the entry.  Now we have the info we need.  */
		  iattr->stacksize = rl.rlim_cur;
		  iattr->stackaddr = (void *) to;

		  /* We succeed and no need to look further.  */
		  ret = 0;
		  break;
	      /* The limit might be too high.  */
	      if ((size_t) iattr->stacksize
		  > (size_t) iattr->stackaddr - last_to)
		iattr->stacksize = (size_t) iattr->stackaddr - last_to;

		  /* We succeed and no need to look further.  */
		  ret = 0;
		  break;
		}
	      last_to = to;
	    }
	  last_to = to;

	  free (line);
	}

      fclose (fp);
      free (line);
    }
}

@@ -160,12 +164,16 @@ pthread_getattr_np (
    {
      free (cpuset);
      if (ret == ENOSYS)
	/* There is no such functionality.  */
	ret = 0;
	{
	  /* There is no such functionality.  */
	  ret = 0;
	  iattr->cpuset = NULL;
	  iattr->cpusetsize = 0;
	}
    }
}

  lll_unlock (thread->lock);
  lll_unlock (thread->lock, LLL_PRIVATE);

  return ret;
}
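The reorganized loop above scans /proc/self/maps for the mapping that contains __libc_stack_end. The same scan can be demonstrated standalone: read the "from-to" address pair at the start of each maps line and pick the mapping containing a known stack address. A minimal sketch (error handling trimmed; the real code also caps the size by RLIMIT_STACK and the end of the previous mapping):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  uintptr_t probe = (uintptr_t) &probe;	/* an address on our stack */
  FILE *fp = fopen ("/proc/self/maps", "r");
  if (fp == NULL)
    return 1;

  char *line = NULL;
  size_t linelen = 0;
  while (getline (&line, &linelen, fp) > 0)
    {
      uintptr_t from, to;
      /* Each maps line starts with "from-to" in hex.  */
      if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2)
	continue;
      if (from <= probe && probe < to)
	{
	  printf ("stack mapping: %#" PRIxPTR "-%#" PRIxPTR "\n", from, to);
	  break;
	}
    }
  free (line);
  fclose (fp);
  return 0;
}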
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -25,9 +25,9 @@

int
__pthread_getschedparam (
     pthread_t threadid,
     int *policy,
     struct sched_param *param)
     pthread_t threadid,
     int *policy,
     struct sched_param *param)
{
  struct pthread *pd = (struct pthread *) threadid;

@@ -38,7 +38,7 @@ __pthread_getschedparam (

  int result = 0;

  lll_lock (pd->lock);
  lll_lock (pd->lock, LLL_PRIVATE);

  /* The library is responsible for maintaining the values at all
     times.  If the user uses an interface other than
@@ -68,7 +68,7 @@ __pthread_getschedparam (
      memcpy (param, &pd->schedparam, sizeof (struct sched_param));
    }

  lll_unlock (pd->lock);
  lll_unlock (pd->lock, LLL_PRIVATE);

  return result;
}
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -20,21 +20,26 @@
#include <errno.h>
#include <stdlib.h>

#include "atomic.h"
#include <atomic.h>
#include "pthreadP.h"


static void
cleanup (void *arg)
{
  *(void **) arg = NULL;
  /* If we already changed the waiter ID, reset it.  The call cannot
     fail for any reason but the thread not having done that yet so
     there is no reason for a loop.  */
  (void) atomic_compare_and_exchange_bool_acq ((struct pthread **) arg, NULL,
					       THREAD_SELF);
}


int
pthread_join (pthread_t threadid, void **thread_return)
pthread_join (
     pthread_t threadid,
     void **thread_return)
{
  struct pthread *self;
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
@@ -47,12 +52,23 @@ pthread_join (pthread_t threadid, void **thread_return)
    /* We cannot wait for the thread.  */
    return EINVAL;

  self = THREAD_SELF;
  if (pd == self
      || (self->joinid == pd
	  && (pd->cancelhandling
	      & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
		 | TERMINATED_BITMASK)) == 0))
  struct pthread *self = THREAD_SELF;
  int result = 0;

  /* During the wait we change to asynchronous cancellation.  If we
     are canceled the thread we are waiting for must be marked as
     un-wait-ed for again.  */
  pthread_cleanup_push (cleanup, &pd->joinid);

  /* Switch to asynchronous cancellation.  */
  int oldtype = CANCEL_ASYNC ();

  if ((pd == self
       || (self->joinid == pd
	   && (pd->cancelhandling
	       & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
		  | TERMINATED_BITMASK)) == 0))
      && !CANCEL_ENABLED_AND_CANCELED (self->cancelhandling))
    /* This is a deadlock situation.  The threads are waiting for each
       other to finish.  Note that this is a "may" error.  To be 100%
       sure we catch this error we would have to lock the data
@@ -60,28 +76,17 @@ pthread_join (pthread_t threadid, void **thread_return)
       two threads are really caught in this situation they will
       deadlock.  It is the programmer's problem to figure this
       out.  */
    return EDEADLK;

    result = EDEADLK;
  /* Wait for the thread to finish.  If it is already locked something
     is wrong.  There can only be one waiter.  */
  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
							      self,
							      NULL), 0))
  else if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
								   self,
								   NULL), 0))
    /* There is already somebody waiting for the thread.  */
    return EINVAL;

  /* During the wait we change to asynchronous cancellation.  If we
     are cancelled the thread we are waiting for must be marked as
     un-wait-ed for again.  */
  pthread_cleanup_push (cleanup, &pd->joinid);

  /* Switch to asynchronous cancellation.  */
  int oldtype = CANCEL_ASYNC ();


  /* Wait for the child.  */
  lll_wait_tid (pd->tid);
    result = EINVAL;
  else
    /* Wait for the child.  */
    lll_wait_tid (pd->tid);


  /* Restore cancellation mode.  */
@@ -91,16 +96,19 @@ pthread_join (pthread_t threadid, void **thread_return)
  pthread_cleanup_pop (0);


  /* We mark the thread as terminated and as joined.  */
  pd->tid = -1;
  if (__builtin_expect (result == 0, 1))
    {
      /* We mark the thread as terminated and as joined.  */
      pd->tid = -1;

      /* Store the return value if the caller is interested.  */
      if (thread_return != NULL)
	*thread_return = pd->result;
      /* Store the return value if the caller is interested.  */
      if (thread_return != NULL)
	*thread_return = pd->result;


      /* Free the TCB.  */
      __free_tcb (pd);
      /* Free the TCB.  */
      __free_tcb (pd);
    }

  return 0;
  return result;
}
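pthread_join relies on a single-waiter protocol: the joiner publishes itself in pd->joinid with one compare-and-swap, so a second simultaneous join sees the slot taken and gets EINVAL instead of blocking. A sketch of that claim step, using the GCC builtin __sync_bool_compare_and_swap as a stand-in for the library's atomic_compare_and_exchange_bool_acq (note the library macro returns zero on success, while the builtin returns nonzero on success):

#include <stdio.h>

struct fake_thread { struct fake_thread *joinid; };

/* Returns nonzero when we installed ourselves as the one waiter.  */
static int
try_claim (struct fake_thread *pd, struct fake_thread *self)
{
  return __sync_bool_compare_and_swap (&pd->joinid, NULL, self);
}

int
main (void)
{
  struct fake_thread target = { NULL }, a, b;
  printf ("a claims: %d\n", try_claim (&target, &a));	/* 1: proceeds to wait */
  printf ("b claims: %d\n", try_claim (&target, &b));	/* 0: the EINVAL path */
  return 0;
}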
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -19,45 +19,36 @@

#include <errno.h>
#include "pthreadP.h"
#include <atomic.h>


/* Internal mutex for __pthread_keys table handling.  */
lll_lock_t __pthread_keys_lock = LLL_LOCK_INITIALIZER;

int
__pthread_key_create (
     pthread_key_t *key,
     void (*destr) (void *))
{
  int result = EAGAIN;
  size_t cnt;

  lll_lock (__pthread_keys_lock);

  /* Find a slot in __pthread_keys which is unused.  */
  for (cnt = 0; cnt < PTHREAD_KEYS_MAX; ++cnt)
    if (KEY_UNUSED (__pthread_keys[cnt].seq)
	&& KEY_USABLE (__pthread_keys[cnt].seq))
      {
	/* We found an unused slot.  */
	++__pthread_keys[cnt].seq;
  for (size_t cnt = 0; cnt < PTHREAD_KEYS_MAX; ++cnt)
    {
      uintptr_t seq = __pthread_keys[cnt].seq;

	/* Remember the destructor.  */
	__pthread_keys[cnt].destr = destr;
      if (KEY_UNUSED (seq) && KEY_USABLE (seq)
	  /* We found an unused slot.  Try to allocate it.  */
	  && ! atomic_compare_and_exchange_bool_acq (&__pthread_keys[cnt].seq,
						     seq + 1, seq))
	{
	  /* Remember the destructor.  */
	  __pthread_keys[cnt].destr = destr;

	/* Return the key to the caller.  */
	*key = cnt;
	  /* Return the key to the caller.  */
	  *key = cnt;

	/* The call succeeded.  */
	result = 0;
	  /* The call succeeded.  */
	  return 0;
	}
    }

	/* We found a key and can stop now.  */
	break;
      }

  lll_unlock (__pthread_keys_lock);

  return result;
  return EAGAIN;
}
strong_alias (__pthread_key_create, pthread_key_create)
strong_alias (__pthread_key_create, __pthread_key_create_internal)
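The change above drops __pthread_keys_lock entirely: a slot is allocated by bumping its sequence number from an observed even value with one CAS, and whoever wins the CAS owns the slot (even sequence number means unused, as in nptl's KEY_UNUSED). A self-contained sketch of the same scheme, with KEY_USABLE's overflow check trimmed and __sync_bool_compare_and_swap standing in for the library atomic:

#include <stdio.h>

#define KEYS_MAX 8		/* stand-in for PTHREAD_KEYS_MAX */

/* Even sequence number == slot unused.  */
static unsigned long seqs[KEYS_MAX];

static int
key_create (unsigned *key)
{
  for (unsigned cnt = 0; cnt < KEYS_MAX; ++cnt)
    {
      unsigned long seq = seqs[cnt];
      /* Claim the slot by moving its sequence number odd; losing the
	 race simply means another thread took this slot first.  */
      if ((seq & 1) == 0
	  && __sync_bool_compare_and_swap (&seqs[cnt], seq, seq + 1))
	{
	  *key = cnt;
	  return 0;
	}
    }
  return -1;			/* EAGAIN in the real code */
}

int
main (void)
{
  unsigned k1, k2;
  key_create (&k1);
  key_create (&k2);
  printf ("allocated keys: %u %u\n", k1, k2);
  return 0;
}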
37	libpthread/nptl/pthread_mutex_consistent.c (new file)
@@ -0,0 +1,37 @@
/* Copyright (C) 2005, 2006, 2010 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2005.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <errno.h>
#include <pthreadP.h>


int
pthread_mutex_consistent (
     pthread_mutex_t *mutex)
{
  /* Test whether this is a robust mutex with a dead owner.  */
  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
      || mutex->__data.__owner != PTHREAD_MUTEX_INCONSISTENT)
    return EINVAL;

  mutex->__data.__owner = THREAD_GETMEM (THREAD_SELF, tid);

  return 0;
}
weak_alias (pthread_mutex_consistent, pthread_mutex_consistent_np)
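A usage sketch for this new entry point: when the owner of a robust mutex dies, the next locker is handed the lock with EOWNERDEAD; it repairs the protected state and calls pthread_mutex_consistent_np before unlocking. This assumes a libc of this vintage exporting the _np names (pthread_mutexattr_setrobust_np); compile with -pthread. In this single-threaded demo the lock simply succeeds, but the pattern is the one the function exists for:

#include <errno.h>
#include <pthread.h>

int
main (void)
{
  pthread_mutex_t m;
  pthread_mutexattr_t a;

  pthread_mutexattr_init (&a);
  pthread_mutexattr_setrobust_np (&a, PTHREAD_MUTEX_ROBUST_NP);
  pthread_mutex_init (&m, &a);

  int err = pthread_mutex_lock (&m);
  if (err == EOWNERDEAD)
    {
      /* Previous owner died holding the mutex: fix up the shared
	 data here, then mark the mutex usable again.  */
      pthread_mutex_consistent_np (&m);
    }
  pthread_mutex_unlock (&m);
  pthread_mutex_destroy (&m);
  pthread_mutexattr_destroy (&a);
  return 0;
}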
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -22,11 +22,17 @@

int
__pthread_mutex_destroy (pthread_mutex_t *mutex)
__pthread_mutex_destroy (
     pthread_mutex_t *mutex)
{
  if (mutex->__data.__nusers != 0)
  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
      && mutex->__data.__nusers != 0)
    return EBUSY;

  /* Set to an invalid value.  */
  mutex->__data.__kind = -1;

  return 0;
}
strong_alias (__pthread_mutex_destroy, pthread_mutex_destroy)
INTDEF(__pthread_mutex_destroy)
38	libpthread/nptl/pthread_mutex_getprioceiling.c (new file)
@@ -0,0 +1,38 @@
/* Get current priority ceiling of pthread_mutex_t.
   Copyright (C) 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <errno.h>
#include <pthreadP.h>


int
pthread_mutex_getprioceiling (mutex, prioceiling)
     const pthread_mutex_t *mutex;
     int *prioceiling;
{
  if (__builtin_expect ((mutex->__data.__kind
			 & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0, 0))
    return EINVAL;

  *prioceiling = (mutex->__data.__lock & PTHREAD_MUTEX_PRIO_CEILING_MASK)
		 >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

  return 0;
}
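The ceiling is read straight out of the lock word with a mask and a shift. A tiny standalone illustration — the constants are mirrored here as assumptions (in glibc of this era the ceiling occupies the top bits of __lock above bit 19; the authoritative values are in pthreadP.h):

#include <stdio.h>

/* Assumed mirrors of PTHREAD_MUTEX_PRIO_CEILING_{SHIFT,MASK}.  */
#define PRIO_CEILING_SHIFT 19
#define PRIO_CEILING_MASK  ((int) 0xfff80000)

static int
get_ceiling (int lockword)
{
  return (lockword & PRIO_CEILING_MASK) >> PRIO_CEILING_SHIFT;
}

int
main (void)
{
  /* A lock word carrying ceiling 42 with the "locked" low bit set.  */
  int lock = (42 << PRIO_CEILING_SHIFT) | 1;
  printf ("ceiling = %d\n", get_ceiling (lock));
  return 0;
}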
@@ -1,4 +1,5 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -18,10 +19,11 @@
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <string.h>
#include <bits/kernel-features.h>
#include "pthreadP.h"


static const struct pthread_mutexattr default_attr =
  {
    /* Default is a normal mutex, not shared between processes.  */
@@ -29,6 +31,11 @@ static const struct pthread_mutexattr default_attr =
  };


#ifndef __ASSUME_FUTEX_LOCK_PI
static int tpi_supported;
#endif


int
__pthread_mutex_init (
     pthread_mutex_t *mutex,
@@ -40,18 +47,95 @@ __pthread_mutex_init (

  imutexattr = (const struct pthread_mutexattr *) mutexattr ?: &default_attr;

  /* Sanity checks.  */
  switch (__builtin_expect (imutexattr->mutexkind
			    & PTHREAD_MUTEXATTR_PROTOCOL_MASK,
			    PTHREAD_PRIO_NONE
			    << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT))
    {
    case PTHREAD_PRIO_NONE << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      break;

    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
#ifndef __ASSUME_FUTEX_LOCK_PI
      if (__builtin_expect (tpi_supported == 0, 0))
	{
	  int lock = 0;
	  INTERNAL_SYSCALL_DECL (err);
	  int ret = INTERNAL_SYSCALL (futex, err, 4, &lock, FUTEX_UNLOCK_PI,
				      0, 0);
	  assert (INTERNAL_SYSCALL_ERROR_P (ret, err));
	  tpi_supported = INTERNAL_SYSCALL_ERRNO (ret, err) == ENOSYS ? -1 : 1;
	}
      if (__builtin_expect (tpi_supported < 0, 0))
	return ENOTSUP;
#endif
      break;

    default:
      /* XXX: For now we don't support robust priority protected mutexes.  */
      if (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST)
	return ENOTSUP;
      break;
    }

  /* Clear the whole variable.  */
  memset (mutex, '\0', __SIZEOF_PTHREAD_MUTEX_T);

  /* Copy the values from the attribute.  */
  mutex->__data.__kind = imutexattr->mutexkind & ~0x80000000;
  mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;

  if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
    {
#ifndef __ASSUME_SET_ROBUST_LIST
      if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
	  && __set_robust_list_avail < 0)
	return ENOTSUP;
#endif

      mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
    }

  switch (imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
    {
    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
      break;

    case PTHREAD_PRIO_PROTECT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;

      int ceiling = (imutexattr->mutexkind
		     & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
		    >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT;
      if (! ceiling)
	{
	  if (__sched_fifo_min_prio == -1)
	    __init_sched_fifo_prio ();
	  if (ceiling < __sched_fifo_min_prio)
	    ceiling = __sched_fifo_min_prio;
	}
      mutex->__data.__lock = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
      break;

    default:
      break;
    }

  /* The kernel when waking robust mutexes on exit never uses
     FUTEX_PRIVATE_FLAG FUTEX_WAKE.  */
  if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
				| PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
    mutex->__data.__kind |= PTHREAD_MUTEX_PSHARED_BIT;

  /* Default values: mutex not used yet.  */
  // mutex->__count = 0;	already done by memset
  // mutex->__owner = 0;	already done by memset
  // mutex->__nusers = 0;	already done by memset
  // mutex->__spins = 0;	already done by memset
  // mutex->__next = NULL;	already done by memset

  return 0;
}
strong_alias (__pthread_mutex_init, pthread_mutex_init)
INTDEF(__pthread_mutex_init)
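The one-time probe added to __pthread_mutex_init deserves a note: when __ASSUME_FUTEX_LOCK_PI is not set, the code issues a FUTEX_UNLOCK_PI on a dummy word it does not own. That call must fail; if it fails with ENOSYS the kernel lacks PI-futex support and PTHREAD_PRIO_INHERIT mutexes get ENOTSUP. A Linux-specific sketch of the same probe using the raw syscall (FUTEX_UNLOCK_PI mirrored from linux/futex.h as an assumption):

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef FUTEX_UNLOCK_PI
# define FUTEX_UNLOCK_PI 7	/* linux/futex.h value, mirrored here */
#endif

int
main (void)
{
  int lock = 0;
  /* We do not hold this "lock", so the call is expected to fail;
     only the errno matters.  */
  long ret = syscall (SYS_futex, &lock, FUTEX_UNLOCK_PI, 0, 0);
  if (ret == -1 && errno == ENOSYS)
    printf ("kernel lacks PI-futex support\n");
  else
    printf ("PI futexes available\n");
  return 0;
}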
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -19,27 +19,52 @@

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>


#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
# define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
		   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#endif


static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;


int
__pthread_mutex_lock (pthread_mutex_t *mutex)
__pthread_mutex_lock (
     pthread_mutex_t *mutex)
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_lock_full (mutex);

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	{
@@ -54,32 +79,17 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
	}

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex->__data.__lock);
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	return EDEADLK;

      /* FALLTHROUGH */

    default:
      /* Correct code cannot set any other type.  */
    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex->__data.__lock);
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
	goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0)
      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
	{
	  int cnt = 0;
	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
@@ -88,7 +98,7 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
	    {
	      if (cnt++ >= max_cnt)
		{
		  LLL_MUTEX_LOCK (mutex->__data.__lock);
		  LLL_MUTEX_LOCK (mutex);
		  break;
		}

@@ -96,15 +106,362 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
	      BUSY_WAIT_NOP;
#endif
	    }
	  while (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0);
	  while (LLL_MUTEX_TRYLOCK (mutex) != 0);

	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
      break;
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
	return EDEADLK;
      goto simple;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id;
#ifdef NO_INCR
	      newval |= FUTEX_WAITERS;
#else
	      newval |= (oldval & FUTEX_WAITERS);
#endif

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);

	      if (newval != oldval)
		{
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  If we are not supposed
		 to increment __nusers we actually have to decrement
		 it here.  */
#ifdef NO_INCR
	      --mutex->__data.__nusers;
#endif

	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_unlock (mutex->__data.__lock,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int newval = id;
#ifdef NO_INCR
	newval |= FUTEX_WAITERS;
#endif
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      newval, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);
	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1, 0);

	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
		&& (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
	      {
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
			    && kind != PTHREAD_MUTEX_RECURSIVE_NP));
		/* ESRCH can happen only for non-robust PI mutexes where
		   the owner of the lock died.  */
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

		/* Delay the thread indefinitely.  */
		while (1)
		  pause_not_cancel ();
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  If we are not supposed to
	       increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
	    --mutex->__data.__nusers;
#endif

	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return EINVAL;
	      }

	    int retval = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (retval)
	      return retval;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
						     ceilval | 2,
#else
						     ceilval | 1,
#endif
						     ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
				  PTHREAD_MUTEX_PSHARED (mutex));
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  assert (mutex->__data.__owner == 0);
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
@@ -116,3 +473,22 @@ __pthread_mutex_lock (pthread_mutex_t *mutex)
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif


#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (
     pthread_mutex_t *mutex)
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif
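The adaptive path above spins on trylock for a bounded count before blocking, then feeds the observed count back into a per-mutex spin budget with the moving average "spins += (cnt - spins) / 8". The same strategy can be sketched portably on top of the public pthread API (MAX_ADAPTIVE_COUNT is a stand-in for the internal constant):

#include <pthread.h>
#include <stdio.h>

#define MAX_ADAPTIVE_COUNT 100	/* stand-in for the nptl constant */

struct adaptive { pthread_mutex_t m; int spins; };

static void
adaptive_lock (struct adaptive *a)
{
  if (pthread_mutex_trylock (&a->m) != 0)
    {
      int cnt = 0;
      /* Spin budget: bounded by the constant, scaled by history.  */
      int max_cnt = MAX_ADAPTIVE_COUNT < a->spins * 2 + 10
		    ? MAX_ADAPTIVE_COUNT : a->spins * 2 + 10;
      do
	{
	  if (cnt++ >= max_cnt)
	    {
	      pthread_mutex_lock (&a->m);	/* give up and block */
	      break;
	    }
	}
      while (pthread_mutex_trylock (&a->m) != 0);

      /* Moving average of how long contention lasted.  */
      a->spins += (cnt - a->spins) / 8;
    }
}

int
main (void)
{
  struct adaptive a = { PTHREAD_MUTEX_INITIALIZER, 0 };
  adaptive_lock (&a);
  pthread_mutex_unlock (&a.m);
  printf ("spin budget now %d\n", a.spins);
  return 0;
}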
119	libpthread/nptl/pthread_mutex_setprioceiling.c (new file)
@@ -0,0 +1,119 @@
/* Set current priority ceiling of pthread_mutex_t.
   Copyright (C) 2006, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <stdbool.h>
#include <errno.h>
#include <pthreadP.h>


int
pthread_mutex_setprioceiling (mutex, prioceiling, old_ceiling)
     pthread_mutex_t *mutex;
     int prioceiling;
     int *old_ceiling;
{
  /* The low bits of __kind aren't ever changed after pthread_mutex_init,
     so we don't need a lock yet.  */
  if ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0)
    return EINVAL;

  if (__sched_fifo_min_prio == -1)
    __init_sched_fifo_prio ();

  if (__builtin_expect (prioceiling < __sched_fifo_min_prio, 0)
      || __builtin_expect (prioceiling > __sched_fifo_max_prio, 0)
      || __builtin_expect ((prioceiling
			    & (PTHREAD_MUTEXATTR_PRIO_CEILING_MASK
			       >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT))
			   != prioceiling, 0))
    return EINVAL;

  /* Check whether we already hold the mutex.  */
  bool locked = false;
  int kind = PTHREAD_MUTEX_TYPE (mutex);
  if (mutex->__data.__owner == THREAD_GETMEM (THREAD_SELF, tid))
    {
      if (kind == PTHREAD_MUTEX_PP_ERRORCHECK_NP)
	return EDEADLK;

      if (kind == PTHREAD_MUTEX_PP_RECURSIVE_NP)
	locked = true;
    }

  int oldval = mutex->__data.__lock;
  if (! locked)
    do
      {
	/* Need to lock the mutex, but without obeying the priority
	   protect protocol.  */
	int ceilval = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK);

	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      ceilval | 1, ceilval);
	if (oldval == ceilval)
	  break;

	do
	  {
	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						     ceilval | 2,
						     ceilval | 1);

	    if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
	      break;

	    if (oldval != ceilval)
	      lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
			      PTHREAD_MUTEX_PSHARED (mutex));
	  }
	while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						    ceilval | 2, ceilval)
	       != ceilval);

	if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
	  continue;
      }
    while (0);

  int oldprio = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
		>> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
  if (locked)
    {
      int ret = __pthread_tpp_change_priority (oldprio, prioceiling);
      if (ret)
	return ret;
    }

  if (old_ceiling != NULL)
    *old_ceiling = oldprio;

  int newlock = 0;
  if (locked)
    newlock = (mutex->__data.__lock & ~PTHREAD_MUTEX_PRIO_CEILING_MASK);
  mutex->__data.__lock = newlock
			 | (prioceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT);
  atomic_full_barrier ();

  lll_futex_wake (&mutex->__data.__lock, INT_MAX,
		  PTHREAD_MUTEX_PSHARED (mutex));

  return 0;
}
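The lock word manipulated by this new file encodes three states in its low bits on top of the ceiling field: ceilval (unlocked), ceilval | 1 (locked, no waiters), and ceilval | 2 (locked and contended — the value waiters futex-wait on). A tiny state walk, with the shift mirrored as an assumption (the authoritative constant is PTHREAD_MUTEX_PRIO_CEILING_SHIFT in pthreadP.h):

#include <stdio.h>

#define PRIO_CEILING_SHIFT 19	/* mirrored assumption, see above */

int
main (void)
{
  int ceilval = 42 << PRIO_CEILING_SHIFT;
  int lock = ceilval;		/* unlocked, ceiling 42 */
  lock = ceilval | 1;		/* taken without contention */
  lock = ceilval | 2;		/* a second locker would wait on this value */
  printf ("contended word: %#x (ceiling %d)\n",
	  lock, lock >> PRIO_CEILING_SHIFT);
  return 0;
}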
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -17,9 +17,12 @@
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>


int
@@ -27,13 +30,15 @@ pthread_mutex_timedlock (
     pthread_mutex_t *mutex,
     const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  switch (mutex->__data.__kind)
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
			    PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
@@ -49,40 +54,38 @@ pthread_mutex_timedlock (

	  goto out;
	}
      else
	{
	  /* We have to get the mutex.  */
	  result = lll_mutex_timedlock (mutex->__data.__lock, abstime);

	  if (result != 0)
	    goto out;
      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
			      PTHREAD_MUTEX_PSHARED (mutex));

	  /* Only locked once so far.  */
	  mutex->__data.__count = 1;
	}
      if (result != 0)
	goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
      if (__builtin_expect (mutex->__data.__owner == id, 0))
	return EDEADLK;

      /* FALLTHROUGH */

    default:
      /* Correct code cannot set any other type.  */
    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
      result = lll_timedlock (mutex->__data.__lock, abstime,
			      PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
	goto simple;

      if (lll_mutex_trylock (mutex->__data.__lock) != 0)
      if (lll_trylock (mutex->__data.__lock) != 0)
	{
	  int cnt = 0;
	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
@@ -91,7 +94,8 @@ pthread_mutex_timedlock (
	    {
	      if (cnt++ >= max_cnt)
		{
		  result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
		  result = lll_timedlock (mutex->__data.__lock, abstime,
					  PTHREAD_MUTEX_PSHARED (mutex));
		  break;
		}

@@ -99,11 +103,373 @@ pthread_mutex_timedlock (
	      BUSY_WAIT_NOP;
#endif
	    }
	  while (lll_mutex_trylock (mutex->__data.__lock) != 0);
	  while (lll_trylock (mutex->__data.__lock) != 0);

	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
      break;
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id | (oldval & FUTEX_WAITERS);

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);
	      if (newval != oldval)
		{
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  */
	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
					 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_unlock (mutex->__data.__lock,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }

	  if (result == ETIMEDOUT || result == EINVAL)
	    goto out;

	  oldval = result;
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      id, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  The timeout value must be a relative value.
	       Convert it.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);

	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1,
				      abstime);
	    if (INTERNAL_SYSCALL_ERROR_P (e, __err))
	      {
		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
		  return ETIMEDOUT;

		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
		  {
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			    || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
				&& kind != PTHREAD_MUTEX_RECURSIVE_NP));
		    /* ESRCH can happen only for non-robust PI mutexes where
		       the owner of the lock died.  */
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
			    || !robust);

		    /* Delay the thread until the timeout is reached.
		       Then return ETIMEDOUT.  */
		    struct timespec reltime;
		    struct timespec now;

		    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
				      &now);
		    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
		    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
		    if (reltime.tv_nsec < 0)
		      {
			reltime.tv_nsec += 1000000000;
			--reltime.tv_sec;
		      }
		    if (reltime.tv_sec >= 0)
		      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
			continue;

		    return ETIMEDOUT;
		  }

		return INTERNAL_SYSCALL_ERRNO (e, __err);
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  */
	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;
case PTHREAD_MUTEX_PP_RECURSIVE_NP:
|
||||
case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
|
||||
case PTHREAD_MUTEX_PP_NORMAL_NP:
|
||||
case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
|
||||
{
|
||||
int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
|
||||
|
||||
oldval = mutex->__data.__lock;
|
||||
|
||||
/* Check whether we already hold the mutex. */
|
||||
if (mutex->__data.__owner == id)
|
||||
{
|
||||
if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
|
||||
return EDEADLK;
|
||||
|
||||
if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
|
||||
{
|
||||
/* Just bump the counter. */
|
||||
if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
|
||||
/* Overflow of the counter. */
|
||||
return EAGAIN;
|
||||
|
||||
++mutex->__data.__count;
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
int oldprio = -1, ceilval;
|
||||
do
|
||||
{
|
||||
int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
|
||||
>> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
|
||||
|
||||
if (__pthread_current_priority () > ceiling)
|
||||
{
|
||||
result = EINVAL;
|
||||
failpp:
|
||||
if (oldprio != -1)
|
||||
__pthread_tpp_change_priority (oldprio, -1);
|
||||
return result;
|
||||
}
|
||||
|
||||
result = __pthread_tpp_change_priority (oldprio, ceiling);
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
|
||||
oldprio = ceiling;
|
||||
|
||||
oldval
|
||||
= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
|
||||
ceilval | 1, ceilval);
|
||||
|
||||
if (oldval == ceilval)
|
||||
break;
|
||||
|
||||
do
|
||||
{
|
||||
oldval
|
||||
= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
|
||||
ceilval | 2,
|
||||
ceilval | 1);
|
||||
|
||||
if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
|
||||
break;
|
||||
|
||||
if (oldval != ceilval)
|
||||
{
|
||||
/* Reject invalid timeouts. */
|
||||
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
|
||||
{
|
||||
result = EINVAL;
|
||||
goto failpp;
|
||||
}
|
||||
|
||||
struct timeval tv;
|
||||
struct timespec rt;
|
||||
|
||||
/* Get the current time. */
|
||||
(void) gettimeofday (&tv, NULL);
|
||||
|
||||
/* Compute relative timeout. */
|
||||
rt.tv_sec = abstime->tv_sec - tv.tv_sec;
|
||||
rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
|
||||
if (rt.tv_nsec < 0)
|
||||
{
|
||||
rt.tv_nsec += 1000000000;
|
||||
--rt.tv_sec;
|
||||
}
|
||||
|
||||
/* Already timed out? */
|
||||
if (rt.tv_sec < 0)
|
||||
{
|
||||
result = ETIMEDOUT;
|
||||
goto failpp;
|
||||
}
|
||||
|
||||
lll_futex_timed_wait (&mutex->__data.__lock,
|
||||
ceilval | 2, &rt,
|
||||
PTHREAD_MUTEX_PSHARED (mutex));
|
||||
}
|
||||
}
|
||||
while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
|
||||
ceilval | 2, ceilval)
|
||||
!= ceilval);
|
||||
}
|
||||
while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
|
||||
|
||||
assert (mutex->__data.__owner == 0);
|
||||
mutex->__data.__count = 1;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
/* Correct code cannot set any other type. */
|
||||
return EINVAL;
|
||||
}
|
||||
|
||||
if (result == 0)
|
||||
|
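The PI timeout path above converts the caller's absolute CLOCK_REALTIME deadline into a relative interval, borrowing a second when the nanosecond field underflows. A minimal standalone sketch of the same conversion, using the portable clock_gettime interface instead of the internal INTERNAL_SYSCALL wrapper:

#include <errno.h>
#include <time.h>

/* Convert an absolute CLOCK_REALTIME deadline into a relative timeout.
   Returns ETIMEDOUT if the deadline has already passed, else 0.  */
static int
abstime_to_reltime (const struct timespec *abstime, struct timespec *reltime)
{
  struct timespec now;
  clock_gettime (CLOCK_REALTIME, &now);

  reltime->tv_sec = abstime->tv_sec - now.tv_sec;
  reltime->tv_nsec = abstime->tv_nsec - now.tv_nsec;
  if (reltime->tv_nsec < 0)
    {
      /* Borrow one second, exactly as the lock code does.  */
      reltime->tv_nsec += 1000000000;
      --reltime->tv_sec;
    }

  return reltime->tv_sec < 0 ? ETIMEDOUT : 0;
}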
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2005-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -17,21 +17,25 @@
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>


int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
__pthread_mutex_trylock (
     pthread_mutex_t *mutex)
{
  pid_t id;
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      id = THREAD_GETMEM (THREAD_SELF, tid);
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
@@ -44,7 +48,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
          return 0;
        }

      if (lll_mutex_trylock (mutex->__data.__lock) == 0)
      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
@@ -55,20 +59,322 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
      break;

    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Error checking mutex.  We do not check for deadlocks.  */
    default:
      /* Correct code cannot set any other type.  */
    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  */
      if (lll_mutex_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = THREAD_GETMEM (THREAD_SELF, tid);
          ++mutex->__data.__nusers;
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

          return 0;
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = lll_robust_trylock (mutex->__data.__lock, id);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
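From application code, the robust paths above reduce to handling EOWNERDEAD: the caller owns the lock but must repair the protected data and mark the mutex consistent again. A minimal sketch of that caller-side protocol, assuming the mutex was initialized with the robust attribute (see pthread_mutexattr_setrobust below); pthread_mutex_consistent_np is the GNU name of the era:

#include <pthread.h>
#include <errno.h>

/* Try to take a robust mutex, recovering if the previous owner died.
   Returns 0 on success, EBUSY if locked, ENOTRECOVERABLE if recovery
   failed.  */
static int
lock_robust (pthread_mutex_t *m)
{
  int e = pthread_mutex_trylock (m);
  if (e == EOWNERDEAD)
    {
      /* Previous owner died while holding the lock.  Repair the
         application data here, then mark the state consistent.  */
      if (pthread_mutex_consistent_np (m) != 0)
        return ENOTRECOVERABLE;
      e = 0;
    }
  return e;
}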
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2005-2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -17,10 +17,16 @@
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>

static int
internal_function
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;

int
internal_function attribute_hidden
@@ -28,9 +34,26 @@ __pthread_mutex_unlock_usercnt (
     pthread_mutex_t *mutex,
     int decr)
{
  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
  int type = PTHREAD_MUTEX_TYPE (mutex);
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_unlock_full (mutex, decr);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));
      return 0;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;
@@ -38,38 +61,231 @@ __pthread_mutex_unlock_usercnt (
      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto normal;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      goto normal;
    }
}


static int
internal_function
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto robust;

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_robust_unlock (mutex->__data.__lock,
                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto pi_notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
        {
        continue_pi_robust:
          /* Remove mutex from the list.
             Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
          DEQUEUE_MUTEX (mutex);
        }

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      if ((mutex->__data.__lock & FUTEX_WAITERS) != 0
          || atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock, 0,
                                                   THREAD_GETMEM (THREAD_SELF,
                                                                  tid)))
        {
          int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
          int private = (robust
                         ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                         : PTHREAD_MUTEX_PSHARED (mutex));
          INTERNAL_SYSCALL_DECL (__err);
          INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
                            __lll_private_flag (FUTEX_UNLOCK_PI, private));
        }

      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_mutex_islocked (mutex->__data.__lock))
          || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
      break;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      int newval, oldval;
      do
        {
          oldval = mutex->__data.__lock;
          newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
        }
      while (atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock,
                                                   newval, oldval));

      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
        lll_futex_wake (&mutex->__data.__lock, 1,
                        PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  Nothing special to do.  */
      break;
      return EINVAL;
    }

  /* Always reset the owner field.  */
  mutex->__data.__owner = 0;
  if (decr)
    /* One less user.  */
    --mutex->__data.__nusers;

  /* Unlock.  */
  lll_mutex_unlock (mutex->__data.__lock);

  return 0;
}


int
__pthread_mutex_unlock (pthread_mutex_t *mutex)
__pthread_mutex_unlock (
     pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
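The priority-protect unlock path above releases the lock word with a compare-and-swap loop that preserves only the ceiling bits, then wakes one waiter if anyone was queued. A minimal sketch of that loop in portable C11 atomics; the mask value is a stand-in for illustration, not the real PTHREAD_MUTEX_PRIO_CEILING_MASK:

#include <stdatomic.h>

#define CEILING_MASK 0x00ffff00   /* stand-in for the real ceiling mask */

/* Release a PP-style lock word: keep the ceiling bits, clear the
   lock/waiter bits, and report whether a waiter needs waking.  */
static int
pp_release (atomic_int *lockword)
{
  int oldval = atomic_load_explicit (lockword, memory_order_relaxed);
  int newval;
  do
    newval = oldval & CEILING_MASK;
  while (!atomic_compare_exchange_weak_explicit (lockword, &oldval, newval,
                                                 memory_order_release,
                                                 memory_order_relaxed));

  /* More than the single "locked" bit outside the ceiling field means
     someone queued on the futex; the caller should issue a wake.  */
  return (oldval & ~CEILING_MASK) > 1;
}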
libpthread/nptl/pthread_mutexattr_getprioceiling.c (new file, 48 lines)
@@ -0,0 +1,48 @@
/* Get priority ceiling setting from pthread_mutexattr_t.
   Copyright (C) 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <pthreadP.h>


int
pthread_mutexattr_getprioceiling (
     const pthread_mutexattr_t *attr,
     int *prioceiling)
{
  const struct pthread_mutexattr *iattr;
  int ceiling;

  iattr = (const struct pthread_mutexattr *) attr;

  ceiling = ((iattr->mutexkind & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
             >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT);

  if (! ceiling)
    {
      if (__sched_fifo_min_prio == -1)
        __init_sched_fifo_prio ();
      if (ceiling < __sched_fifo_min_prio)
        ceiling = __sched_fifo_min_prio;
    }

  *prioceiling = ceiling;

  return 0;
}
libpthread/nptl/pthread_mutexattr_getprotocol.c (new file, 37 lines)
@@ -0,0 +1,37 @@
/* Get priority protocol setting from pthread_mutexattr_t.
   Copyright (C) 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <pthreadP.h>


int
pthread_mutexattr_getprotocol (
     const pthread_mutexattr_t *attr,
     int *protocol)
{
  const struct pthread_mutexattr *iattr;

  iattr = (const struct pthread_mutexattr *) attr;

  *protocol = ((iattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
               >> PTHREAD_MUTEXATTR_PROTOCOL_SHIFT);

  return 0;
}
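A short round trip through the public accessors this file implements, using only standard POSIX names:

#include <pthread.h>
#include <assert.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  int proto;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT);
  pthread_mutexattr_getprotocol (&attr, &proto);
  assert (proto == PTHREAD_PRIO_INHERIT);   /* value survives the round trip */

  pthread_mutexattr_destroy (&attr);
  return 0;
}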
@@ -1,4 +1,4 @@
/* Copyright (C) 2002 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -29,9 +29,7 @@ pthread_mutexattr_getpshared (

  iattr = (const struct pthread_mutexattr *) attr;

  /* We use bit 31 to signal whether the mutex is going to be
     process-shared or not.  */
  *pshared = ((iattr->mutexkind & 0x80000000) != 0
  *pshared = ((iattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
              ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE);

  return 0;
libpthread/nptl/pthread_mutexattr_getrobust.c (new file, 37 lines)
@@ -0,0 +1,37 @@
/* Copyright (C) 2005, 2010 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2005.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <pthreadP.h>


int
pthread_mutexattr_getrobust (
     const pthread_mutexattr_t *attr,
     int *robustness)
{
  const struct pthread_mutexattr *iattr;

  iattr = (const struct pthread_mutexattr *) attr;

  *robustness = ((iattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0
                 ? PTHREAD_MUTEX_ROBUST_NP : PTHREAD_MUTEX_STALLED_NP);

  return 0;
}
weak_alias (pthread_mutexattr_getrobust, pthread_mutexattr_getrobust_np)
@@ -1,4 +1,4 @@
/* Copyright (C) 2002 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -29,9 +29,7 @@ pthread_mutexattr_gettype (

  iattr = (const struct pthread_mutexattr *) attr;

  /* We use bit 31 to signal whether the mutex is going to be
     process-shared or not.  */
  *kind = iattr->mutexkind & ~0x80000000;
  *kind = iattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;

  return 0;
}
@@ -22,7 +22,8 @@


int
__pthread_mutexattr_init (pthread_mutexattr_t *attr)
__pthread_mutexattr_init (
     pthread_mutexattr_t *attr)
{
  if (sizeof (struct pthread_mutexattr) != sizeof (pthread_mutexattr_t))
    memset (attr, '\0', sizeof (*attr));
libpthread/nptl/pthread_mutexattr_setprioceiling.c (new file, 47 lines)
@@ -0,0 +1,47 @@
/* Change priority ceiling setting in pthread_mutexattr_t.
   Copyright (C) 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <errno.h>
#include <pthreadP.h>


int
pthread_mutexattr_setprioceiling (
     pthread_mutexattr_t *attr,
     int prioceiling)
{
  if (__sched_fifo_min_prio == -1)
    __init_sched_fifo_prio ();

  if (__builtin_expect (prioceiling < __sched_fifo_min_prio, 0)
      || __builtin_expect (prioceiling > __sched_fifo_max_prio, 0)
      || __builtin_expect ((prioceiling
                            & (PTHREAD_MUTEXATTR_PRIO_CEILING_MASK
                               >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT))
                           != prioceiling, 0))
    return EINVAL;

  struct pthread_mutexattr *iattr = (struct pthread_mutexattr *) attr;

  iattr->mutexkind = ((iattr->mutexkind & ~PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
                      | (prioceiling << PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT));

  return 0;
}
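The range check above rejects ceilings outside the SCHED_FIFO priority band, so callers should pick the value from the scheduler's advertised range. A small usage sketch with standard interfaces:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  /* Stay inside the valid SCHED_FIFO range the code above enforces.  */
  int ceiling = sched_get_priority_min (SCHED_FIFO) + 1;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_PROTECT);

  int e = pthread_mutexattr_setprioceiling (&attr, ceiling);
  if (e != 0)
    fprintf (stderr, "setprioceiling failed: %d\n", e);

  pthread_mutexattr_destroy (&attr);
  return 0;
}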
libpthread/nptl/pthread_mutexattr_setprotocol.c (new file, 41 lines)
@@ -0,0 +1,41 @@
/* Change priority protocol setting in pthread_mutexattr_t.
   Copyright (C) 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <errno.h>
#include <pthreadP.h>


int
pthread_mutexattr_setprotocol (
     pthread_mutexattr_t *attr,
     int protocol)
{
  if (protocol != PTHREAD_PRIO_NONE
      && protocol != PTHREAD_PRIO_INHERIT
      && __builtin_expect (protocol != PTHREAD_PRIO_PROTECT, 0))
    return EINVAL;

  struct pthread_mutexattr *iattr = (struct pthread_mutexattr *) attr;

  iattr->mutexkind = ((iattr->mutexkind & ~PTHREAD_MUTEXATTR_PROTOCOL_MASK)
                      | (protocol << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT));

  return 0;
}
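The attribute word packs several independent fields with mask-and-shift pairs, as both setter files above show. A self-contained sketch of the pattern; the shift and width here are illustrative stand-ins, not the real uClibc PTHREAD_MUTEXATTR_PROTOCOL values:

#include <assert.h>

#define PROTO_SHIFT 28                    /* hypothetical field position */
#define PROTO_MASK  (0x3u << PROTO_SHIFT) /* hypothetical 2-bit field */

/* Store a field without disturbing the other bits of the word.  */
static unsigned
set_proto (unsigned word, unsigned proto)
{
  return (word & ~PROTO_MASK) | (proto << PROTO_SHIFT);
}

/* Extract the field back out.  */
static unsigned
get_proto (unsigned word)
{
  return (word & PROTO_MASK) >> PROTO_SHIFT;
}

int
main (void)
{
  unsigned word = 0x1u;          /* other bits must survive */
  word = set_proto (word, 2);
  assert (get_proto (word) == 2 && (word & 0x1u) == 0x1u);
  return 0;
}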
@@ -1,4 +1,4 @@
/* Copyright (C) 2002 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -34,12 +34,10 @@ pthread_mutexattr_setpshared (

  iattr = (struct pthread_mutexattr *) attr;

  /* We use bit 31 to signal whether the mutex is going to be
     process-shared or not.  */
  if (pshared == PTHREAD_PROCESS_PRIVATE)
    iattr->mutexkind &= ~0x80000000;
    iattr->mutexkind &= ~PTHREAD_MUTEXATTR_FLAG_PSHARED;
  else
    iattr->mutexkind |= 0x80000000;
    iattr->mutexkind |= PTHREAD_MUTEXATTR_FLAG_PSHARED;

  return 0;
}
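What the PSHARED flag enables in practice: a mutex placed in memory that is visible to more than one process. A minimal sketch using anonymous shared memory across fork():

#define _GNU_SOURCE
#include <pthread.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  /* The mutex must live in memory both processes can see.  */
  pthread_mutex_t *m = mmap (NULL, sizeof *m, PROT_READ | PROT_WRITE,
                             MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  pthread_mutexattr_t attr;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED);
  pthread_mutex_init (m, &attr);
  pthread_mutexattr_destroy (&attr);

  if (fork () == 0)
    {
      pthread_mutex_lock (m);        /* child side */
      pthread_mutex_unlock (m);
      _exit (0);
    }

  pthread_mutex_lock (m);            /* parent side */
  pthread_mutex_unlock (m);
  wait (NULL);
  return 0;
}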
libpthread/nptl/pthread_mutexattr_setrobust.c (new file, 44 lines)
@@ -0,0 +1,44 @@
/* Copyright (C) 2005, 2010 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2005.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <errno.h>
#include <pthreadP.h>


int
pthread_mutexattr_setrobust (
     pthread_mutexattr_t *attr,
     int robustness)
{
  if (robustness != PTHREAD_MUTEX_STALLED_NP
      && __builtin_expect (robustness != PTHREAD_MUTEX_ROBUST_NP, 0))
    return EINVAL;

  struct pthread_mutexattr *iattr = (struct pthread_mutexattr *) attr;

  /* We use bit 30 to signal whether the mutex is going to be
     robust or not.  */
  if (robustness == PTHREAD_MUTEX_STALLED_NP)
    iattr->mutexkind &= ~PTHREAD_MUTEXATTR_FLAG_ROBUST;
  else
    iattr->mutexkind |= PTHREAD_MUTEXATTR_FLAG_ROBUST;

  return 0;
}
weak_alias (pthread_mutexattr_setrobust, pthread_mutexattr_setrobust_np)
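Initializing a robust mutex with this attribute, using the _np alias exported above; this pairs with the EOWNERDEAD recovery sketch shown after the pthread_mutex_trylock diff:

#include <pthread.h>

static void
robust_mutex_init (pthread_mutex_t *m)
{
  pthread_mutexattr_t attr;

  pthread_mutexattr_init (&attr);
  /* Bit 30 of the attribute word, per the setter above.  */
  pthread_mutexattr_setrobust_np (&attr, PTHREAD_MUTEX_ROBUST_NP);
  pthread_mutex_init (m, &attr);
  pthread_mutexattr_destroy (&attr);
}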
@@ -1,4 +1,4 @@
/* Copyright (C) 2002 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -33,9 +33,7 @@ __pthread_mutexattr_settype (

  iattr = (struct pthread_mutexattr *) attr;

  /* We use bit 31 to signal whether the mutex is going to be
     process-shared or not.  */
  iattr->mutexkind = (iattr->mutexkind & 0x80000000) | kind;
  iattr->mutexkind = (iattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_BITS) | kind;

  return 0;
}
@@ -1,4 +1,4 @@
/* Copyright (C) 2002 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2007, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -18,6 +18,7 @@
   02111-1307 USA.  */

#include "pthreadP.h"
#include <bits/kernel-features.h>


static const struct pthread_rwlockattr default_attr =
@@ -36,15 +37,36 @@ __pthread_rwlock_init (

  iattr = ((const struct pthread_rwlockattr *) attr) ?: &default_attr;

  rwlock->__data.__lock = 0;
  memset (rwlock, '\0', sizeof (*rwlock));

  rwlock->__data.__flags
    = iattr->lockkind == PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP;
  rwlock->__data.__nr_readers = 0;
  rwlock->__data.__writer = 0;
  rwlock->__data.__readers_wakeup = 0;
  rwlock->__data.__writer_wakeup = 0;
  rwlock->__data.__nr_readers_queued = 0;
  rwlock->__data.__nr_writers_queued = 0;

  /* The __SHARED field is computed to minimize the work that needs to
     be done while handling the futex.  There are two inputs: the
     availability of private futexes and whether the rwlock is shared
     or private.  Unfortunately the value of a private rwlock is
     fixed: it must be zero.  The PRIVATE_FUTEX flag has the value
     0x80 in case private futexes are available and zero otherwise.
     This leads to the following table:

                 |     pshared     |      result      |
                 | shared  private | shared  private  |
     ------------+-----------------+------------------+
     !avail 0    |     0       0   |    0        0    |
      avail 0x80 |  0x80       0   |    0     0x80    |

     If the pshared value is in locking functions XORed with avail
     we get the expected result.  */
#ifdef __ASSUME_PRIVATE_FUTEX
  rwlock->__data.__shared = (iattr->pshared == PTHREAD_PROCESS_PRIVATE
                             ? 0 : FUTEX_PRIVATE_FLAG);
#else
  rwlock->__data.__shared = (iattr->pshared == PTHREAD_PROCESS_PRIVATE
                             ? 0
                             : THREAD_GETMEM (THREAD_SELF,
                                              header.private_futex));
#endif

  return 0;
}
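The XOR trick described in the table is easy to miss: the rwlock stores the inverse of the flag it wants, so that XORing with the per-process "private futexes available" value yields the right futex-op flag with one instruction. A tiny sketch of just that computation:

#include <stdio.h>

#define FUTEX_PRIVATE_FLAG 0x80   /* the 0x80 value from the table */

/* stored __shared field XOR availability = flag for the futex op.  */
static int
futex_flag (int stored_shared, int avail)
{
  return stored_shared ^ avail;
}

int
main (void)
{
  int avail = FUTEX_PRIVATE_FLAG;            /* private futexes usable */

  /* A process-private rwlock stores 0, so the op gets PRIVATE set.  */
  printf ("private rwlock: %#x\n", futex_flag (0, avail));
  /* A process-shared rwlock stores 0x80, which cancels out to 0.  */
  printf ("shared  rwlock: %#x\n", futex_flag (FUTEX_PRIVATE_FLAG, avail));
  return 0;
}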
@@ -1,4 +1,4 @@
/* Copyright (C) 2002 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -23,15 +23,16 @@


int
__pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
__pthread_rwlock_tryrdlock (
     pthread_rwlock_t *rwlock)
{
  int result = EBUSY;

  lll_mutex_lock (rwlock->__data.__lock);
  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

  if (rwlock->__data.__writer == 0
      && (rwlock->__data.__nr_writers_queued == 0
          || rwlock->__data.__flags == 0))
          || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
    {
      if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
        {
@@ -42,7 +43,7 @@ __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
      result = 0;
    }

  lll_mutex_unlock (rwlock->__data.__lock);
  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  return result;
}
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -23,11 +23,12 @@


int
__pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
__pthread_rwlock_trywrlock (
     pthread_rwlock_t *rwlock)
{
  int result = EBUSY;

  lll_mutex_lock (rwlock->__data.__lock);
  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

  if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0)
    {
@@ -35,7 +36,7 @@ __pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
      result = 0;
    }

  lll_mutex_unlock (rwlock->__data.__lock);
  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  return result;
}
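Both try-lock variants return EBUSY instead of blocking, which callers typically use to fall back to other work. A short usage sketch:

#include <pthread.h>
#include <errno.h>

static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

/* Non-blocking read attempt: returns 1 if the read was performed,
   0 if a writer was active or queued (the EBUSY case above).  */
static int
try_read (void)
{
  if (pthread_rwlock_tryrdlock (&rw) == EBUSY)
    return 0;

  /* ... read the protected data ... */
  pthread_rwlock_unlock (&rw);
  return 1;
}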
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -26,9 +26,9 @@

int
__pthread_setschedparam (
     pthread_t threadid,
     int policy,
     const struct sched_param *param)
     pthread_t threadid,
     int policy,
     const struct sched_param *param)
{
  struct pthread *pd = (struct pthread *) threadid;

@@ -39,10 +39,23 @@ __pthread_setschedparam (

  int result = 0;

  lll_lock (pd->lock);
  lll_lock (pd->lock, LLL_PRIVATE);

  struct sched_param p;
  const struct sched_param *orig_param = param;

  /* If the thread should have higher priority because of some
     PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority.  */
  if (__builtin_expect (pd->tpp != NULL, 0)
      && pd->tpp->priomax > param->sched_priority)
    {
      p = *param;
      p.sched_priority = pd->tpp->priomax;
      param = &p;
    }

  /* Try to set the scheduler information.  */
  if (__builtin_expect (sched_setscheduler (pd->tid, policy,
  if (__builtin_expect (__sched_setscheduler (pd->tid, policy,
                                              param) == -1, 0))
    result = errno;
  else
@@ -50,11 +63,11 @@ __pthread_setschedparam (
      /* We succeeded changing the kernel information.  Reflect this
         change in the thread descriptor.  */
      pd->schedpolicy = policy;
      memcpy (&pd->schedparam, param, sizeof (struct sched_param));
      memcpy (&pd->schedparam, orig_param, sizeof (struct sched_param));
      pd->flags |= ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET;
    }

  lll_unlock (pd->lock);
  lll_unlock (pd->lock, LLL_PRIVATE);

  return result;
}
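From the caller's side, this entry point is reached through the standard API. A minimal sketch of requesting a real-time policy for an existing thread (needs appropriate privileges on most systems):

#include <pthread.h>
#include <sched.h>
#include <string.h>

/* Switch a thread to SCHED_FIFO at the given priority; the kernel may
   see a higher value if the thread holds PTHREAD_PRIO_PROTECT mutexes,
   per the clamp logic above.  */
static int
make_fifo (pthread_t t, int prio)
{
  struct sched_param sp;
  memset (&sp, 0, sizeof sp);
  sp.sched_priority = prio;
  return pthread_setschedparam (t, SCHED_FIFO, &sp);
}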
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -26,7 +26,9 @@


int
pthread_setschedprio (pthread_t threadid, int prio)
pthread_setschedprio (
     pthread_t threadid,
     int prio)
{
  struct pthread *pd = (struct pthread *) threadid;

@@ -39,7 +41,12 @@ pthread_setschedprio (pthread_t threadid, int prio)
  struct sched_param param;
  param.sched_priority = prio;

  lll_lock (pd->lock);
  lll_lock (pd->lock, LLL_PRIVATE);

  /* If the thread should have higher priority because of some
     PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority.  */
  if (__builtin_expect (pd->tpp != NULL, 0) && pd->tpp->priomax > prio)
    param.sched_priority = pd->tpp->priomax;

  /* Try to set the scheduler information.  */
  if (__builtin_expect (sched_setparam (pd->tid, &param) == -1, 0))
@@ -48,11 +55,12 @@ pthread_setschedprio (pthread_t threadid, int prio)
    {
      /* We succeeded changing the kernel information.  Reflect this
         change in the thread descriptor.  */
      param.sched_priority = prio;
      memcpy (&pd->schedparam, &param, sizeof (struct sched_param));
      pd->flags |= ATTR_FLAG_SCHED_SET;
    }

  lll_unlock (pd->lock);
  lll_unlock (pd->lock, LLL_PRIVATE);

  return result;
}
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -52,8 +52,8 @@ __pthread_setspecific (
    }
  else
    {
      if (KEY_UNUSED ((seq = __pthread_keys[key].seq))
          || key >= PTHREAD_KEYS_MAX)
      if (key >= PTHREAD_KEYS_MAX
          || KEY_UNUSED ((seq = __pthread_keys[key].seq)))
        /* Not valid.  */
        return EINVAL;

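The point of this hunk is ordering: the bounds check on key must run before key is used to subscript the array, or an out-of-range key reads past the end of __pthread_keys. A generic sketch of the same fix under assumed names (NKEYS and the odd-sequence convention stand in for the real internals):

#include <stddef.h>

#define NKEYS 1024                 /* stand-in for PTHREAD_KEYS_MAX */
static unsigned seqs[NKEYS];       /* stand-in for __pthread_keys[].seq */

/* Validate the index first; the pre-fix order read seqs[key] before
   checking key, exactly the bug the hunk above removes.  */
static int
key_valid (size_t key)
{
  if (key >= NKEYS)
    return 0;
  return (seqs[key] & 1) != 0;     /* allocated keys have odd seq */
}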
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -19,7 +19,7 @@

#include <errno.h>
#include <stdlib.h>
#include "atomic.h"
#include <atomic.h>
#include "pthreadP.h"


@@ -32,9 +32,9 @@ cleanup (void *arg)

int
pthread_timedjoin_np (
     pthread_t threadid,
     void **thread_return,
     const struct timespec *abstime)
     pthread_t threadid,
     void **thread_return,
     const struct timespec *abstime)
{
  struct pthread *self;
  struct pthread *pd = (struct pthread *) threadid;
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -20,12 +20,14 @@
#include <errno.h>
#include <stdlib.h>

#include "atomic.h"
#include <atomic.h>
#include "pthreadP.h"


int
pthread_tryjoin_np (pthread_t threadid, void **thread_return)
pthread_tryjoin_np (
     pthread_t threadid,
     void **thread_return)
{
  struct pthread *self;
  struct pthread *pd = (struct pthread *) threadid;
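Both of these are GNU extensions layered over pthread_join. A usage sketch for the timed variant, which takes an absolute CLOCK_REALTIME deadline just like the mutex timedlock path:

#define _GNU_SOURCE
#include <pthread.h>
#include <time.h>
#include <errno.h>

/* Wait at most two seconds for a worker thread to finish.  Returns 0
   on join, -1 on timeout, or another pthread error code.  */
static int
join_with_deadline (pthread_t t, void **ret)
{
  struct timespec deadline;
  clock_gettime (CLOCK_REALTIME, &deadline);
  deadline.tv_sec += 2;

  int e = pthread_timedjoin_np (t, ret, &deadline);
  return e == ETIMEDOUT ? -1 : e;
}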
@@ -17,7 +17,6 @@
   02111-1307 USA.  */

#include <features.h>

#include <tls.h>
#include <resolv.h>

@@ -41,12 +41,13 @@ walker (const void *inodep, const VISIT which, const int depth)


int
sem_close (sem_t *sem)
sem_close (
     sem_t *sem)
{
  int result = 0;

  /* Get the lock.  */
  lll_lock (__sem_mappings_lock);
  lll_lock (__sem_mappings_lock, LLL_PRIVATE);

  /* Locate the entry for the mapping the caller provided.  */
  rec = NULL;
@@ -74,7 +75,7 @@ sem_close (sem_t *sem)
    }

  /* Release the lock.  */
  lll_unlock (__sem_mappings_lock);
  lll_unlock (__sem_mappings_lock, LLL_PRIVATE);

  return result;
}
@@ -22,7 +22,8 @@


int
__new_sem_destroy (sem_t *sem)
__new_sem_destroy (
     sem_t *sem)
{
  /* XXX Check for valid parameter.  */

@@ -1,4 +1,4 @@
/* Copyright (C) 2002 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -26,11 +26,11 @@ __new_sem_getvalue (
     sem_t *sem,
     int *sval)
{
  struct sem *isem = (struct sem *) sem;
  struct new_sem *isem = (struct new_sem *) sem;

  /* XXX Check for valid SEM parameter.  */

  *sval = isem->count;
  *sval = isem->value;

  return 0;
}
@@ -1,4 +1,4 @@
/* Copyright (C) 2002 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -21,6 +21,7 @@
#include <semaphore.h>
#include <lowlevellock.h>
#include "semaphoreP.h"
#include <bits/kernel-features.h>


int
@@ -37,13 +38,18 @@ __new_sem_init (
    }

  /* Map to the internal type.  */
  struct sem *isem = (struct sem *) sem;
  struct new_sem *isem = (struct new_sem *) sem;

  /* Use the value the user provided.  */
  isem->count = value;
  /* Use the values the user provided.  */
  isem->value = value;
#ifdef __ASSUME_PRIVATE_FUTEX
  isem->private = pshared ? 0 : FUTEX_PRIVATE_FLAG;
#else
  isem->private = pshared ? 0 : THREAD_GETMEM (THREAD_SELF,
                                               header.private_futex);
#endif

  /* We can completely ignore the PSHARED parameter since inter-process
     use needs no special preparation.  */
  isem->nwaiters = 0;

  return 0;
}
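The struct new_sem fields initialized here (value, private, nwaiters) back the ordinary POSIX unnamed-semaphore API. A complete round trip through that API:

#include <semaphore.h>
#include <pthread.h>
#include <stdio.h>

static sem_t items;

static void *
producer (void *arg)
{
  /* ... produce one item ... */
  sem_post (&items);             /* V: wake one waiter */
  return NULL;
}

int
main (void)
{
  pthread_t t;
  sem_init (&items, 0, 0);       /* pshared = 0 selects private futexes */
  pthread_create (&t, NULL, producer, NULL);
  sem_wait (&items);             /* P: blocks until the post */
  pthread_join (t, NULL);
  sem_destroy (&items);
  puts ("done");
  return 0;
}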
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2006, 2007, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -38,14 +38,15 @@


/* Compatibility defines.  */
#define __endmntent endmntent
#define __fxstat64(vers, fd, buf) fstat64(fd, buf)
#define __getmntent_r getmntent_r
#define __setmntent setmntent
#define __statfs statfs
#define __libc_close close
#define __libc_open open
#define __libc_write write
#define __endmntent endmntent
#define __fxstat64(vers, fd, buf) fstat64(fd, buf)
#define __getmntent_r getmntent_r
#define __setmntent setmntent
#define __statfs statfs
#define __libc_close close
#define __libc_open open
#define __libc_write write


/* Information about the mount point.  */
struct mountpoint_info mountpoint attribute_hidden;
@@ -157,7 +158,7 @@ __sem_search (const void *a, const void *b)
void *__sem_mappings attribute_hidden;

/* Lock to protect the search tree.  */
lll_lock_t __sem_mappings_lock = LLL_LOCK_INITIALIZER;
int __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER;


/* Search for existing mapping and if possible add the one provided.  */
@@ -176,7 +177,7 @@ check_add_mapping (const char *name, size_t namelen, int fd, sem_t *existing)
#endif
  {
    /* Get the lock.  */
    lll_lock (__sem_mappings_lock);
    lll_lock (__sem_mappings_lock, LLL_PRIVATE);

    /* Search for an existing mapping given the information we have.  */
    struct inuse_sem *fake;
@@ -225,7 +226,7 @@ check_add_mapping (const char *name, size_t namelen, int fd, sem_t *existing)
    }

    /* Release the lock.  */
    lll_unlock (__sem_mappings_lock);
    lll_unlock (__sem_mappings_lock, LLL_PRIVATE);
  }

  if (result != existing && existing != SEM_FAILED && existing != MAP_FAILED)
@@ -317,24 +318,28 @@ sem_open (const char *name, int oflag, ...)
    }

  /* Create the initial file content.  */
  sem_t initsem;
  union
  {
    sem_t initsem;
    struct new_sem newsem;
  } sem;

  struct sem *iinitsem = (struct sem *) &initsem;
  iinitsem->count = value;
  sem.newsem.value = value;
  sem.newsem.private = 0;
  sem.newsem.nwaiters = 0;

  /* Initialize the remaining bytes as well.  */
  memset ((char *) &initsem + sizeof (struct sem), '\0',
          sizeof (sem_t) - sizeof (struct sem));
  memset ((char *) &sem.initsem + sizeof (struct new_sem), '\0',
          sizeof (sem_t) - sizeof (struct new_sem));

  tmpfname = (char *) alloca (mountpoint.dirlen + 6 + 1);
  char *xxxxxx = mempcpy (tmpfname, mountpoint.dir, mountpoint.dirlen);
  strcpy (xxxxxx, "XXXXXX");
  mempcpy (tmpfname, mountpoint.dir, mountpoint.dirlen);

  fd = __gen_tempname (tmpfname, __GT_FILE, mode);
  if (fd == -1)
    return SEM_FAILED;
    return SEM_FAILED;

  if (TEMP_FAILURE_RETRY (__libc_write (fd, &initsem, sizeof (sem_t)))
  if (TEMP_FAILURE_RETRY (__libc_write (fd, &sem.initsem, sizeof (sem_t)))
      == sizeof (sem_t)
      /* Map the sem_t structure from the file.  */
      && (result = (sem_t *) mmap (NULL, sizeof (sem_t),
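The named-semaphore counterpart of the unnamed API: sem_open creates the backing file under the shm mount point handled above and maps it in. A usage sketch:

#include <semaphore.h>
#include <fcntl.h>
#include <stdio.h>

int
main (void)
{
  /* Creates (or opens) /dev/shm-backed state with initial count 1.  */
  sem_t *s = sem_open ("/demo-sem", O_CREAT, 0600, 1);
  if (s == SEM_FAILED)
    {
      perror ("sem_open");
      return 1;
    }

  sem_wait (s);
  /* ... critical section shared with other processes ... */
  sem_post (s);

  sem_close (s);
  sem_unlink ("/demo-sem");      /* remove the name when done */
  return 0;
}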
@@ -26,7 +26,8 @@


int
sem_unlink (const char *name)
sem_unlink (
     const char *name)
{
  char *fname;
  size_t namelen;
@@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2006, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@@ -48,7 +48,7 @@ extern pthread_once_t __namedsem_once attribute_hidden;
extern void *__sem_mappings attribute_hidden;

/* Lock to protect the search tree.  */
extern lll_lock_t __sem_mappings_lock;
extern int __sem_mappings_lock attribute_hidden;


/* Initializer for mountpoint.  */
@@ -60,8 +60,10 @@ extern int __sem_search (const void *a, const void *b) attribute_hidden;

/* Prototypes of functions with multiple interfaces.  */
extern int __new_sem_init (sem_t *sem, int pshared, unsigned int value);
extern int __old_sem_init (sem_t *sem, int pshared, unsigned int value);
extern int __new_sem_destroy (sem_t *sem);
extern int __new_sem_post (sem_t *sem);
extern int __new_sem_wait (sem_t *sem);
extern int __old_sem_wait (sem_t *sem);
extern int __new_sem_trywait (sem_t *sem);
extern int __new_sem_getvalue (sem_t *sem, int *sval);
@ -21,7 +21,6 @@
|
||||
#define _TLS_H 1
|
||||
|
||||
#ifndef __ASSEMBLER__
|
||||
#include <dl-sysdep.h>
|
||||
|
||||
# include <stdbool.h>
|
||||
# include <stddef.h>
|
||||
|
@ -1,4 +1,5 @@
|
||||
/* Copyright (C) 2002 Free Software Foundation, Inc.
|
||||
/* Low level locking macros used in NPTL implementation. Stub version.
|
||||
Copyright (C) 2002, 2007 Free Software Foundation, Inc.
|
||||
This file is part of the GNU C Library.
|
||||
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
|
||||
|
||||
@ -20,16 +21,6 @@
|
||||
#include <atomic.h>
|
||||
|
||||
|
||||
/* Implement generic mutex. Basic futex syscall support is required:
|
||||
|
||||
lll_futex_wait(futex, value) - call sys_futex with FUTEX_WAIT
|
||||
and third parameter VALUE
|
||||
|
||||
lll_futex_wake(futex, value) - call sys_futex with FUTEX_WAKE
|
||||
and third parameter VALUE
|
||||
*/
|
||||
|
||||
|
||||
/* Mutex lock counter:
|
||||
bit 31 clear means unlocked;
|
||||
bit 31 set means locked.
|
||||
@ -65,7 +56,9 @@ __generic_mutex_lock (int *mutex)
|
||||
if (v >= 0)
|
||||
continue;
|
||||
|
||||
lll_futex_wait (mutex, v);
|
||||
lll_futex_wait (mutex, v,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
}
|
||||
}
|
||||
|
||||
@ -81,7 +74,9 @@ __generic_mutex_unlock (int *mutex)
|
||||
|
||||
/* There are other threads waiting for this mutex, wake one of them
|
||||
up. */
|
||||
lll_futex_wake (mutex, 1);
|
||||
lll_futex_wake (mutex, 1,
|
||||
// XYZ check mutex flag
|
||||
LLL_SHARED);
|
||||
}
|
||||
|
||||
|
||||
|
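For reference, the bit-31 protocol the stub header describes (bit 31 set means locked, the low bits count waiters) can be written in portable C. A sketch with GCC __atomic builtins and a raw futex syscall standing in for the <atomic.h> and lll_futex_* macros; it is an illustration of the algorithm, not the uClibc code:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void futex_wait (unsigned int *addr, unsigned int val)
{ syscall (SYS_futex, addr, FUTEX_WAIT, val, (void *) 0, (void *) 0, 0); }

static void futex_wake (unsigned int *addr, int nr)
{ syscall (SYS_futex, addr, FUTEX_WAKE, nr, (void *) 0, (void *) 0, 0); }

static void
generic_mutex_lock (unsigned int *mutex)
{
  /* Fast path: bit 31 was clear, we own the mutex now. */
  if (!(__atomic_fetch_or (mutex, 0x80000000u, __ATOMIC_ACQUIRE) & 0x80000000u))
    return;

  __atomic_fetch_add (mutex, 1, __ATOMIC_RELAXED);   /* register as waiter */
  for (;;)
    {
      if (!(__atomic_fetch_or (mutex, 0x80000000u, __ATOMIC_ACQUIRE)
            & 0x80000000u))
        {
          __atomic_fetch_sub (mutex, 1, __ATOMIC_RELAXED); /* no longer waiting */
          return;
        }
      unsigned int v = *mutex;
      if (!(v & 0x80000000u))   /* raced with an unlock; retry the lock bit */
        continue;
      futex_wait (mutex, v);    /* sleep only while the word still reads V */
    }
}

static void
generic_mutex_unlock (unsigned int *mutex)
{
  /* Adding 0x80000000 clears bit 31; the result is 0 iff nobody waits. */
  if (__atomic_add_fetch (mutex, 0x80000000u, __ATOMIC_RELEASE) == 0)
    return;
  futex_wake (mutex, 1);        /* wake one waiter */
}
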
@ -29,7 +29,8 @@


int
pthread_spin_lock (pthread_spinlock_t *lock)
pthread_spin_lock (
pthread_spinlock_t *lock)
{
__asm__ ("\n"
"1:\t" LOCK_PREFIX "decl %0\n\t"

@ -11,3 +11,7 @@ SYSINFO_OFFSET offsetof (tcbhead_t, sysinfo)
CLEANUP offsetof (struct pthread, cleanup)
CLEANUP_PREV offsetof (struct _pthread_cleanup_buffer, __prev)
MUTEX_FUTEX offsetof (pthread_mutex_t, __data.__lock)
POINTER_GUARD offsetof (tcbhead_t, pointer_guard)
#ifndef __ASSUME_PRIVATE_FUTEX
PRIVATE_FUTEX offsetof (tcbhead_t, private_futex)
#endif

@ -1,5 +1,5 @@
/* Definition for thread-local data handling. nptl/i386 version.
Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
@ -26,6 +26,8 @@
# include <stdint.h>
# include <stdlib.h>
# include <list.h>
# include <sysdep.h>
# include <bits/kernel-features.h>


/* Type for the dtv. */
@ -49,6 +51,15 @@ typedef struct
int multiple_threads;
uintptr_t sysinfo;
uintptr_t stack_guard;
uintptr_t pointer_guard;
int gscope_flag;
#ifndef __ASSUME_PRIVATE_FUTEX
int private_futex;
#else
int __unused1;
#endif
/* Reservation of some values for the TM ABI. */
void *__private_tm[5];
} tcbhead_t;

# define TLS_MULTIPLE_THREADS_IN_TCB 1
@ -64,7 +75,8 @@ typedef struct
#define HAVE_TLS_MODEL_ATTRIBUTE 1

/* Signal that TLS support is available. */
#define USE_TLS 1
#define USE_TLS 1


/* Alignment requirement for the stack. For IA-32 this is governed by
the SSE memory functions. */
@ -99,6 +111,9 @@ union user_desc_init
};


/* Get the thread descriptor definition. */
# include <descr.h>

/* This is the size of the initial TCB. Can't be just sizeof (tcbhead_t),
because NPTL getpid, __libc_alloca_cutoff etc. need (almost) the whole
struct pthread even when not linked with -lpthread. */
@ -113,9 +128,6 @@ union user_desc_init
/* Alignment requirements for the TCB. */
# define TLS_TCB_ALIGN __alignof__ (struct pthread)

/* Get the thread descriptor definition. */
#include <descr.h>

/* The TCB can have any size and the memory following the address the
thread pointer points to is unspecified. Allocate the TCB there. */
# define TLS_TCB_AT_TP 1
@ -220,7 +232,7 @@ union user_desc_init
_segdescr.vals[3] = 0x51; \
\
/* Install the TLS. */ \
__asm__ volatile (TLS_LOAD_EBX \
__asm__ volatile (TLS_LOAD_EBX \
"int $0x80\n\t" \
TLS_LOAD_EBX \
: "=a" (_result), "=m" (_segdescr.desc.entry_number) \
@ -256,7 +268,7 @@ union user_desc_init
do not get optimized away. */
# define THREAD_SELF \
({ struct pthread *__self; \
__asm__ ("movl %%gs:%c1,%0" : "=r" (__self) \
__asm__ ("movl %%gs:%c1,%0" : "=r" (__self) \
: "i" (offsetof (struct pthread, header.self))); \
__self;})

@ -270,11 +282,11 @@ union user_desc_init
# define THREAD_GETMEM(descr, member) \
({ __typeof (descr->member) __value; \
if (sizeof (__value) == 1) \
__asm__ volatile ("movb %%gs:%P2,%b0" \
__asm__ volatile ("movb %%gs:%P2,%b0" \
: "=q" (__value) \
: "0" (0), "i" (offsetof (struct pthread, member))); \
else if (sizeof (__value) == 4) \
__asm__ volatile ("movl %%gs:%P1,%0" \
__asm__ volatile ("movl %%gs:%P1,%0" \
: "=r" (__value) \
: "i" (offsetof (struct pthread, member))); \
else \
@ -297,12 +309,12 @@ union user_desc_init
# define THREAD_GETMEM_NC(descr, member, idx) \
({ __typeof (descr->member[0]) __value; \
if (sizeof (__value) == 1) \
__asm__ volatile ("movb %%gs:%P2(%3),%b0" \
__asm__ volatile ("movb %%gs:%P2(%3),%b0" \
: "=q" (__value) \
: "0" (0), "i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
else if (sizeof (__value) == 4) \
__asm__ volatile ("movl %%gs:%P1(,%2,4),%0" \
__asm__ volatile ("movl %%gs:%P1(,%2,4),%0" \
: "=r" (__value) \
: "i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
@ -350,7 +362,7 @@ union user_desc_init
/* Set member of the thread descriptor directly. */
# define THREAD_SETMEM_NC(descr, member, idx, value) \
({ if (sizeof (descr->member[0]) == 1) \
__asm__ volatile ("movb %b0,%%gs:%P1(%2)" : \
__asm__ volatile ("movb %b0,%%gs:%P1(%2)" : \
: "iq" (value), \
"i" (offsetof (struct pthread, member)), \
"r" (idx)); \
@ -366,7 +378,7 @@ union user_desc_init
4 or 8. */ \
abort (); \
\
__asm__ volatile ("movl %%eax,%%gs:%P1(,%2,8)\n\t" \
__asm__ volatile ("movl %%eax,%%gs:%P1(,%2,8)\n\t" \
"movl %%edx,%%gs:4+%P1(,%2,8)" : \
: "A" (value), \
"i" (offsetof (struct pthread, member)), \
@ -389,6 +401,17 @@ union user_desc_init
__ret; })


/* Atomic logical and. */
#define THREAD_ATOMIC_AND(descr, member, val) \
(void) ({ if (sizeof ((descr)->member) == 4) \
__asm__ volatile (LOCK_PREFIX "andl %1, %%gs:%P0" \
:: "i" (offsetof (struct pthread, member)), \
"ir" (val)); \
else \
/* Not necessary for other sizes in the moment. */ \
abort (); })


/* Atomic set bit. */
#define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
(void) ({ if (sizeof ((descr)->member) == 4) \
@ -424,6 +447,34 @@ union user_desc_init
= THREAD_GETMEM (THREAD_SELF, header.stack_guard))


/* Set the pointer guard field in the TCB head. */
#define THREAD_SET_POINTER_GUARD(value) \
THREAD_SETMEM (THREAD_SELF, header.pointer_guard, value)
#define THREAD_COPY_POINTER_GUARD(descr) \
((descr)->header.pointer_guard \
= THREAD_GETMEM (THREAD_SELF, header.pointer_guard))


/* Get and set the global scope generation counter in the TCB head. */
#define THREAD_GSCOPE_FLAG_UNUSED 0
#define THREAD_GSCOPE_FLAG_USED 1
#define THREAD_GSCOPE_FLAG_WAIT 2
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res; \
__asm__ volatile ("xchgl %0, %%gs:%P1" \
: "=r" (__res) \
: "i" (offsetof (struct pthread, header.gscope_flag)), \
"0" (THREAD_GSCOPE_FLAG_UNUSED)); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
} \
while (0)
#define THREAD_GSCOPE_SET_FLAG() \
THREAD_SETMEM (THREAD_SELF, header.gscope_flag, THREAD_GSCOPE_FLAG_USED)
#define THREAD_GSCOPE_WAIT() \
GL(dl_wait_lookup_done) ()

#endif /* __ASSEMBLER__ */

#endif /* tls.h */

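For reference, the THREAD_GSCOPE_* macros added above implement a small handshake with the dynamic linker: a thread marks its lookup scope as in use, and on reset it atomically swaps the flag back and wakes the linker only if the linker was observed waiting. A portable sketch of that protocol (futex_wake is a hypothetical FUTEX_WAKE wrapper, not a uClibc interface):

enum { GSCOPE_UNUSED, GSCOPE_USED, GSCOPE_WAIT };

extern void futex_wake (int *addr, int nr);   /* hypothetical syscall wrapper */

static void
gscope_set_flag (int *flag)
{
  __atomic_store_n (flag, GSCOPE_USED, __ATOMIC_SEQ_CST);
}

static void
gscope_reset_flag (int *flag)
{
  /* The exchange is what the i386 xchgl and the PPC atomic_exchange_rel
     both implement: publish UNUSED and learn the old value atomically. */
  if (__atomic_exchange_n (flag, GSCOPE_UNUSED, __ATOMIC_RELEASE)
      == GSCOPE_WAIT)
    futex_wake (flag, 1);     /* the dynamic linker is blocked on this word */
}
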
@ -14,3 +14,7 @@ MULTIPLE_THREADS_OFFSET thread_offsetof (header.multiple_threads)
#endif
PID thread_offsetof (pid)
TID thread_offsetof (tid)
POINTER_GUARD (offsetof (tcbhead_t, pointer_guard) - TLS_TCB_OFFSET - sizeof (tcbhead_t))
#ifndef __ASSUME_PRIVATE_FUTEX
PRIVATE_FUTEX_OFFSET thread_offsetof (header.private_futex)
#endif

@ -1,5 +1,5 @@
/* Definition for thread-local data handling. NPTL/PowerPC version.
Copyright (C) 2003, 2005 Free Software Foundation, Inc.
Copyright (C) 2003, 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
@ -47,7 +47,7 @@ typedef union dtv
#endif

/* Signal that TLS support is available. */
# define USE_TLS 1
# define USE_TLS 1

#ifndef __ASSEMBLER__

@ -64,9 +64,11 @@ typedef union dtv
# include <nptl/descr.h>

/* The stack_guard is accessed directly by GCC -fstack-protector code,
so it is a part of public ABI. The dtv field is private. */
so it is a part of public ABI. The dtv and pointer_guard fields
are private. */
typedef struct
{
uintptr_t pointer_guard;
uintptr_t stack_guard;
dtv_t *dtv;
} tcbhead_t;
@ -164,10 +166,44 @@ register void *__thread_register __asm__ ("r13");
= ((tcbhead_t *) ((char *) __thread_register \
- TLS_TCB_OFFSET))[-1].stack_guard)

/* Set the stack guard field in TCB head. */
# define THREAD_GET_POINTER_GUARD() \
(((tcbhead_t *) ((char *) __thread_register \
- TLS_TCB_OFFSET))[-1].pointer_guard)
# define THREAD_SET_POINTER_GUARD(value) \
(THREAD_GET_POINTER_GUARD () = (value))
# define THREAD_COPY_POINTER_GUARD(descr) \
(((tcbhead_t *) ((char *) (descr) \
+ TLS_PRE_TCB_SIZE))[-1].pointer_guard \
= THREAD_GET_POINTER_GUARD())

/* l_tls_offset == 0 is perfectly valid on PPC, so we have to use some
different value to mean unset l_tls_offset. */
# define NO_TLS_OFFSET -1

/* Get and set the global scope generation counter in struct pthread. */
#define THREAD_GSCOPE_FLAG_UNUSED 0
#define THREAD_GSCOPE_FLAG_USED 1
#define THREAD_GSCOPE_FLAG_WAIT 2
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
= atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
} \
while (0)
#define THREAD_GSCOPE_SET_FLAG() \
do \
{ \
THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
atomic_write_barrier (); \
} \
while (0)
#define THREAD_GSCOPE_WAIT() \
GL(dl_wait_lookup_done) ()

#endif /* __ASSEMBLER__ */

#endif /* tls.h */

@ -18,7 +18,7 @@ libpthread_CSRC = pthread_barrier_wait.c pthread_cond_broadcast.c \
pthread_rwlock_wrlock.c pthread_sigmask.c \
pthread_spin_destroy.c pthread_spin_init.c \
pthread_spin_unlock.c pt-sigfillset.c \
pt-longjmp.c
pt-longjmp.c tpp.c


ifeq ($(TARGET_ARCH),i386)
@ -43,6 +43,13 @@ SH_PTHREAD_EXCLUDE_LIST = pthread_spin_unlock.c pthread_spin_init.c \
libpthread_CSRC := $(filter-out $(SH_PTHREAD_EXCLUDE_LIST),$(libpthread_CSRC))
endif

ifeq ($(TARGET_ARCH),sparc)
SPARC_PTHREAD_EXCLUDE_LIST = pthread_barrier_init.c pthread_barrier_wait.c \
pthread_barrier_destroy.c

libpthread_CSRC := $(filter-out $(SPARC_PTHREAD_EXCLUDE_LIST),$(libpthread_CSRC))
endif

ifeq ($(TARGET_ARCH),x86_64)
X64_PTHREAD_EXCLUDE_LIST = pthread_spin_unlock.c pthread_spin_init.c \
pthread_barrier_wait.c pthread_cond_broadcast.c \

@ -1,5 +1,5 @@
/* Determine whether block of given size can be allocated on the stack or not.
Copyright (C) 2002 Free Software Foundation, Inc.
Copyright (C) 2002, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
@ -21,7 +21,8 @@
#include <limits.h>


extern inline int
extern int
__always_inline
__libc_use_alloca (size_t size)
{
return (__builtin_expect (size <= PTHREAD_STACK_MIN / 4, 1)

@ -1,5 +1,5 @@
/* libc-internal interface for mutex locks. NPTL version.
Copyright (C) 1996-2001, 2002, 2003, 2005 Free Software Foundation, Inc.
Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
@ -150,13 +150,17 @@ typedef pthread_key_t __libc_key_t;

/* Call thread functions through the function pointer table. */
#if defined SHARED && !defined NOT_IN_libc
# define PTF(NAME) __libc_pthread_functions.ptr_##NAME
# define PTFAVAIL(NAME) __libc_pthread_functions_init
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
(PTF(FUNC) != NULL ? PTF(FUNC) ARGS : ELSE)
(__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
PTHFCT_CALL (ptr_##FUNC, ARGS)
#else
# define PTF(NAME) NAME
# define PTFAVAIL(NAME) (NAME != NULL)
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
__libc_maybe_call (FUNC, ARGS, ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
FUNC ARGS
#endif


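For reference, the __libc_ptf_call machinery above is an instance of a common pattern: libc keeps a table of pthread entry points that libpthread fills in when it is loaded, so single-threaded processes skip locking entirely. A reduced model under illustrative names (not the real table):

#include <pthread.h>

struct pthread_functions_sketch
{
  int (*ptr_pthread_mutex_lock) (pthread_mutex_t *);
  int (*ptr_pthread_mutex_unlock) (pthread_mutex_t *);
};

static struct pthread_functions_sketch fns;  /* filled in by libpthread */
static int fns_init;                         /* nonzero once registered */

#define ptf_call(FUNC, ARGS, ELSE) \
  (fns_init ? fns.FUNC ARGS : (ELSE))

/* Inside libc a lock then costs nothing until libpthread is loaded:
   ptf_call (ptr_pthread_mutex_lock, (&some_mutex), 0);  */
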
@ -168,8 +172,15 @@ typedef pthread_key_t __libc_key_t;
# define __libc_lock_init(NAME) \
__libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
#endif
#define __libc_rwlock_init(NAME) \
#if defined SHARED && !defined NOT_IN_libc
/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER, 0) is
inefficient. */
# define __libc_rwlock_init(NAME) \
(__builtin_memset (&(NAME), '\0', sizeof (NAME)), 0)
#else
# define __libc_rwlock_init(NAME) \
__libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
#endif

/* Same as last but this time we initialize a recursive mutex. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
@ -210,8 +221,12 @@ typedef pthread_key_t __libc_key_t;
# define __libc_lock_fini(NAME) \
__libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
#define __libc_rwlock_fini(NAME) \
#if defined SHARED && !defined NOT_IN_libc
# define __libc_rwlock_fini(NAME) ((void) 0)
#else
# define __libc_rwlock_fini(NAME) \
__libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
#endif

/* Finalize recursive named lock. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
@ -224,7 +239,7 @@ typedef pthread_key_t __libc_key_t;
/* Lock the named lock variable. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock(NAME) \
({ lll_lock (NAME); 0; })
({ lll_lock (NAME, LLL_PRIVATE); 0; })
#else
# define __libc_lock_lock(NAME) \
__libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
@ -241,7 +256,7 @@ typedef pthread_key_t __libc_key_t;
void *self = THREAD_SELF; \
if ((NAME).owner != self) \
{ \
lll_lock ((NAME).lock); \
lll_lock ((NAME).lock, LLL_PRIVATE); \
(NAME).owner = self; \
} \
++(NAME).cnt; \
@ -295,7 +310,7 @@ typedef pthread_key_t __libc_key_t;
/* Unlock the named lock variable. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_unlock(NAME) \
lll_unlock (NAME)
lll_unlock (NAME, LLL_PRIVATE)
#else
# define __libc_lock_unlock(NAME) \
__libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
@ -311,7 +326,7 @@ typedef pthread_key_t __libc_key_t;
if (--(NAME).cnt == 0) \
{ \
(NAME).owner = NULL; \
lll_unlock ((NAME).lock); \
lll_unlock ((NAME).lock, LLL_PRIVATE); \
} \
} while (0)
#else
@ -353,8 +368,9 @@ typedef pthread_key_t __libc_key_t;
/* Call handler iff the first call. */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
do { \
if (PTF(__pthread_once) != NULL) \
PTF(__pthread_once) (&(ONCE_CONTROL), INIT_FUNCTION); \
if (PTFAVAIL (__pthread_once)) \
__libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL), \
INIT_FUNCTION)); \
else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) { \
INIT_FUNCTION (); \
(ONCE_CONTROL) |= 2; \
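For reference, the __libc_once macro above carries a single-threaded fallback: when pthread_once is unavailable the once-control word is driven directly. A sketch of that control flow as a plain function (the function and flag names are illustrative):

static int pthread_once_available;   /* models PTFAVAIL (__pthread_once) */

static void
libc_once_sketch (int *once_control, void (*init_function) (void))
{
  if (pthread_once_available)
    {
      /* The real macro forwards through the function pointer table:
         __libc_ptf_call_always (__pthread_once, ...).  */
    }
  else if (*once_control == 0)       /* still PTHREAD_ONCE_INIT */
    {
      init_function ();
      *once_control |= 2;            /* mark the initializer as done */
    }
}
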
@ -380,9 +396,10 @@ extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer
{ struct _pthread_cleanup_buffer _buffer; \
int _avail; \
if (DOIT) { \
_avail = PTF(_pthread_cleanup_push_defer) != NULL; \
_avail = PTFAVAIL (_pthread_cleanup_push_defer); \
if (_avail) { \
PTF(_pthread_cleanup_push_defer) (&_buffer, FCT, ARG); \
__libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT, \
ARG)); \
} else { \
_buffer.__routine = (FCT); \
_buffer.__arg = (ARG); \
@ -394,7 +411,7 @@ extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer
/* End critical region with cleanup. */
#define __libc_cleanup_region_end(DOIT) \
if (_avail) { \
PTF(_pthread_cleanup_pop_restore) (&_buffer, DOIT); \
__libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
} else if (DOIT) \
_buffer.__routine (_buffer.__arg); \
}
@ -402,15 +419,12 @@ extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer
/* Sometimes we have to exit the block in the middle. */
#define __libc_cleanup_end(DOIT) \
if (_avail) { \
PTF(_pthread_cleanup_pop_restore) (&_buffer, DOIT); \
__libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
} else if (DOIT) \
_buffer.__routine (_buffer.__arg)


/* Normal cleanup handling, based on C cleanup attribute. */
__extern_inline void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f);

__extern_inline void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
{
@ -531,6 +545,7 @@ weak_extern (__pthread_key_create)
weak_extern (__pthread_setspecific)
weak_extern (__pthread_getspecific)
weak_extern (__pthread_once)
weak_extern (__pthread_initialize)
weak_extern (__pthread_atfork)
#ifdef SHARED
weak_extern (_pthread_cleanup_push_defer)
@ -556,6 +571,7 @@ weak_extern (pthread_setcancelstate)
# pragma weak __pthread_setspecific
# pragma weak __pthread_getspecific
# pragma weak __pthread_once
# pragma weak __pthread_initialize
# pragma weak __pthread_atfork
# pragma weak _pthread_cleanup_push_defer
# pragma weak _pthread_cleanup_pop_restore

@ -1,5 +1,5 @@
/* Signal handling function for threaded programs.
Copyright (C) 1998, 1999, 2000, 2002 Free Software Foundation, Inc.
Copyright (C) 1998, 1999, 2000, 2002, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
@ -35,4 +35,10 @@ extern int pthread_sigmask (int __how,
/* Send signal SIGNO to the given thread. */
extern int pthread_kill (pthread_t __threadid, int __signo) __THROW;

#ifdef __USE_GNU
/* Queue signal and data to a thread. */
extern int pthread_sigqueue (pthread_t __threadid, int __signo,
const union sigval __value) __THROW;
#endif

#endif /* bits/sigthread.h */

@ -1,5 +1,5 @@
/* Thread package specific definitions of stream lock type. NPTL version.
Copyright (C) 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
Copyright (C) 2000, 2001, 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
@ -42,7 +42,7 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
void *__self = THREAD_SELF; \
if ((_name).owner != __self) \
{ \
lll_lock ((_name).lock); \
lll_lock ((_name).lock, LLL_PRIVATE); \
(_name).owner = __self; \
} \
++(_name).cnt; \
@ -72,7 +72,7 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
if (--(_name).cnt == 0) \
{ \
(_name).owner = NULL; \
lll_unlock ((_name).lock); \
lll_unlock ((_name).lock, LLL_PRIVATE); \
} \
} while (0)

@ -94,9 +94,15 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
__attribute__((cleanup (_IO_acquire_lock_fct))) \
= (_fp); \
_IO_flockfile (_IO_acquire_lock_file);

# define _IO_acquire_lock_clear_flags2(_fp) \
do { \
_IO_FILE *_IO_acquire_lock_file \
__attribute__((cleanup (_IO_acquire_lock_clear_flags2_fct))) \
= (_fp); \
_IO_flockfile (_IO_acquire_lock_file);
# else
# define _IO_acquire_lock(_fp) _IO_acquire_lock_needs_exceptions_enabled
# define _IO_acquire_lock_clear_flags2(_fp) _IO_acquire_lock (_fp)
# endif
# define _IO_release_lock(_fp) ; } while (0)

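For reference, both the recursive libc lock and the _IO_lock_t above use the same owner/count recursion scheme: only the first entry by a thread takes the low-level lock, later entries just bump a counter. A self-contained sketch (low_level_lock/unlock are hypothetical stand-ins for lll_lock/lll_unlock):

typedef struct { int lock; int cnt; void *owner; } rec_lock_t;

extern void low_level_lock (int *);     /* hypothetical lll_lock stand-in */
extern void low_level_unlock (int *);   /* hypothetical lll_unlock stand-in */

static void
rec_lock_acquire (rec_lock_t *l, void *self)
{
  if (l->owner != self)                 /* first entry by this thread */
    {
      low_level_lock (&l->lock);
      l->owner = self;
    }
  ++l->cnt;                             /* count every entry */
}

static void
rec_lock_release (rec_lock_t *l)
{
  if (--l->cnt == 0)                    /* last exit releases for real */
    {
      l->owner = NULL;
      low_level_unlock (&l->lock);
    }
}
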
@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -56,11 +56,11 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
PREPARE_CREATE;
#endif

if (stopped)
/* We Make sure the thread does not run far by forcing it to get a
if (__builtin_expect (stopped != 0, 0))
/* We make sure the thread does not run far by forcing it to get a
lock. We lock it here too so that the new thread cannot continue
until we tell it to. */
lll_lock (pd->lock);
lll_lock (pd->lock, LLL_PRIVATE);

/* One more thread. We cannot have the thread do this itself, since it
might exist but not have been scheduled yet by the time we've returned
@ -84,7 +84,8 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
if (IS_DETACHED (pd))
__deallocate_stack (pd);

return errno;
/* We have to translate error codes. */
return errno == ENOMEM ? EAGAIN : errno;
}

/* Now we have the possibility to set scheduling parameters etc. */
@ -97,7 +98,7 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
if (attr->cpuset != NULL)
{
res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
sizeof (cpu_set_t), attr->cpuset);
attr->cpusetsize, attr->cpuset);

if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
{
@ -223,7 +224,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
__nptl_create_event ();

/* And finally restart the new thread. */
lll_unlock (pd->lock);
lll_unlock (pd->lock, LLL_PRIVATE);
}

return res;
@ -242,6 +243,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
|| (attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0))
stopped = true;
pd->stopped_start = stopped;
pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);

/* Actually create the thread. */
int res = do_clone (pd, attr, clone_flags, start_thread,
@ -249,7 +251,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,

if (res == 0 && stopped)
/* And finally restart the new thread. */
lll_unlock (pd->lock);
lll_unlock (pd->lock, LLL_PRIVATE)

return res;
}
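For reference, the stopped-start logic in the hunks above is a lock handshake: the parent takes pd->lock before cloning, the child's first action is to take the same lock, so it cannot run past that point until the parent (after setting scheduling parameters) unlocks. The same pattern with portable pthread primitives instead of lll_lock/lll_unlock:

#include <pthread.h>

static pthread_mutex_t gate = PTHREAD_MUTEX_INITIALIZER;

static void *
child (void *arg)
{
  pthread_mutex_lock (&gate);       /* blocks until the parent releases us */
  pthread_mutex_unlock (&gate);
  /* ... normal thread work starts only now ... */
  return arg;
}

static void
spawn_stopped (pthread_t *t)
{
  pthread_mutex_lock (&gate);       /* hold the gate before creating */
  pthread_create (t, NULL, child, NULL);
  /* adjust scheduling parameters etc. while the child is parked */
  pthread_mutex_unlock (&gate);     /* now let it run */
}
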
@ -1,4 +1,4 @@
/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -17,92 +17,9 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */

#include <setjmp.h>
#include <signal.h>
#include <stdlib.h>
#include "pthreadP.h"
#include "atomic.h"


#ifdef IS_IN_librt
/* The next two functions are similar to pthread_setcanceltype() but
more specialized for the use in the cancelable functions like write().
They do not need to check parameters etc. */
int
attribute_hidden
__librt_enable_asynccancel (void)
{
struct pthread *self = THREAD_SELF;
int oldval = THREAD_GETMEM (self, cancelhandling);

while (1)
{
int newval = oldval | CANCELTYPE_BITMASK;

if (__builtin_expect ((oldval & CANCELED_BITMASK) != 0, 0))
{
/* If we are already exiting or if PTHREAD_CANCEL_DISABLED,
stop right here. */
if ((oldval & (EXITING_BITMASK | CANCELSTATE_BITMASK)) != 0)
break;

int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
newval, oldval);
if (__builtin_expect (curval != oldval, 0))
{
/* Somebody else modified the word, try again. */
oldval = curval;
continue;
}

THREAD_SETMEM (self, result, PTHREAD_CANCELED);

__do_cancel ();

/* NOTREACHED */
}

int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
oldval);
if (__builtin_expect (curval == oldval, 1))
break;

/* Prepare the next round. */
oldval = curval;
}

return oldval;
}


void
internal_function attribute_hidden
__librt_disable_asynccancel (int oldtype)
{
/* If asynchronous cancellation was enabled before we do not have
anything to do. */
if (oldtype & CANCELTYPE_BITMASK)
return;

struct pthread *self = THREAD_SELF;
int oldval = THREAD_GETMEM (self, cancelhandling);

while (1)
{
int newval = oldval & ~CANCELTYPE_BITMASK;

if (newval == oldval)
break;

int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
oldval);
if (__builtin_expect (curval == oldval, 1))
break;

/* Prepare the next round. */
oldval = curval;
}
}


#endif
#define __pthread_enable_asynccancel __librt_enable_asynccancel
#define __pthread_disable_asynccancel __librt_disable_asynccancel
#include "cancellation.c"

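For reference, the deleted functions above are built around a compare-and-swap retry loop: recompute the desired word from the last observed value and retry until no other thread modified it in between. The same shape in standalone C with GCC builtins:

static int
set_bits_cas (int *word, int mask)
{
  int oldval = __atomic_load_n (word, __ATOMIC_RELAXED);
  for (;;)
    {
      int newval = oldval | mask;
      if (newval == oldval)
        break;                        /* nothing left to do */
      /* On failure the builtin reloads *word into oldval, so the next
         iteration recomputes newval from the fresh value. */
      if (__atomic_compare_exchange_n (word, &oldval, newval, 0,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
        break;
    }
  return oldval;                      /* value before our update */
}
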
@ -1,4 +1,4 @@
/* Copyright (C) 2002 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

@ -46,24 +46,13 @@ typedef struct list_head
static inline void
list_add (list_t *newp, list_t *head)
{
head->next->prev = newp;
newp->next = head->next;
newp->prev = head;
head->next->prev = newp;
head->next = newp;
}


/* Add new element at the tail of the list. */
static inline void
list_add_tail (list_t *newp, list_t *head)
{
head->prev->next = newp;
newp->next = head;
newp->prev = head->prev;
head->prev = newp;
}


/* Remove element from list. */
static inline void
list_del (list_t *elem)

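For reference, list_add above links a node into a circular doubly-linked list headed by a sentinel; the reordering in the hunk fills in the new node's pointers before any list pointer is redirected to it. A self-contained usage example:

#include <stddef.h>
#include <stdio.h>

typedef struct list_head { struct list_head *next, *prev; } list_t;

static void list_init (list_t *h) { h->next = h->prev = h; }

static void
list_add_sketch (list_t *newp, list_t *head)
{
  newp->next = head->next;     /* wire up the new node first */
  newp->prev = head;
  head->next->prev = newp;     /* only then point the list at it */
  head->next = newp;
}

struct item { list_t link; int val; };

int main (void)
{
  list_t head;
  struct item a = { .val = 1 }, b = { .val = 2 };
  list_init (&head);
  list_add_sketch (&a.link, &head);
  list_add_sketch (&b.link, &head);   /* head insertion: b now precedes a */
  for (list_t *p = head.next; p != &head; p = p->next)
    printf ("%d\n",
            ((struct item *) ((char *) p - offsetof (struct item, link)))->val);
  return 0;                           /* prints 2 then 1 */
}
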
@ -1,6 +1,6 @@
/* Basic platform-independent macro definitions for mutexes,
thread-specific data and parameters for malloc.
Copyright (C) 2003 Free Software Foundation, Inc.
Copyright (C) 2003, 2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
@ -38,13 +38,24 @@ extern void *__dso_handle __attribute__ ((__weak__));

#include <fork.h>

#define ATFORK_MEM static struct fork_handler atfork_mem

#ifdef SHARED
# define thread_atfork(prepare, parent, child) \
__register_atfork (prepare, parent, child, __dso_handle)
atfork_mem.prepare_handler = prepare; \
atfork_mem.parent_handler = parent; \
atfork_mem.child_handler = child; \
atfork_mem.dso_handle = __dso_handle; \
atfork_mem.refcntr = 1; \
__linkin_atfork (&atfork_mem)
#else
# define thread_atfork(prepare, parent, child) \
__register_atfork (prepare, parent, child, \
&__dso_handle == NULL ? NULL : __dso_handle)
atfork_mem.prepare_handler = prepare; \
atfork_mem.parent_handler = parent; \
atfork_mem.child_handler = child; \
atfork_mem.dso_handle = &__dso_handle == NULL ? NULL : __dso_handle; \
atfork_mem.refcntr = 1; \
__linkin_atfork (&atfork_mem)
#endif

/* thread specific data for glibc */
@ -52,10 +63,10 @@ extern void *__dso_handle __attribute__ ((__weak__));
#include <bits/libc-tsd.h>

typedef int tsd_key_t[1]; /* no key data structure, libc magic does it */
__libc_tsd_define (static, MALLOC) /* declaration/common definition */
__libc_tsd_define (static, void *, MALLOC) /* declaration/common definition */
#define tsd_key_create(key, destr) ((void) (key))
#define tsd_setspecific(key, data) __libc_tsd_set (MALLOC, (data))
#define tsd_getspecific(key, vptr) ((vptr) = __libc_tsd_get (MALLOC))
#define tsd_setspecific(key, data) __libc_tsd_set (void *, MALLOC, (data))
#define tsd_getspecific(key, vptr) ((vptr) = __libc_tsd_get (void *, MALLOC))

#include <sysdeps/generic/malloc-machine.h>

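For reference, the new thread_atfork expansion above fills in a static fork handler record and links it into the fork handler list, instead of calling __register_atfork. An illustrative model of what the macro pair expands to (struct layout and linkin_atfork are assumptions mirroring the macro body, not real uClibc interfaces):

#include <stddef.h>

struct fork_handler_sketch
{
  void (*prepare_handler) (void);
  void (*parent_handler) (void);
  void (*child_handler) (void);
  void *dso_handle;
  unsigned int refcntr;
};

extern void linkin_atfork (struct fork_handler_sketch *);  /* models __linkin_atfork */

static struct fork_handler_sketch atfork_mem_sketch;        /* models ATFORK_MEM */

static void
register_malloc_atfork (void (*prep) (void), void (*parent) (void),
                        void (*child) (void))
{
  atfork_mem_sketch.prepare_handler = prep;
  atfork_mem_sketch.parent_handler = parent;
  atfork_mem_sketch.child_handler = child;
  atfork_mem_sketch.dso_handle = NULL;     /* __dso_handle in the real macro */
  atfork_mem_sketch.refcntr = 1;
  linkin_atfork (&atfork_mem_sketch);
}
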
@ -72,7 +72,7 @@ call_initialize_minimal (void)
}

SECTION (".init");
extern void _init (void);
extern void __attribute__ ((section (".init"))) _init (void);
void
_init (void)
{
@ -93,7 +93,7 @@ asm ("\n/*@_init_EPILOG_ENDS*/");
asm ("\n/*@_fini_PROLOG_BEGINS*/");

SECTION (".fini");
extern void _fini (void);
extern void __attribute__ ((section (".fini"))) _fini (void);
void
_fini (void)
{

@ -21,8 +21,6 @@
#include <stdlib.h>
#include "pthreadP.h"

extern void __libc_longjmp (sigjmp_buf env, int val)
__attribute__ ((noreturn));
void
longjmp (jmp_buf env, int val)
{

@ -1,4 +1,4 @@
/* Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
/* Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.

@ -23,6 +23,7 @@
#include <pthread.h>
#include <setjmp.h>
#include <internaltypes.h>
#include <sysdep.h>

struct xid_command;

@ -72,12 +73,8 @@ struct pthread_functions
int (*ptr_pthread_mutex_destroy) (pthread_mutex_t *);
int (*ptr_pthread_mutex_init) (pthread_mutex_t *,
const pthread_mutexattr_t *);
int (*ptr_pthread_mutex_trylock) (pthread_mutex_t *);
int (*ptr_pthread_mutex_lock) (pthread_mutex_t *);
int (*ptr_pthread_mutex_unlock) (pthread_mutex_t *);
int (*ptr_pthread_mutexattr_init) (pthread_mutexattr_t *attr);
int (*ptr_pthread_mutexattr_destroy) (pthread_mutexattr_t *attr);
int (*ptr_pthread_mutexattr_settype) (pthread_mutexattr_t *attr, int kind);
pthread_t (*ptr_pthread_self) (void);
int (*ptr_pthread_setcancelstate) (int, int *);
int (*ptr_pthread_setcanceltype) (int, int *);
@ -99,9 +96,22 @@ struct pthread_functions
__attribute ((noreturn)) __cleanup_fct_attribute;
void (*ptr__nptl_deallocate_tsd) (void);
int (*ptr__nptl_setxid) (struct xid_command *);
void (*ptr_freeres) (void);
};

/* Variable in libc.so. */
extern struct pthread_functions __libc_pthread_functions attribute_hidden;
extern int __libc_pthread_functions_init attribute_hidden;

#ifdef PTR_DEMANGLE
# define PTHFCT_CALL(fct, params) \
({ __typeof (__libc_pthread_functions.fct) __p; \
__p = __libc_pthread_functions.fct; \
PTR_DEMANGLE (__p); \
__p params; })
#else
# define PTHFCT_CALL(fct, params) \
__libc_pthread_functions.fct params
#endif

#endif /* pthread-functions.h */

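For reference, PTHFCT_CALL demangles each stored function pointer before calling it: pointers in the table are kept mangled with a per-process secret so that overwriting the table with a plain address yields garbage rather than a usable jump target. A simplified model (the real PTR_MANGLE also rotates the value and keeps the guard in the TCB, both assumptions simplified away here):

#include <stdint.h>

static uintptr_t pointer_guard_sketch = 0x5a5a5a5aU;  /* per-process secret */

#define MANGLE(p)   ((void *) ((uintptr_t) (p) ^ pointer_guard_sketch))
#define DEMANGLE(p) MANGLE (p)        /* XOR is its own inverse */

static void *stored_fn;   /* would live in __libc_pthread_functions */

static void store_fn (void (*fn) (void)) { stored_fn = MANGLE (fn); }

static void call_fn (void)
{
  void (*fn) (void) = (void (*) (void)) DEMANGLE (stored_fn);
  fn ();                              /* only valid after demangling */
}
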
@ -1,4 +1,5 @@
/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or
@ -20,6 +21,7 @@
#define _PTHREAD_H 1

#include <features.h>
#include <endian.h>
#include <sched.h>
#include <time.h>

@ -50,7 +52,7 @@ enum
PTHREAD_MUTEX_RECURSIVE_NP,
PTHREAD_MUTEX_ERRORCHECK_NP,
PTHREAD_MUTEX_ADAPTIVE_NP
#ifdef __USE_UNIX98
#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
,
PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP,
PTHREAD_MUTEX_RECURSIVE = PTHREAD_MUTEX_RECURSIVE_NP,
@ -63,6 +65,30 @@ enum
#endif
};


#ifdef __USE_XOPEN2K
/* Robust mutex or not flags. */
enum
{
PTHREAD_MUTEX_STALLED,
PTHREAD_MUTEX_STALLED_NP = PTHREAD_MUTEX_STALLED,
PTHREAD_MUTEX_ROBUST,
PTHREAD_MUTEX_ROBUST_NP = PTHREAD_MUTEX_ROBUST
};
#endif


#ifdef __USE_UNIX98
/* Mutex protocols. */
enum
{
PTHREAD_PRIO_NONE,
PTHREAD_PRIO_INHERIT,
PTHREAD_PRIO_PROTECT
};
#endif


/* Mutex initializers. */
#if __WORDSIZE == 64
# define PTHREAD_MUTEX_INITIALIZER \
@ -88,6 +114,7 @@ enum
# endif
#endif


/* Read-write lock types. */
#if defined __USE_UNIX98 || defined __USE_XOPEN2K
enum
@ -99,21 +126,23 @@ enum
};

/* Read-write lock initializers. */
# if __WORDSIZE == 64
# define PTHREAD_RWLOCK_INITIALIZER \
# define PTHREAD_RWLOCK_INITIALIZER \
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
# else
# define PTHREAD_RWLOCK_INITIALIZER \
{ { 0, 0, 0, 0, 0, 0, 0, 0 } }
# endif
# ifdef __USE_GNU
# if __WORDSIZE == 64
# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP } }
PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP } }
# else
# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
{ { 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, 0 } }
# if __BYTE_ORDER == __LITTLE_ENDIAN
# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
{ { 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, \
0, 0, 0, 0 } }
# else
# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP,\
0 } }
# endif
# endif
# endif
#endif /* Unix98 or XOpen2K */
@ -201,7 +230,7 @@ __BEGIN_DECLS
extern int pthread_create (pthread_t *__restrict __newthread,
__const pthread_attr_t *__restrict __attr,
void *(*__start_routine) (void *),
void *__restrict __arg) __THROW;
void *__restrict __arg) __THROW __nonnull ((1, 3));

/* Terminate calling thread.

@ -251,71 +280,78 @@ extern int pthread_equal (pthread_t __thread1, pthread_t __thread2) __THROW;
/* Initialize thread attribute *ATTR with default attributes
(detachstate is PTHREAD_JOINABLE, scheduling policy is SCHED_OTHER,
no user-provided stack). */
extern int pthread_attr_init (pthread_attr_t *__attr) __THROW;
extern int pthread_attr_init (pthread_attr_t *__attr) __THROW __nonnull ((1));

/* Destroy thread attribute *ATTR. */
extern int pthread_attr_destroy (pthread_attr_t *__attr) __THROW;
extern int pthread_attr_destroy (pthread_attr_t *__attr)
__THROW __nonnull ((1));

/* Get detach state attribute. */
extern int pthread_attr_getdetachstate (__const pthread_attr_t *__attr,
int *__detachstate) __THROW;
int *__detachstate)
__THROW __nonnull ((1, 2));

/* Set detach state attribute. */
extern int pthread_attr_setdetachstate (pthread_attr_t *__attr,
int __detachstate) __THROW;
int __detachstate)
__THROW __nonnull ((1));


/* Get the size of the guard area created for stack overflow protection. */
extern int pthread_attr_getguardsize (__const pthread_attr_t *__attr,
size_t *__guardsize) __THROW;
size_t *__guardsize)
__THROW __nonnull ((1, 2));

/* Set the size of the guard area created for stack overflow protection. */
extern int pthread_attr_setguardsize (pthread_attr_t *__attr,
size_t __guardsize) __THROW;
size_t __guardsize)
__THROW __nonnull ((1));


/* Return in *PARAM the scheduling parameters of *ATTR. */
extern int pthread_attr_getschedparam (__const pthread_attr_t *__restrict
__attr,
struct sched_param *__restrict __param)
__THROW;
__THROW __nonnull ((1, 2));

/* Set scheduling parameters (priority, etc) in *ATTR according to PARAM. */
extern int pthread_attr_setschedparam (pthread_attr_t *__restrict __attr,
__const struct sched_param *__restrict
__param) __THROW;
__param) __THROW __nonnull ((1, 2));

/* Return in *POLICY the scheduling policy of *ATTR. */
extern int pthread_attr_getschedpolicy (__const pthread_attr_t *__restrict
__attr, int *__restrict __policy)
__THROW;
__THROW __nonnull ((1, 2));

/* Set scheduling policy in *ATTR according to POLICY. */
extern int pthread_attr_setschedpolicy (pthread_attr_t *__attr, int __policy)
__THROW;
__THROW __nonnull ((1));

/* Return in *INHERIT the scheduling inheritance mode of *ATTR. */
extern int pthread_attr_getinheritsched (__const pthread_attr_t *__restrict
__attr, int *__restrict __inherit)
__THROW;
__THROW __nonnull ((1, 2));

/* Set scheduling inheritance mode in *ATTR according to INHERIT. */
extern int pthread_attr_setinheritsched (pthread_attr_t *__attr,
int __inherit) __THROW;
int __inherit)
__THROW __nonnull ((1));


/* Return in *SCOPE the scheduling contention scope of *ATTR. */
extern int pthread_attr_getscope (__const pthread_attr_t *__restrict __attr,
int *__restrict __scope) __THROW;
int *__restrict __scope)
__THROW __nonnull ((1, 2));

/* Set scheduling contention scope in *ATTR according to SCOPE. */
extern int pthread_attr_setscope (pthread_attr_t *__attr, int __scope)
__THROW;
__THROW __nonnull ((1));

/* Return the previously set address for the stack. */
extern int pthread_attr_getstackaddr (__const pthread_attr_t *__restrict
__attr, void **__restrict __stackaddr)
__THROW __attribute_deprecated__;
__THROW __nonnull ((1, 2)) __attribute_deprecated__;

/* Set the starting address of the stack of the thread to be created.
Depending on whether the stack grows up or down the value must either
@ -323,30 +359,32 @@ extern int pthread_attr_getstackaddr (__const pthread_attr_t *__restrict
minimal size of the block must be PTHREAD_STACK_MIN. */
extern int pthread_attr_setstackaddr (pthread_attr_t *__attr,
void *__stackaddr)
__THROW __attribute_deprecated__;
__THROW __nonnull ((1)) __attribute_deprecated__;

/* Return the currently used minimal stack size. */
extern int pthread_attr_getstacksize (__const pthread_attr_t *__restrict
__attr, size_t *__restrict __stacksize)
__THROW;
__THROW __nonnull ((1, 2));

/* Add information about the minimum stack size needed for the thread
to be started. This size must never be less than PTHREAD_STACK_MIN
and must also not exceed the system limits. */
extern int pthread_attr_setstacksize (pthread_attr_t *__attr,
size_t __stacksize) __THROW;
size_t __stacksize)
__THROW __nonnull ((1));

#ifdef __USE_XOPEN2K
/* Return the previously set address for the stack. */
extern int pthread_attr_getstack (__const pthread_attr_t *__restrict __attr,
void **__restrict __stackaddr,
size_t *__restrict __stacksize) __THROW;
size_t *__restrict __stacksize)
__THROW __nonnull ((1, 2, 3));

/* The following two interfaces are intended to replace the last two. They
require setting the address as well as the size since only setting the
address will make the implementation on some architectures impossible. */
extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
size_t __stacksize) __THROW;
size_t __stacksize) __THROW __nonnull ((1));
#endif

#ifdef __USE_GNU
@ -354,19 +392,22 @@ extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
the processors represented in CPUSET. */
extern int pthread_attr_setaffinity_np (pthread_attr_t *__attr,
size_t __cpusetsize,
__const cpu_set_t *__cpuset) __THROW;
__const cpu_set_t *__cpuset)
__THROW __nonnull ((1, 3));

/* Get bit set in CPUSET representing the processors threads created with
ATTR can run on. */
extern int pthread_attr_getaffinity_np (__const pthread_attr_t *__attr,
size_t __cpusetsize,
cpu_set_t *__cpuset) __THROW;
cpu_set_t *__cpuset)
__THROW __nonnull ((1, 3));


/* Initialize thread attribute *ATTR with attributes corresponding to the
already running thread TH. It shall be called on unitialized ATTR
already running thread TH. It shall be called on uninitialized ATTR
and destroyed with pthread_attr_destroy when no longer needed. */
extern int pthread_getattr_np (pthread_t __th, pthread_attr_t *__attr) __THROW;
extern int pthread_getattr_np (pthread_t __th, pthread_attr_t *__attr)
__THROW __nonnull ((2));
#endif


@ -376,13 +417,13 @@ extern int pthread_getattr_np (pthread_t __th, pthread_attr_t *__attr) __THROW;
and *PARAM. */
extern int pthread_setschedparam (pthread_t __target_thread, int __policy,
__const struct sched_param *__param)
__THROW;
__THROW __nonnull ((3));

/* Return in *POLICY and *PARAM the scheduling parameters for TARGET_THREAD. */
extern int pthread_getschedparam (pthread_t __target_thread,
int *__restrict __policy,
struct sched_param *__restrict __param)
__THROW;
__THROW __nonnull ((2, 3));

/* Set the scheduling priority for TARGET_THREAD. */
extern int pthread_setschedprio (pthread_t __target_thread, int __prio)
@ -408,11 +449,13 @@ extern int pthread_yield (void) __THROW;
/* Limit specified thread TH to run only on the processors represented
in CPUSET. */
extern int pthread_setaffinity_np (pthread_t __th, size_t __cpusetsize,
__const cpu_set_t *__cpuset) __THROW;
__const cpu_set_t *__cpuset)
__THROW __nonnull ((3));

/* Get bit set in CPUSET representing the processors TH can run on. */
extern int pthread_getaffinity_np (pthread_t __th, size_t __cpusetsize,
cpu_set_t *__cpuset) __THROW;
cpu_set_t *__cpuset)
__THROW __nonnull ((3));
#endif


@ -426,7 +469,7 @@ extern int pthread_getaffinity_np (pthread_t __th, size_t __cpusetsize,
The initialization functions might throw exception which is why
this function is not marked with __THROW. */
extern int pthread_once (pthread_once_t *__once_control,
void (*__init_routine) (void));
void (*__init_routine) (void)) __nonnull ((1, 2));


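For reference, a standard use of pthread_once, which the __nonnull additions above now let the compiler check at call sites:

#include <pthread.h>
#include <stdio.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;

static void init_once (void) { puts ("initialized exactly once"); }

static void *
worker (void *arg)
{
  pthread_once (&once, init_once);   /* only the first caller runs init_once */
  return arg;
}

int main (void)
{
  pthread_t t1, t2;
  pthread_create (&t1, NULL, worker, NULL);
  pthread_create (&t2, NULL, worker, NULL);
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);
  return 0;
}
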
/* Functions for handling cancellation.

@ -538,9 +581,6 @@ class __pthread_cleanup_class
function the compiler is free to decide inlining the change when
needed or fall back on the copy which must exist somewhere
else. */
__extern_inline void
__pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame);

__extern_inline void
__pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame)
{
@ -603,7 +643,7 @@ __pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame)
__pthread_unwind_buf_t __cancel_buf; \
void (*__cancel_routine) (void *) = (routine); \
void *__cancel_arg = (arg); \
int not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) \
int not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) (void *) \
__cancel_buf.__cancel_jmp_buf, 0); \
if (__builtin_expect (not_first_call, 0)) \
{ \
@ -620,6 +660,7 @@ extern void __pthread_register_cancel (__pthread_unwind_buf_t *__buf)
/* Remove a cleanup handler installed by the matching pthread_cleanup_push.
If EXECUTE is non-zero, the handler function is called. */
# define pthread_cleanup_pop(execute) \
do { } while (0);/* Empty to allow label before pthread_cleanup_pop. */\
} while (0); \
__pthread_unregister_cancel (&__cancel_buf); \
if (execute) \
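For reference, the push/pop macros above open and close a block, so the pair must sit in the same lexical scope. Typical usage, where the handler also runs if the thread is cancelled inside the region:

#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static void unlock_m (void *arg) { pthread_mutex_unlock (arg); }

static void *
worker (void *arg)
{
  pthread_mutex_lock (&m);
  pthread_cleanup_push (unlock_m, &m);
  /* ... work containing cancellation points; on cancellation,
     unlock_m still runs during unwinding ... */
  pthread_cleanup_pop (1);   /* nonzero: run the handler on normal exit too */
  return arg;
}
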
@ -637,7 +678,7 @@ extern void __pthread_unregister_cancel (__pthread_unwind_buf_t *__buf)
__pthread_unwind_buf_t __cancel_buf; \
void (*__cancel_routine) (void *) = (routine); \
void *__cancel_arg = (arg); \
int not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) \
int not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) (void *) \
__cancel_buf.__cancel_jmp_buf, 0); \
if (__builtin_expect (not_first_call, 0)) \
{ \
@ -655,6 +696,7 @@ extern void __pthread_register_cancel_defer (__pthread_unwind_buf_t *__buf)
restores the cancellation type that was in effect when the matching
pthread_cleanup_push_defer was called. */
# define pthread_cleanup_pop_restore_np(execute) \
do { } while (0);/* Empty to allow label before pthread_cleanup_pop. */\
} while (0); \
__pthread_unregister_cancel_restore (&__cancel_buf); \
if (execute) \
@ -666,9 +708,9 @@ extern void __pthread_unregister_cancel_restore (__pthread_unwind_buf_t *__buf)

/* Internal interface to initiate cleanup. */
extern void __pthread_unwind_next (__pthread_unwind_buf_t *__buf)
__cleanup_fct_attribute __attribute ((__noreturn__))
__cleanup_fct_attribute __attribute__ ((__noreturn__))
# ifndef SHARED
__attribute ((__weak__))
__attribute__ ((__weak__))
# endif
;
#endif
@ -683,56 +725,135 @@ extern int __sigsetjmp (struct __jmp_buf_tag *__env, int __savemask) __THROW;
/* Initialize a mutex. */
extern int pthread_mutex_init (pthread_mutex_t *__mutex,
__const pthread_mutexattr_t *__mutexattr)
__THROW;
__THROW __nonnull ((1));

/* Destroy a mutex. */
extern int pthread_mutex_destroy (pthread_mutex_t *__mutex) __THROW;
extern int pthread_mutex_destroy (pthread_mutex_t *__mutex)
__THROW __nonnull ((1));

/* Try locking a mutex. */
extern int pthread_mutex_trylock (pthread_mutex_t *_mutex) __THROW;
extern int pthread_mutex_trylock (pthread_mutex_t *__mutex)
__THROW __nonnull ((1));

/* Lock a mutex. */
extern int pthread_mutex_lock (pthread_mutex_t *__mutex) __THROW;
extern int pthread_mutex_lock (pthread_mutex_t *__mutex)
__THROW __nonnull ((1));

#ifdef __USE_XOPEN2K
/* Wait until lock becomes available, or specified time passes. */
extern int pthread_mutex_timedlock (pthread_mutex_t *__restrict __mutex,
__const struct timespec *__restrict
__abstime) __THROW;
__const struct timespec *__restrict
__abstime) __THROW __nonnull ((1, 2));
#endif

/* Unlock a mutex. */
extern int pthread_mutex_unlock (pthread_mutex_t *__mutex) __THROW;
extern int pthread_mutex_unlock (pthread_mutex_t *__mutex)
__THROW __nonnull ((1));


/* Get the priority ceiling of MUTEX. */
extern int pthread_mutex_getprioceiling (__const pthread_mutex_t *
__restrict __mutex,
int *__restrict __prioceiling)
__THROW __nonnull ((1, 2));

/* Set the priority ceiling of MUTEX to PRIOCEILING, return old
priority ceiling value in *OLD_CEILING. */
extern int pthread_mutex_setprioceiling (pthread_mutex_t *__restrict __mutex,
int __prioceiling,
int *__restrict __old_ceiling)
__THROW __nonnull ((1, 3));


#ifdef __USE_XOPEN2K8
/* Declare the state protected by MUTEX as consistent. */
extern int pthread_mutex_consistent (pthread_mutex_t *__mutex)
__THROW __nonnull ((1));
# ifdef __USE_GNU
extern int pthread_mutex_consistent_np (pthread_mutex_t *__mutex)
__THROW __nonnull ((1));
# endif
#endif

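For reference, the consistent/robust declarations above fit together like this: locking a robust mutex whose previous owner died returns EOWNERDEAD, and the new owner must repair the protected state and mark the mutex consistent before unlocking. A sketch (use pthread_mutex_consistent_np on toolchains that predate the standardized name):

#include <errno.h>
#include <pthread.h>

static int
lock_robust (pthread_mutex_t *m)
{
  int err = pthread_mutex_lock (m);
  if (err == EOWNERDEAD)
    {
      /* The previous owner died holding the lock: fix up the protected
         data here, then declare the mutex usable again. */
      pthread_mutex_consistent (m);
      err = 0;
    }
  /* ENOTRECOVERABLE means a past owner never restored consistency. */
  return err;
}
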
/* Functions for handling mutex attributes. */
|
||||
|
||||
 /* Initialize mutex attribute object ATTR with default attributes
    (kind is PTHREAD_MUTEX_TIMED_NP).  */
-extern int pthread_mutexattr_init (pthread_mutexattr_t *__attr) __THROW;
+extern int pthread_mutexattr_init (pthread_mutexattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Destroy mutex attribute object ATTR.  */
-extern int pthread_mutexattr_destroy (pthread_mutexattr_t *__attr) __THROW;
+extern int pthread_mutexattr_destroy (pthread_mutexattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Get the process-shared flag of the mutex attribute ATTR.  */
 extern int pthread_mutexattr_getpshared (__const pthread_mutexattr_t *
                                          __restrict __attr,
-                                         int *__restrict __pshared) __THROW;
+                                         int *__restrict __pshared)
+     __THROW __nonnull ((1, 2));
 
 /* Set the process-shared flag of the mutex attribute ATTR.  */
 extern int pthread_mutexattr_setpshared (pthread_mutexattr_t *__attr,
-                                         int __pshared) __THROW;
+                                         int __pshared)
+     __THROW __nonnull ((1));
 
-#ifdef __USE_UNIX98
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
 /* Return in *KIND the mutex kind attribute in *ATTR.  */
 extern int pthread_mutexattr_gettype (__const pthread_mutexattr_t *__restrict
-                                      __attr, int *__restrict __kind) __THROW;
+                                      __attr, int *__restrict __kind)
+     __THROW __nonnull ((1, 2));
 
 /* Set the mutex kind attribute in *ATTR to KIND (either PTHREAD_MUTEX_NORMAL,
    PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK, or
    PTHREAD_MUTEX_DEFAULT).  */
 extern int pthread_mutexattr_settype (pthread_mutexattr_t *__attr, int __kind)
-     __THROW;
+     __THROW __nonnull ((1));
 #endif
 
+/* Return in *PROTOCOL the mutex protocol attribute in *ATTR.  */
+extern int pthread_mutexattr_getprotocol (__const pthread_mutexattr_t *
+                                          __restrict __attr,
+                                          int *__restrict __protocol)
+     __THROW __nonnull ((1, 2));
+
+/* Set the mutex protocol attribute in *ATTR to PROTOCOL (either
+   PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT, or PTHREAD_PRIO_PROTECT).  */
+extern int pthread_mutexattr_setprotocol (pthread_mutexattr_t *__attr,
+                                          int __protocol)
+     __THROW __nonnull ((1));
+
+/* Return in *PRIOCEILING the mutex prioceiling attribute in *ATTR.  */
+extern int pthread_mutexattr_getprioceiling (__const pthread_mutexattr_t *
+                                             __restrict __attr,
+                                             int *__restrict __prioceiling)
+     __THROW __nonnull ((1, 2));
+
+/* Set the mutex prioceiling attribute in *ATTR to PRIOCEILING.  */
+extern int pthread_mutexattr_setprioceiling (pthread_mutexattr_t *__attr,
+                                             int __prioceiling)
+     __THROW __nonnull ((1));
+
+#ifdef __USE_XOPEN2K
+/* Get the robustness flag of the mutex attribute ATTR.  */
+extern int pthread_mutexattr_getrobust (__const pthread_mutexattr_t *__attr,
+                                        int *__robustness)
+     __THROW __nonnull ((1, 2));
+# ifdef __USE_GNU
+extern int pthread_mutexattr_getrobust_np (__const pthread_mutexattr_t *__attr,
+                                           int *__robustness)
+     __THROW __nonnull ((1, 2));
+# endif
+
+/* Set the robustness flag of the mutex attribute ATTR.  */
+extern int pthread_mutexattr_setrobust (pthread_mutexattr_t *__attr,
+                                        int __robustness)
+     __THROW __nonnull ((1));
+# ifdef __USE_GNU
+extern int pthread_mutexattr_setrobust_np (pthread_mutexattr_t *__attr,
+                                           int __robustness)
+     __THROW __nonnull ((1));
+# endif
+#endif
 
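The point of this sweep is to thread `__nonnull` attributes through the whole API so GCC can diagnose NULL attribute pointers at compile time; the calling convention itself is unchanged. For readers unfamiliar with the attribute API, a minimal caller-side sketch (not part of the commit; compile with -pthread):

/* Sketch: a recursive mutex via the attribute API annotated above. */
#include <pthread.h>
#include <stdio.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t mutex;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init (&mutex, &attr);
  pthread_mutexattr_destroy (&attr);   /* attr may go away once used */

  pthread_mutex_lock (&mutex);
  pthread_mutex_lock (&mutex);         /* OK: recursive relock by owner */
  pthread_mutex_unlock (&mutex);
  pthread_mutex_unlock (&mutex);

  pthread_mutex_destroy (&mutex);
  puts ("recursive mutex ok");
  return 0;
}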
@@ -743,66 +864,77 @@ extern int pthread_mutexattr_settype (pthread_mutexattr_t *__attr, int __kind)
    the default values if the latter is NULL.  */
 extern int pthread_rwlock_init (pthread_rwlock_t *__restrict __rwlock,
                                 __const pthread_rwlockattr_t *__restrict
-                                __attr) __THROW;
+                                __attr) __THROW __nonnull ((1));
 
 /* Destroy read-write lock RWLOCK.  */
-extern int pthread_rwlock_destroy (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_destroy (pthread_rwlock_t *__rwlock)
+     __THROW __nonnull ((1));
 
 /* Acquire read lock for RWLOCK.  */
-extern int pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock)
+     __THROW __nonnull ((1));
 
 /* Try to acquire read lock for RWLOCK.  */
-extern int pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
+     __THROW __nonnull ((1));
 
 # ifdef __USE_XOPEN2K
 /* Try to acquire read lock for RWLOCK or return after specified time.  */
 extern int pthread_rwlock_timedrdlock (pthread_rwlock_t *__restrict __rwlock,
                                        __const struct timespec *__restrict
-                                       __abstime) __THROW;
+                                       __abstime) __THROW __nonnull ((1, 2));
 # endif
 
 /* Acquire write lock for RWLOCK.  */
-extern int pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock)
+     __THROW __nonnull ((1));
 
 /* Try to acquire write lock for RWLOCK.  */
-extern int pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
+     __THROW __nonnull ((1));
 
 # ifdef __USE_XOPEN2K
 /* Try to acquire write lock for RWLOCK or return after specified time.  */
 extern int pthread_rwlock_timedwrlock (pthread_rwlock_t *__restrict __rwlock,
                                        __const struct timespec *__restrict
-                                       __abstime) __THROW;
+                                       __abstime) __THROW __nonnull ((1, 2));
 # endif
 
 /* Unlock RWLOCK.  */
-extern int pthread_rwlock_unlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_unlock (pthread_rwlock_t *__rwlock)
+     __THROW __nonnull ((1));
 
 
 /* Functions for handling read-write lock attributes.  */
 
 /* Initialize attribute object ATTR with default values.  */
-extern int pthread_rwlockattr_init (pthread_rwlockattr_t *__attr) __THROW;
+extern int pthread_rwlockattr_init (pthread_rwlockattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Destroy attribute object ATTR.  */
-extern int pthread_rwlockattr_destroy (pthread_rwlockattr_t *__attr) __THROW;
+extern int pthread_rwlockattr_destroy (pthread_rwlockattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Return current setting of process-shared attribute of ATTR in PSHARED.  */
 extern int pthread_rwlockattr_getpshared (__const pthread_rwlockattr_t *
                                           __restrict __attr,
-                                          int *__restrict __pshared) __THROW;
+                                          int *__restrict __pshared)
+     __THROW __nonnull ((1, 2));
 
 /* Set process-shared attribute of ATTR to PSHARED.  */
 extern int pthread_rwlockattr_setpshared (pthread_rwlockattr_t *__attr,
-                                          int __pshared) __THROW;
+                                          int __pshared)
+     __THROW __nonnull ((1));
 
 /* Return current setting of reader/writer preference.  */
 extern int pthread_rwlockattr_getkind_np (__const pthread_rwlockattr_t *
                                           __restrict __attr,
-                                          int *__restrict __pref) __THROW;
+                                          int *__restrict __pref)
+     __THROW __nonnull ((1, 2));
 
 /* Set reader/writer preference.  */
 extern int pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *__attr,
-                                          int __pref) __THROW;
+                                          int __pref) __THROW __nonnull ((1));
 #endif
 
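As with the mutex attributes, the rwlock declarations only gain annotations. For context, a small usage sketch (not from the tree): any number of readers may hold the lock concurrently, while a writer gets exclusive access.

#include <pthread.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int shared_value;

int
read_value (void)
{
  pthread_rwlock_rdlock (&lock);   /* shared: concurrent readers allowed */
  int v = shared_value;
  pthread_rwlock_unlock (&lock);
  return v;
}

void
write_value (int v)
{
  pthread_rwlock_wrlock (&lock);   /* exclusive */
  shared_value = v;
  pthread_rwlock_unlock (&lock);
}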
@@ -812,16 +944,19 @@ extern int pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *__attr,
    the default values if the latter is NULL.  */
 extern int pthread_cond_init (pthread_cond_t *__restrict __cond,
                               __const pthread_condattr_t *__restrict
-                              __cond_attr) __THROW;
+                              __cond_attr) __THROW __nonnull ((1));
 
 /* Destroy condition variable COND.  */
-extern int pthread_cond_destroy (pthread_cond_t *__cond) __THROW;
+extern int pthread_cond_destroy (pthread_cond_t *__cond)
+     __THROW __nonnull ((1));
 
 /* Wake up one thread waiting for condition variable COND.  */
-extern int pthread_cond_signal (pthread_cond_t *__cond) __THROW;
+extern int pthread_cond_signal (pthread_cond_t *__cond)
+     __THROW __nonnull ((1));
 
 /* Wake up all threads waiting for condition variable COND.  */
-extern int pthread_cond_broadcast (pthread_cond_t *__cond) __THROW;
+extern int pthread_cond_broadcast (pthread_cond_t *__cond)
+     __THROW __nonnull ((1));
 
 /* Wait for condition variable COND to be signaled or broadcast.
    MUTEX is assumed to be locked before.
@@ -829,7 +964,8 @@ extern int pthread_cond_broadcast (pthread_cond_t *__cond) __THROW;
    This function is a cancellation point and therefore not marked with
    __THROW.  */
 extern int pthread_cond_wait (pthread_cond_t *__restrict __cond,
-                              pthread_mutex_t *__restrict __mutex);
+                              pthread_mutex_t *__restrict __mutex)
+     __nonnull ((1, 2));
 
 /* Wait for condition variable COND to be signaled or broadcast until
    ABSTIME.  MUTEX is assumed to be locked before.  ABSTIME is an
@@ -841,36 +977,39 @@ extern int pthread_cond_wait (pthread_cond_t *__restrict __cond,
 extern int pthread_cond_timedwait (pthread_cond_t *__restrict __cond,
                                    pthread_mutex_t *__restrict __mutex,
                                    __const struct timespec *__restrict
-                                   __abstime);
+                                   __abstime) __nonnull ((1, 2, 3));
 
 /* Functions for handling condition variable attributes.  */
 
 /* Initialize condition variable attribute ATTR.  */
-extern int pthread_condattr_init (pthread_condattr_t *__attr) __THROW;
+extern int pthread_condattr_init (pthread_condattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Destroy condition variable attribute ATTR.  */
-extern int pthread_condattr_destroy (pthread_condattr_t *__attr) __THROW;
+extern int pthread_condattr_destroy (pthread_condattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Get the process-shared flag of the condition variable attribute ATTR.  */
 extern int pthread_condattr_getpshared (__const pthread_condattr_t *
                                         __restrict __attr,
-                                        int *__restrict __pshared) __THROW;
+                                        int *__restrict __pshared)
+     __THROW __nonnull ((1, 2));
 
 /* Set the process-shared flag of the condition variable attribute ATTR.  */
 extern int pthread_condattr_setpshared (pthread_condattr_t *__attr,
-                                        int __pshared) __THROW;
+                                        int __pshared) __THROW __nonnull ((1));
 
 #ifdef __USE_XOPEN2K
 /* Get the clock selected for the condition variable attribute ATTR.  */
 extern int pthread_condattr_getclock (__const pthread_condattr_t *
                                       __restrict __attr,
                                       __clockid_t *__restrict __clock_id)
-     __THROW;
+     __THROW __nonnull ((1, 2));
 
 /* Set the clock selected for the condition variable attribute ATTR.  */
 extern int pthread_condattr_setclock (pthread_condattr_t *__attr,
-                                      __clockid_t __clock_id) __THROW;
+                                      __clockid_t __clock_id)
+     __THROW __nonnull ((1));
 #endif
 
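The getclock/setclock pair above is what makes the pthread_cond_timedwait deadline clock selectable. A caller-side sketch (not part of the commit) that arms a timeout against CLOCK_MONOTONIC, so wall-clock jumps cannot fire or delay the timeout:

#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond;
static int done;

void
init_cond (void)
{
  pthread_condattr_t a;
  pthread_condattr_init (&a);
  pthread_condattr_setclock (&a, CLOCK_MONOTONIC);
  pthread_cond_init (&cond, &a);
  pthread_condattr_destroy (&a);
}

/* Returns 0 once DONE is set, or ETIMEDOUT after REL_SEC seconds. */
int
wait_done (int rel_sec)
{
  struct timespec ts;
  clock_gettime (CLOCK_MONOTONIC, &ts);   /* must match the attr clock */
  ts.tv_sec += rel_sec;

  pthread_mutex_lock (&lock);
  int err = 0;
  while (!done && err == 0)
    err = pthread_cond_timedwait (&cond, &lock, &ts);
  pthread_mutex_unlock (&lock);
  return done ? 0 : err;
}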
@@ -880,19 +1019,23 @@ extern int pthread_condattr_setclock (pthread_condattr_t *__attr,
 /* Initialize the spinlock LOCK.  If PSHARED is nonzero the spinlock can
    be shared between different processes.  */
 extern int pthread_spin_init (pthread_spinlock_t *__lock, int __pshared)
-     __THROW;
+     __THROW __nonnull ((1));
 
 /* Destroy the spinlock LOCK.  */
-extern int pthread_spin_destroy (pthread_spinlock_t *__lock) __THROW;
+extern int pthread_spin_destroy (pthread_spinlock_t *__lock)
+     __THROW __nonnull ((1));
 
 /* Wait until spinlock LOCK is acquired.  */
-extern int pthread_spin_lock (pthread_spinlock_t *__lock) __THROW;
+extern int pthread_spin_lock (pthread_spinlock_t *__lock)
+     __THROW __nonnull ((1));
 
 /* Try to lock spinlock LOCK.  */
-extern int pthread_spin_trylock (pthread_spinlock_t *__lock) __THROW;
+extern int pthread_spin_trylock (pthread_spinlock_t *__lock)
+     __THROW __nonnull ((1));
 
 /* Release spinlock LOCK.  */
-extern int pthread_spin_unlock (pthread_spinlock_t *__lock) __THROW;
+extern int pthread_spin_unlock (pthread_spinlock_t *__lock)
+     __THROW __nonnull ((1));
 
 
 /* Functions to handle barriers.  */
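Spinlocks get the same annotations. For context, a usage sketch (not from the tree); a spinlock busy-waits instead of sleeping, so it only pays off for very short critical sections:

#include <pthread.h>

static pthread_spinlock_t slock;
static long counter;

void
init_counter (void)
{
  /* PTHREAD_PROCESS_PRIVATE: used within a single process only. */
  pthread_spin_init (&slock, PTHREAD_PROCESS_PRIVATE);
}

void
bump_counter (void)
{
  pthread_spin_lock (&slock);   /* spins; keep the section tiny */
  ++counter;
  pthread_spin_unlock (&slock);
}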
@@ -901,29 +1044,36 @@ extern int pthread_spin_unlock (pthread_spinlock_t *__lock) __THROW;
    opened when COUNT waiters have arrived.  */
 extern int pthread_barrier_init (pthread_barrier_t *__restrict __barrier,
                                  __const pthread_barrierattr_t *__restrict
-                                 __attr, unsigned int __count) __THROW;
+                                 __attr, unsigned int __count)
+     __THROW __nonnull ((1));
 
 /* Destroy a previously dynamically initialized barrier BARRIER.  */
-extern int pthread_barrier_destroy (pthread_barrier_t *__barrier) __THROW;
+extern int pthread_barrier_destroy (pthread_barrier_t *__barrier)
+     __THROW __nonnull ((1));
 
 /* Wait on barrier BARRIER.  */
-extern int pthread_barrier_wait (pthread_barrier_t *__barrier) __THROW;
+extern int pthread_barrier_wait (pthread_barrier_t *__barrier)
+     __THROW __nonnull ((1));
 
 
 /* Initialize barrier attribute ATTR.  */
-extern int pthread_barrierattr_init (pthread_barrierattr_t *__attr) __THROW;
+extern int pthread_barrierattr_init (pthread_barrierattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Destroy previously dynamically initialized barrier attribute ATTR.  */
-extern int pthread_barrierattr_destroy (pthread_barrierattr_t *__attr) __THROW;
+extern int pthread_barrierattr_destroy (pthread_barrierattr_t *__attr)
+     __THROW __nonnull ((1));
 
 /* Get the process-shared flag of the barrier attribute ATTR.  */
 extern int pthread_barrierattr_getpshared (__const pthread_barrierattr_t *
                                            __restrict __attr,
-                                           int *__restrict __pshared) __THROW;
+                                           int *__restrict __pshared)
+     __THROW __nonnull ((1, 2));
 
 /* Set the process-shared flag of the barrier attribute ATTR.  */
 extern int pthread_barrierattr_setpshared (pthread_barrierattr_t *__attr,
-                                           int __pshared) __THROW;
+                                           int __pshared)
+     __THROW __nonnull ((1));
 #endif
 
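A barrier usage sketch for context (not part of the commit). Exactly one of the COUNT waiters receives PTHREAD_BARRIER_SERIAL_THREAD, the value the pthread_barrier_wait implementation further down in this commit computes:

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4
static pthread_barrier_t barrier;

static void *
worker (void *arg)
{
  /* ... per-thread setup work ... */
  int rc = pthread_barrier_wait (&barrier);
  if (rc == PTHREAD_BARRIER_SERIAL_THREAD)   /* exactly one thread */
    puts ("all threads arrived");
  return arg;
}

int
main (void)
{
  pthread_t t[NTHREADS];
  pthread_barrier_init (&barrier, NULL, NTHREADS);
  for (int i = 0; i < NTHREADS; ++i)
    pthread_create (&t[i], NULL, worker, NULL);
  for (int i = 0; i < NTHREADS; ++i)
    pthread_join (t[i], NULL);
  pthread_barrier_destroy (&barrier);
  return 0;
}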
@@ -936,7 +1086,8 @@ extern int pthread_barrierattr_setpshared (pthread_barrierattr_t *__attr,
    DESTR_FUNCTION is not called if the associated value is NULL when
    the key is destroyed.  */
 extern int pthread_key_create (pthread_key_t *__key,
-                               void (*__destr_function) (void *)) __THROW;
+                               void (*__destr_function) (void *))
+     __THROW __nonnull ((1));
 
 /* Destroy KEY.  */
 extern int pthread_key_delete (pthread_key_t __key) __THROW;
@@ -946,13 +1097,14 @@ extern void *pthread_getspecific (pthread_key_t __key) __THROW;
 
 /* Store POINTER in the thread-specific data slot identified by KEY.  */
 extern int pthread_setspecific (pthread_key_t __key,
                                 __const void *__pointer) __THROW;
 
 
 #ifdef __USE_XOPEN2K
 /* Get ID of CPU-time clock for thread THREAD_ID.  */
 extern int pthread_getcpuclockid (pthread_t __thread_id,
-                                  __clockid_t *__clock_id) __THROW;
+                                  __clockid_t *__clock_id)
+     __THROW __nonnull ((2));
 #endif
 
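A thread-specific data sketch for context (not from the tree). The destructor registered with pthread_key_create runs at thread exit for every non-NULL stored value, which is what the comment above describes; a production version would guard key creation with pthread_once:

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t buf_key;

static void
free_buffer (void *p)   /* invoked at thread exit for non-NULL values */
{
  free (p);
}

static void
init_key (void)        /* call once, e.g. via pthread_once */
{
  pthread_key_create (&buf_key, free_buffer);
}

/* Lazily allocate a per-thread scratch buffer. */
void *
thread_buffer (size_t size)
{
  void *p = pthread_getspecific (buf_key);
  if (p == NULL)
    {
      p = malloc (size);
      pthread_setspecific (buf_key, p);
    }
  return p;
}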
@@ -971,6 +1123,16 @@ extern int pthread_atfork (void (*__prepare) (void),
                            void (*__parent) (void),
                            void (*__child) (void)) __THROW;
 
+
+#ifdef __USE_EXTERN_INLINES
+/* Optimizations.  */
+__extern_inline int
+__NTH (pthread_equal (pthread_t __thread1, pthread_t __thread2))
+{
+  return __thread1 == __thread2;
+}
+#endif
+
 __END_DECLS
 
 #endif /* pthread.h */
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
@@ -25,13 +25,14 @@
 
 /* Wait on barrier.  */
 int
-pthread_barrier_wait (pthread_barrier_t *barrier)
+pthread_barrier_wait (
+     pthread_barrier_t *barrier)
 {
   struct pthread_barrier *ibarrier = (struct pthread_barrier *) barrier;
   int result = 0;
 
   /* Make sure we are alone.  */
-  lll_lock (ibarrier->lock);
+  lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
   /* One more arrival.  */
   --ibarrier->left;
@@ -44,7 +45,8 @@ pthread_barrier_wait (pthread_barrier_t *barrier)
       ++ibarrier->curr_event;
 
       /* Wake up everybody.  */
-      lll_futex_wake (&ibarrier->curr_event, INT_MAX);
+      lll_futex_wake (&ibarrier->curr_event, INT_MAX,
+                      ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
       /* This is the thread which finished the serialization.  */
       result = PTHREAD_BARRIER_SERIAL_THREAD;
@@ -56,11 +58,12 @@ pthread_barrier_wait (pthread_barrier_t *barrier)
       unsigned int event = ibarrier->curr_event;
 
       /* Before suspending, make the barrier available to others.  */
-      lll_unlock (ibarrier->lock);
+      lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
       /* Wait for the event counter of the barrier to change.  */
      do
-        lll_futex_wait (&ibarrier->curr_event, event);
+        lll_futex_wait (&ibarrier->curr_event, event,
+                        ibarrier->private ^ FUTEX_PRIVATE_FLAG);
      while (event == ibarrier->curr_event);
    }
 
@@ -70,7 +73,7 @@ pthread_barrier_wait (pthread_barrier_t *barrier)
   /* If this was the last woken thread, unlock.  */
   if (atomic_increment_val (&ibarrier->left) == init_count)
     /* We are done.  */
-    lll_unlock (ibarrier->lock);
+    lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
 
   return result;
 }
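The recurring `ibarrier->private ^ FUTEX_PRIVATE_FLAG` argument is the heart of this change: the lll_* macros now take an explicit private-vs-shared futex flag (the field appears to store the flag pre-inverted, so the XOR yields FUTEX_PRIVATE_FLAG for process-private barriers and 0 for shared ones). Private futexes let the kernel use a cheaper per-process hash instead of the cross-process one. A stand-alone sketch of roughly what such calls reduce to (assumes Linux; the helper names here are made up, not uClibc's):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long
futex_wait_private (int *addr, int expected)
{
  /* Blocks while *addr == expected.  FUTEX_PRIVATE_FLAG tells the
     kernel the word is not shared across address spaces. */
  return syscall (SYS_futex, addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
                  expected, NULL, NULL, 0);
}

static long
futex_wake_private (int *addr, int nwake)
{
  return syscall (SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
                  nwake, NULL, NULL, 0);
}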
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
@@ -23,14 +23,18 @@
 #include <lowlevellock.h>
 #include <pthread.h>
 #include <pthreadP.h>
+#include <bits/kernel-features.h>
 
 
 int
-__pthread_cond_broadcast (pthread_cond_t *cond)
+__pthread_cond_broadcast (
+     pthread_cond_t *cond)
 {
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+                ? LLL_SHARED : LLL_PRIVATE;
   /* Make sure we are alone.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, pshared);
 
   /* Are there any waiters to be woken?  */
   if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -44,7 +48,7 @@ __pthread_cond_broadcast (pthread_cond_t *cond)
       ++cond->__data.__broadcast_seq;
 
       /* We are done.  */
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, pshared);
 
       /* Do not use requeue for pshared condvars.  */
       if (cond->__data.__mutex == (void *) ~0l)
@@ -52,15 +56,24 @@ __pthread_cond_broadcast (pthread_cond_t *cond)
 
       /* Wake everybody.  */
       pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
 
-      /* XXX: Kernel so far doesn't support requeue to PI futex.  */
+      /* XXX: Kernel so far can only requeue to the same type of futex,
+         in this case private (we don't requeue for pshared condvars).  */
       if (__builtin_expect (mut->__data.__kind
                             & (PTHREAD_MUTEX_PRIO_INHERIT_NP
                                | PTHREAD_MUTEX_PSHARED_BIT), 0))
         goto wake_all;
 
       /* lll_futex_requeue returns 0 for success and non-zero
          for errors.  */
       if (__builtin_expect (lll_futex_requeue (&cond->__data.__futex, 1,
                                                INT_MAX, &mut->__data.__lock,
-                                               futex_val), 0))
+                                               futex_val, LLL_PRIVATE), 0))
         {
           /* The requeue functionality is not available.  */
         wake_all:
-          lll_futex_wake (&cond->__data.__futex, INT_MAX);
+          lll_futex_wake (&cond->__data.__futex, INT_MAX, pshared);
         }
 
       /* That's all.  */
@@ -68,8 +81,9 @@ __pthread_cond_broadcast (pthread_cond_t *cond)
     }
 
   /* We are done.  */
-  lll_mutex_unlock (cond->__data.__lock);
+  lll_unlock (cond->__data.__lock, pshared);
 
   return 0;
 }
 
 weak_alias(__pthread_cond_broadcast, pthread_cond_broadcast)
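Background on the requeue path above: on broadcast, rather than waking every waiter only to have them all pile onto the mutex at once (a thundering herd), NPTL wakes one thread and moves the rest from the condvar futex onto the mutex futex. A raw-syscall sketch of that step under the same assumptions as before (hypothetical helper, not uClibc code):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <limits.h>

/* Wake one waiter on COND_FUTEX and requeue the rest onto MUTEX_FUTEX,
   but only if *COND_FUTEX still equals EXPECTED (else fails with EAGAIN,
   the "functionality not available" fallback case in the code above). */
static long
requeue_waiters (int *cond_futex, int *mutex_futex, int expected)
{
  return syscall (SYS_futex, cond_futex, FUTEX_CMP_REQUEUE,
                  1,                        /* wake this many */
                  (unsigned long) INT_MAX,  /* requeue the rest */
                  mutex_futex, expected);
}

Each requeued thread is then woken later, one at a time, as the mutex is unlocked, exactly as if it had contended for the mutex normally.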
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
@@ -23,14 +23,19 @@
 #include <lowlevellock.h>
 #include <pthread.h>
 #include <pthreadP.h>
+#include <bits/kernel-features.h>
 
 
 int
-__pthread_cond_signal (pthread_cond_t *cond)
+__pthread_cond_signal (
+     pthread_cond_t *cond)
 {
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+                ? LLL_SHARED : LLL_PRIVATE;
+
   /* Make sure we are alone.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, pshared);
 
   /* Are there any waiters to be woken?  */
   if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -40,12 +45,18 @@ __pthread_cond_signal (pthread_cond_t *cond)
       ++cond->__data.__futex;
 
       /* Wake one.  */
-      lll_futex_wake (&cond->__data.__futex, 1);
+      if (! __builtin_expect (lll_futex_wake_unlock (&cond->__data.__futex, 1,
+                                                     1, &cond->__data.__lock,
+                                                     pshared), 0))
+        return 0;
+
+      lll_futex_wake (&cond->__data.__futex, 1, pshared);
     }
 
   /* We are done.  */
-  lll_mutex_unlock (cond->__data.__lock);
+  lll_unlock (cond->__data.__lock, pshared);
 
   return 0;
 }
 
 weak_alias(__pthread_cond_signal, pthread_cond_signal)
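The new lll_futex_wake_unlock fast path tries to wake a waiter and release the internal condvar lock in a single futex syscall, falling back to the plain wake plus unlock when that is unavailable. None of this changes the user-visible contract; the canonical caller pattern is still the predicate loop (sketch, not from the tree):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;

void
produce (void)
{
  pthread_mutex_lock (&lock);
  ready = 1;
  pthread_cond_signal (&cond);   /* wakes at least one waiter */
  pthread_mutex_unlock (&lock);
}

void
consume (void)
{
  pthread_mutex_lock (&lock);
  while (!ready)                 /* loop: wakeups may be spurious */
    pthread_cond_wait (&cond, &lock);
  ready = 0;
  pthread_mutex_unlock (&lock);
}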
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
 
@@ -23,6 +23,7 @@
 #include <lowlevellock.h>
 #include <pthread.h>
 #include <pthreadP.h>
+#include <bits/kernel-features.h>
 
 
 /* Cleanup handler, defined in pthread_cond_wait.c.  */
@@ -51,21 +52,24 @@ __pthread_cond_timedwait (
   if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
     return EINVAL;
 
+  int pshared = (cond->__data.__mutex == (void *) ~0l)
+                ? LLL_SHARED : LLL_PRIVATE;
+
   /* Make sure we are alone.  */
-  lll_mutex_lock (cond->__data.__lock);
+  lll_lock (cond->__data.__lock, pshared);
 
   /* Now we can release the mutex.  */
   int err = __pthread_mutex_unlock_usercnt (mutex, 0);
   if (err)
     {
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, pshared);
       return err;
     }
 
   /* We have one new user of the condvar.  */
   ++cond->__data.__total_seq;
   ++cond->__data.__futex;
-  cond->__data.__nwaiters += 1 << COND_CLOCK_BITS;
+  cond->__data.__nwaiters += 1 << COND_NWAITERS_SHIFT;
 
   /* Remember the mutex we are using here.  If there is already a
      different address store this is a bad user bug.  Do not store
@@ -98,7 +102,7 @@ __pthread_cond_timedwait (
       int ret;
       ret = INTERNAL_SYSCALL (clock_gettime, err, 2,
                               (cond->__data.__nwaiters
-                               & ((1 << COND_CLOCK_BITS) - 1)),
+                               & ((1 << COND_NWAITERS_SHIFT) - 1)),
                               &rt);
 # ifndef __ASSUME_POSIX_TIMERS
       if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (ret, err), 0))
@@ -144,20 +148,20 @@ __pthread_cond_timedwait (
       unsigned int futex_val = cond->__data.__futex;
 
       /* Prepare to wait.  Release the condvar futex.  */
-      lll_mutex_unlock (cond->__data.__lock);
+      lll_unlock (cond->__data.__lock, pshared);
 
       /* Enable asynchronous cancellation.  Required by the standard.  */
       cbuffer.oldtype = __pthread_enable_asynccancel ();
 
       /* Wait until woken by signal or broadcast.  */
       err = lll_futex_timed_wait (&cond->__data.__futex,
-                                  futex_val, &rt);
+                                  futex_val, &rt, pshared);
 
       /* Disable asynchronous cancellation.  */
       __pthread_disable_asynccancel (cbuffer.oldtype);
 
       /* We are going to look at shared data again, so get the lock.  */
-      lll_mutex_lock (cond->__data.__lock);
+      lll_lock (cond->__data.__lock, pshared);
 
       /* If a broadcast happened, we are done.  */
       if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -187,17 +191,17 @@ __pthread_cond_timedwait (
 
  bc_out:
 
-  cond->__data.__nwaiters -= 1 << COND_CLOCK_BITS;
+  cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT;
 
   /* If pthread_cond_destroy was called on this variable already,
      notify the pthread_cond_destroy caller all waiters have left
     and it can be successfully destroyed.  */
   if (cond->__data.__total_seq == -1ULL
-      && cond->__data.__nwaiters < (1 << COND_CLOCK_BITS))
-    lll_futex_wake (&cond->__data.__nwaiters, 1);
+      && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
+    lll_futex_wake (&cond->__data.__nwaiters, 1, pshared);
 
   /* We are done with the condvar.  */
-  lll_mutex_unlock (cond->__data.__lock);
+  lll_unlock (cond->__data.__lock, pshared);
 
   /* The cancellation handling is back to normal, remove the handler.  */
   __pthread_cleanup_pop (&buffer, 0);
@@ -207,4 +211,5 @@ __pthread_cond_timedwait (
 
   return err ?: result;
 }
+
 weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait)
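On the COND_CLOCK_BITS to COND_NWAITERS_SHIFT rename: the arithmetic above implies that __nwaiters is a packed field, with the condvar's clock id in the low bits (fed straight to clock_gettime) and the waiter count above them. A hypothetical illustration of that packing (field and macro names here are made up; the real shift value lives in uClibc's pthreadP.h):

#define NWAITERS_SHIFT 1   /* assumption: enough bits for the clock id */

/* One waiter arriving: count lives above the low bits, so this is
   exactly the "+= 1 << COND_NWAITERS_SHIFT" in the diff. */
static unsigned int
add_waiter (unsigned int packed)
{
  return packed + (1u << NWAITERS_SHIFT);
}

/* The clock id extraction the diff rewrites:
   packed & ((1 << COND_NWAITERS_SHIFT) - 1). */
static unsigned int
clock_of (unsigned int packed)
{
  return packed & ((1u << NWAITERS_SHIFT) - 1);
}

static unsigned int
count_of (unsigned int packed)
{
  return packed >> NWAITERS_SHIFT;
}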
Some files were not shown because too many files have changed in this diff.