[PATCH] glibc 2.2.94 - hppa - pthreads support
libc-alpha,
Adding pthreads support to hppa.
Modified patch: removed files related to IPC on hppa, and updated
for PowerPC 32/64 pt-machine.h split to match latest 2.2.94 changes.
Cheers,
c.
---
2002-09-24 Carlos O'Donell <carlos@baldric.uwo.ca>
* linuxthreads/sysdeps/powerpc/powerpc64/pt-machine.h:
(try_lock): New.
(lock_held): New.
2002-09-10 Carlos O'Donell <carlos@baldric.uwo.ca>
* linuxthreads/descr.h:
In "struct pthread_atomic" change "int p_spinlock"
to "__atomic_lock_t p_spinlock".
2001-11-14 Matthew Wilcox <willy@debian.org>
* linuxthreads/oldsemaphore.c:
(__old_sem_init): Change "__LT_SPINLOCK_INIT" to
"__ATOMIC_LOCK_INIT".
* linuxthreads/pt-machine.c: Define extern for "try_lock"
function call.
* linuxthreads/spinlock.c:
Change "int * spinlock" to "__atomic_lock_t * spinlock",
"__LT_SPINLOCK_INIT" to "__ATOMIC_LOCK_INIT", and
"0" to "__ATOMIC_LOCK_INIT", where required.
In "struct wait_node" change "int abandoned" to
"__atomic_lock_t abandoned".
(__pthread_release): Function call argument changed from
"int * spinlock" to "__atomic_lock_t * spinlock", and
"__LT_SPINLOCK_INIT" changed to "__ATOMIC_LOCK_INIT".
(__pthread_alt_timedlock): "0" to "__ATOMIC_LOCK_INIT" and
change calls from "testandset" to "try_lock".
(__pthread_alt_unlock): "0" to "__ATOMIC_LOCK_INIT", and
"p_node->abandoned" to "lock_held(&p_node->abandoned)", and
changed calls from "testandset" to "try_lock" where required.
(__pthread_compare_and_swap): Function call argument changed from
"int * spinlock" to "__atomic_lock_t * spinlock".
(__pthread_acquire): Function call argument changed from
"int * spinlock" to "__atomic_lock_t * spinlock", and
changed calls from "testandset" to "try_lock" where required.
* linuxthreads/spinlock.h:
Remove testandset macro.
(__pthread_compare_and_swap): Extern changed to match function.
(compare_and_swap): Likewise.
(compare_and_swap_with_release_semantics): Function arg. type change.
(compare_and_swap): Function arg. type change.
(__pthread_compare_and_swap): Extern changed to match function.
(__pthread_init_lock): "__LT_SPINLOCK_INIT" changed to
"__ATOMIC_LOCK_INIT".
(__pthread_alt_trylock): "__LT_SPINLOCK_INIT" to
"__ATOMIC_LOCK_INIT" and change calls from "testandset" to
"try_lock".
* linuxthreads/sysdeps/alpha/pt-machine.h:
(testandset): Deleted (renamed to try_lock).
(try_lock): New.
(lock_held): New.
* linuxthreads/sysdeps/arm/pt-machine.h: Likewise.
* linuxthreads/sysdeps/cris/pt-machine.h: Likewise.
* linuxthreads/sysdeps/i386/i686/pt-machine.h: Likewise.
* linuxthreads/sysdeps/i386/pt-machine.h: Likewise.
* linuxthreads/sysdeps/ia64/pt-machine.h: Likewise.
* linuxthreads/sysdeps/m68k/pt-machine.h: Likewise.
* linuxthreads/sysdeps/mips/pt-machine.h: Likewise.
* linuxthreads/sysdeps/powerpc/powerpc32/pt-machine.h: Likewise.
* linuxthreads/sysdeps/s390/s390-32/pt-machine.h: Likewise.
* linuxthreads/sysdeps/s390/s390-64/pt-machine.h: Likewise.
* linuxthreads/sysdeps/sh/pt-machine.h: Likewise.
* linuxthreads/sysdeps/sparc/sparc32/pt-machine.h: Likewise.
* linuxthreads/sysdeps/sparc/sparc64/pt-machine.h: Likewise.
* linuxthreads/sysdeps/pthread/bits/pthreadtypes.h:
Typedef "__atomic_lock_t" as "int" for all other arches,
leaving them the same as before.
(_pthread_fastlock): Change "int" to "__atomic_lock_t".
* linuxthreads/sysdeps/pthread/bits/libc-lock.h:
Include the use of PTHREAD_MUTEX_INITIALIZER for
the case of __LOCK_INITIALISER_NOT_ZERO.
* linuxthreads/sysdeps/pthread/bits/initspin.h:
Define __ATOMIC_LOCK_INIT as 0 for all arches.
Include changes to associated macros.
* linuxthreads/sysdeps/hppa/pspinlock.c:
(__ldcw): New.
(__pthread_spin_lock): Cleanup.
(__pthread_spin_trylock): Cleanup.
* linuxthreads/sysdeps/hppa/pt-machine.h:
(__get_cr27): New.
(__set_cr27): New.
(testandset): Deleted (renamed to try_lock).
(try_lock): New.
(lock_held): New.
* linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h:
Remove "__LT_SPINLOCK_INIT" and define "__ATOMIC_LOCK_INIT"
to "((__atomic_lock_t) { 1 })" while changing the associated
macros that relied on this.
* linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h:
New file.
* sysdeps/hppa/dl-fptr.c:
Change the use of "1" to "__ATOMIC_LOCK_INIT".
(__hppa_make_fptr): Call 'try_lock' instead of 'testandset',
and change the use of "1" to "__ATOMIC_LOCK_INIT".
(_dl_unmap): Likewise.
(_dl_lookup_address): Likewise.
* sysdeps/ia64/dl-fptr.c:
In "static struct local" change "int lock" to
"__atomic_lock_t lock".
(lock): Change calls from "testandset" to "try_lock",
and change "0" to "__ATOMIC_LOCK_INIT" where needed.
diff -urN libc/linuxthreads/descr.h libc/linuxthreads/descr.h
--- libc/linuxthreads/descr.h 2002-08-01 23:31:54.000000000 -0400
+++ libc/linuxthreads/descr.h 2002-09-02 14:49:22.000000000 -0400
@@ -61,7 +61,7 @@
/* Atomic counter made possible by compare_and_swap */
struct pthread_atomic {
long p_count;
- int p_spinlock;
+ __atomic_lock_t p_spinlock;
};
diff -urN libc/linuxthreads/oldsemaphore.c libc/linuxthreads/oldsemaphore.c
--- libc/linuxthreads/oldsemaphore.c Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/oldsemaphore.c Wed Nov 14 09:43:58 2001
@@ -73,7 +73,7 @@
errno = ENOSYS;
return -1;
}
- sem->sem_spinlock = __LT_SPINLOCK_INIT;
+ sem->sem_spinlock = __ATOMIC_LOCK_INIT;
sem->sem_status = ((long)value << 1) + 1;
return 0;
}
diff -urN libc/linuxthreads/pt-machine.c libc/linuxthreads/pt-machine.c
--- libc/linuxthreads/pt-machine.c Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/pt-machine.c Wed Nov 14 09:43:58 2001
@@ -19,7 +19,9 @@
#define PT_EI
-extern long int testandset (int *spinlock);
+#include <pthread.h>
+
+extern int try_lock(__atomic_lock_t *spinlock);
extern int __compare_and_swap (long int *p, long int oldval, long int newval);
#include <pt-machine.h>
diff -urN libc/linuxthreads/spinlock.c libc/linuxthreads/spinlock.c
--- libc/linuxthreads/spinlock.c Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/spinlock.c Wed Nov 14 09:43:58 2001
@@ -24,12 +24,12 @@
#include "spinlock.h"
#include "restart.h"
-static void __pthread_acquire(int * spinlock);
+static void __pthread_acquire(__atomic_lock_t * spinlock);
-static inline void __pthread_release(int * spinlock)
+static inline void __pthread_release(__atomic_lock_t * spinlock)
{
WRITE_MEMORY_BARRIER();
- *spinlock = __LT_SPINLOCK_INIT;
+ *spinlock = __ATOMIC_LOCK_INIT;
__asm __volatile ("" : "=m" (*spinlock) : "0" (*spinlock));
}
@@ -269,11 +269,11 @@
struct wait_node {
struct wait_node *next; /* Next node in null terminated linked list */
pthread_descr thr; /* The thread waiting with this node */
- int abandoned; /* Atomic flag */
+ __atomic_lock_t abandoned; /* Atomic flag */
};
static long wait_node_free_list;
-static int wait_node_free_list_spinlock;
+static __atomic_lock_t wait_node_free_list_spinlock = __ATOMIC_LOCK_INIT;
/* Allocate a new node from the head of the free list using an atomic
operation, or else using malloc if that list is empty. A fundamental
@@ -427,7 +425,7 @@
if (self == NULL)
self = thread_self();
- wait_node.abandoned = 0;
+ wait_node.abandoned = __ATOMIC_LOCK_INIT;
wait_node.next = (struct wait_node *) lock->__status;
wait_node.thr = self;
lock->__status = (long) &wait_node;
@@ -453,7 +451,7 @@
wait_node.thr = self;
newstatus = (long) &wait_node;
}
- wait_node.abandoned = 0;
+ wait_node.abandoned = __ATOMIC_LOCK_INIT;
wait_node.next = (struct wait_node *) oldstatus;
/* Make sure the store in wait_node.next completes before performing
the compare-and-swap */
@@ -502,7 +500,7 @@
if (self == NULL)
self = thread_self();
- p_wait_node->abandoned = 0;
+ p_wait_node->abandoned = __ATOMIC_LOCK_INIT;
p_wait_node->next = (struct wait_node *) lock->__status;
p_wait_node->thr = self;
lock->__status = (long) p_wait_node;
@@ -525,7 +523,7 @@
p_wait_node->thr = self;
newstatus = (long) p_wait_node;
}
- p_wait_node->abandoned = 0;
+ p_wait_node->abandoned = __ATOMIC_LOCK_INIT;
p_wait_node->next = (struct wait_node *) oldstatus;
/* Make sure the store in wait_node.next completes before performing
the compare-and-swap */
@@ -546,7 +544,7 @@
if (oldstatus != 0) {
if (timedsuspend(self, abstime) == 0) {
- if (!testandset(&p_wait_node->abandoned))
+ if (!try_lock(&p_wait_node->abandoned))
return 0; /* Timeout! */
/* Eat oustanding resume from owner, otherwise wait_node_free() below
@@ -625,7 +623,7 @@
while (p_node != (struct wait_node *) 1) {
int prio;
- if (p_node->abandoned) {
+ if (lock_held(&p_node->abandoned)) {
/* Remove abandoned node. */
#if defined TEST_FOR_COMPARE_AND_SWAP
if (!__pthread_has_cas)
@@ -656,7 +654,7 @@
p_max_prio = p_node;
}
- /* This canno6 jump backward in the list, so no further read
+ /* This cannot jump backward in the list, so no further read
barrier is needed. */
pp_node = &p_node->next;
p_node = *pp_node;
@@ -675,7 +673,7 @@
thread timed out and abandoned the node in which case we repeat the
whole unlock operation. */
- if (!testandset(&p_max_prio->abandoned)) {
+ if (!try_lock(&p_max_prio->abandoned)) {
#if defined TEST_FOR_COMPARE_AND_SWAP
if (!__pthread_has_cas)
#endif
@@ -713,7 +711,7 @@
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
- int * spinlock)
+ __atomic_lock_t * spinlock)
{
int res;
@@ -751,14 +749,14 @@
- When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
sched_yield(), then sleeping again if needed. */
-static void __pthread_acquire(int * spinlock)
+static void __pthread_acquire(__atomic_lock_t * spinlock)
{
int cnt = 0;
struct timespec tm;
READ_MEMORY_BARRIER();
- while (testandset(spinlock)) {
+ while (try_lock(spinlock)) {
if (cnt < MAX_SPIN_COUNT) {
sched_yield();
cnt++;
diff -urN libc/linuxthreads/spinlock.h libc/linuxthreads/spinlock.h
--- libc/linuxthreads/spinlock.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/spinlock.h Wed Nov 14 09:43:58 2001
@@ -14,7 +14,6 @@
#include <bits/initspin.h>
-
/* There are 2 compare and swap synchronization primitives with
different semantics:
@@ -37,10 +36,10 @@
extern int __pthread_has_cas;
extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
- int * spinlock);
+ __atomic_lock_t * spinlock);
static inline int compare_and_swap(long * ptr, long oldval, long newval,
- int * spinlock)
+ __atomic_lock_t * spinlock)
{
if (__builtin_expect (__pthread_has_cas, 1))
return __compare_and_swap(ptr, oldval, newval);
@@ -50,15 +49,11 @@
#elif defined(HAS_COMPARE_AND_SWAP)
-#ifdef IMPLEMENT_TAS_WITH_CAS
-#define testandset(p) !__compare_and_swap((long int *) p, 0, 1)
-#endif
-
#ifdef HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
static inline int
compare_and_swap_with_release_semantics (long * ptr, long oldval,
- long newval, int * spinlock)
+ long newval, __atomic_lock_t * spinlock)
{
return __compare_and_swap_with_release_semantics (ptr, oldval,
newval);
@@ -67,7 +62,7 @@
#endif
static inline int compare_and_swap(long * ptr, long oldval, long newval,
- int * spinlock)
+ __atomic_lock_t * spinlock)
{
return __compare_and_swap(ptr, oldval, newval);
}
@@ -75,10 +70,10 @@
#else
extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
- int * spinlock);
+ __atomic_lock_t * spinlock);
static inline int compare_and_swap(long * ptr, long oldval, long newval,
- int * spinlock)
+ __atomic_lock_t * spinlock)
{
return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
}
@@ -99,7 +94,7 @@
static inline void __pthread_init_lock(struct _pthread_fastlock * lock)
{
lock->__status = 0;
- lock->__spinlock = __LT_SPINLOCK_INIT;
+ lock->__spinlock = __ATOMIC_LOCK_INIT;
}
static inline int __pthread_trylock (struct _pthread_fastlock * lock)
@@ -109,7 +104,7 @@
#endif
#if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
{
- return (testandset(&lock->__spinlock) ? EBUSY : 0);
+ return (try_lock(&lock->__spinlock) ? EBUSY : 0);
}
#endif
@@ -136,7 +131,7 @@
static inline void __pthread_alt_init_lock(struct _pthread_fastlock * lock)
{
lock->__status = 0;
- lock->__spinlock = __LT_SPINLOCK_INIT;
+ lock->__spinlock = __ATOMIC_LOCK_INIT;
}
static inline int __pthread_alt_trylock (struct _pthread_fastlock * lock)
@@ -148,7 +143,7 @@
{
int res = EBUSY;
- if (testandset(&lock->__spinlock) == 0)
+ if (try_lock(&lock->__spinlock) == 0)
{
if (lock->__status == 0)
{
@@ -156,7 +151,7 @@
WRITE_MEMORY_BARRIER();
res = 0;
}
- lock->__spinlock = __LT_SPINLOCK_INIT;
+ lock->__spinlock = __ATOMIC_LOCK_INIT;
}
return res;
}
diff -urN libc/linuxthreads/sysdeps/alpha/pt-machine.h libc/linuxthreads/sysdeps/alpha/pt-machine.h
--- libc/linuxthreads/sysdeps/alpha/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/alpha/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -39,8 +39,8 @@
/* Spinlock implementation; required. */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock (int *spinlock)
{
long int ret, temp;
@@ -58,9 +58,10 @@
: "m"(*spinlock)
: "memory");
- return ret;
+ return (int) ret;
}
+#define lock_held(p) *(p)
/* Begin allocating thread stacks at this address. Default is to allocate
them just below the initial program stack. */
diff -urN libc/linuxthreads/sysdeps/arm/pt-machine.h libc/linuxthreads/sysdeps/arm/pt-machine.h
--- libc/linuxthreads/sysdeps/arm/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/arm/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -29,8 +29,8 @@
time; let's hope nobody tries to use one. */
/* Spinlock implementation; required. */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
{
register unsigned int ret;
@@ -41,6 +41,7 @@
return ret;
}
+#define lock_held(p) *(p)
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame. */
diff -urN libc/linuxthreads/sysdeps/cris/pt-machine.h libc/linuxthreads/sysdeps/cris/pt-machine.h
--- libc/linuxthreads/sysdeps/cris/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/cris/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -22,10 +22,10 @@
# define PT_EI extern inline
#endif
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
{
- register unsigned long int ret;
+ register unsigned int ret;
/* Note the use of a dummy output of *spinlock to expose the write. The
memory barrier is to stop *other* writes being moved past this code. */
@@ -42,6 +42,7 @@
return ret;
}
+#define lock_held(p) *(p)
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame.
diff -urN libc/linuxthreads/sysdeps/hppa/pspinlock.c libc/linuxthreads/sysdeps/hppa/pspinlock.c
--- libc/linuxthreads/sysdeps/hppa/pspinlock.c Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/hppa/pspinlock.c Wed Nov 14 09:43:58 2001
@@ -21,18 +21,20 @@
#include <pthread.h>
#include "internals.h"
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+#define __ldcw(a) ({ \
+ unsigned __ret; \
+ __asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
+ __ret; \
+})
+
int
__pthread_spin_lock (pthread_spinlock_t *lock)
{
- unsigned int val;
-
- do
- asm volatile ("ldcw %1,%0"
- : "=r" (val), "=m" (*lock)
- : "m" (*lock));
- while (!val);
+ while (__ldcw (*lock) == 0)
+ while (*lock == 0) ;
- return 0;
+ return 0;
}
weak_alias (__pthread_spin_lock, pthread_spin_lock)
@@ -40,11 +42,7 @@
int
__pthread_spin_trylock (pthread_spinlock_t *lock)
{
- unsigned int val;
-
- asm volatile ("ldcw %1,%0"
- : "=r" (val), "=m" (*lock)
- : "m" (*lock));
+ unsigned int val = __ldcw(*lock);
return val ? 0 : EBUSY;
}
diff -urN libc/linuxthreads/sysdeps/hppa/pt-machine.h libc/linuxthreads/sysdeps/hppa/pt-machine.h
--- libc/linuxthreads/sysdeps/hppa/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/hppa/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -19,6 +19,7 @@
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#include <sys/types.h>
#include <bits/initspin.h>
#ifndef PT_EI
@@ -30,16 +31,39 @@
#define CURRENT_STACK_FRAME stack_pointer
register char * stack_pointer __asm__ ("%r30");
+/* Get/Set thread-specific pointer. We have to call into the kernel to
+ * modify it, but we can read it in user mode. */
+
+#define THREAD_SELF __get_cr27()
+
+static inline struct _pthread_descr_struct * __get_cr27(void)
+{
+ long cr27;
+ asm("mfctl %%cr27, %0" : "=r" (cr27) : );
+ return (struct _pthread_descr_struct *) cr27;
+}
+
+#define INIT_THREAD_SELF(descr, nr) __set_cr27(descr)
+
+static inline void __set_cr27(struct _pthread_descr_struct * cr27)
+{
+ asm(
+ "ble 0xe0(%%sr2, %%r0)\n\t"
+ "copy %0, %%r26"
+ : : "r" (cr27) : "r26" );
+}
+
+/* We want the OS to assign stack addresses. */
+#define FLOATING_STACKS 1
+#define ARCH_STACK_MAX_SIZE 8*1024*1024
/* The hppa only has one atomic read and modify memory operation,
load and clear, so hppa spinlocks must use zero to signify that
someone is holding the lock. */
-#define xstr(s) str(s)
-#define str(s) #s
/* Spinlock implementation; required. */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
{
int ret;
@@ -80,7 +80,11 @@
return ret == 0;
}
-#undef str
-#undef xstr
+PT_EI int
+lock_held(__atomic_lock_t *spinlock)
+{
+ return spinlock->lock == 0;
+}
+
#endif /* pt-machine.h */
diff -urN libc/linuxthreads/sysdeps/i386/i686/pt-machine.h libc/linuxthreads/sysdeps/i386/i686/pt-machine.h
--- libc/linuxthreads/sysdeps/i386/i686/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/i386/i686/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -30,10 +30,10 @@
/* Spinlock implementation; required. */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock (int *spinlock)
{
- long int ret;
+ int ret;
__asm__ __volatile__ (
"xchgl %0, %1"
@@ -44,6 +44,7 @@
return ret;
}
+#define lock_held(p) *(p)
/* Compare-and-swap for semaphores. It's always available on i686. */
#define HAS_COMPARE_AND_SWAP
diff -urN libc/linuxthreads/sysdeps/i386/pt-machine.h libc/linuxthreads/sysdeps/i386/pt-machine.h
--- libc/linuxthreads/sysdeps/i386/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/i386/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -29,10 +29,10 @@
/* Spinlock implementation; required. */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
{
- long int ret;
+ int ret;
__asm__ __volatile__(
"xchgl %0, %1"
@@ -43,6 +43,7 @@
return ret;
}
+#define lock_held(p) *(p)
/* Compare-and-swap for semaphores.
Available on the 486 and above, but not on the 386.
diff -urN libc/linuxthreads/sysdeps/ia64/pt-machine.h libc/linuxthreads/sysdeps/ia64/pt-machine.h
--- libc/linuxthreads/sysdeps/ia64/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/ia64/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -97,8 +97,8 @@
#endif /* ELF_MACHINE_NAME */
/* Spinlock implementation; required. */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
{
long int ret;
@@ -120,7 +120,9 @@
: "r"(1), "1"(__atomic_fool_gcc (spinlock))
: "memory");
- return ret;
+ return (int) ret;
}
+#define lock_held(p) *(p)
+
#endif /* pt-machine.h */
diff -urN libc/linuxthreads/sysdeps/m68k/pt-machine.h libc/linuxthreads/sysdeps/m68k/pt-machine.h
--- libc/linuxthreads/sysdeps/m68k/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/m68k/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -25,8 +25,8 @@
/* Spinlock implementation; required. */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
{
char ret;
@@ -38,6 +38,7 @@
return ret;
}
+#define lock_held(p) *(p)
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame. */
diff -urN libc/linuxthreads/sysdeps/mips/pt-machine.h libc/linuxthreads/sysdeps/mips/pt-machine.h
--- libc/linuxthreads/sysdeps/mips/pt-machine.h Sat Mar 2 19:37:24 2002
+++ libc/linuxthreads/sysdeps/mips/pt-machine.h Sat Mar 2 19:38:20 2002
@@ -33,12 +33,13 @@
/* Spinlock implementation; required. */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock (__atomic_lock_t *spinlock)
{
return _test_and_set (spinlock, 1);
}
+#define lock_held(p) *(p)
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame. */
diff -urN libc/linuxthreads/sysdeps/powerpc/powerpc32/pt-machine.h libc/linuxthreads/sysdeps/powerpc/powerpc32/pt-machine.h
--- libc/linuxthreads/sysdeps/powerpc/powerpc32/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/powerpc/powerpc32/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -39,7 +39,13 @@
/* note that test-and-set(x) is the same as !compare-and-swap(x, 0, 1) */
#define HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
-#define IMPLEMENT_TAS_WITH_CAS
+
+static inline int try_lock(__atomic_lock_t *p)
+{
+ return !__compare_and_swap((long int *) p, 0, 1);
+}
+
+#define lock_held(p) *(p)
PT_EI int
__compare_and_swap (long int *p, long int oldval, long int newval)
--- glibc-2.2.93/linuxthreads/sysdeps/powerpc/powerpc64/pt-machine.h.orig 2002-09-14 04:55:04.000000000 -0400
+++ glibc-2.2.93/linuxthreads/sysdeps/powerpc/powerpc64/pt-machine.h 2002-09-20 19:46:43.000000000 -0400
@@ -63,6 +63,13 @@
#define HAS_COMPARE_AND_SWAP
#define HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
+static inline int try_lock(__atomic_lock_t *p)
+{
+ return !__compare_and_swap((long int *) p, 0, 1);
+}
+
+#define lock_held(p) *(p)
+
PT_EI int
__compare_and_swap (long int *p, long int oldval, long int newval)
{
diff -urN libc/linuxthreads/sysdeps/pthread/bits/initspin.h libc/linuxthreads/sysdeps/pthread/bits/initspin.h
--- libc/linuxthreads/sysdeps/pthread/bits/initspin.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/pthread/bits/initspin.h Wed Nov 14 09:43:58 2001
@@ -20,9 +20,8 @@
/* Initial value of a spinlock. Most platforms should use zero,
unless they only implement a "test and clear" operation instead of
the usual "test and set". */
-#define __LT_SPINLOCK_INIT 0
+#define __ATOMIC_LOCK_INIT 0
/* Macros for lock initializers, using the above definition. */
-#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+#define __LOCK_INITIALIZER { 0, 0 }
+#define __ATOMIC_INITIALIZER { 0, 0 }
diff -urN libc/linuxthreads/sysdeps/pthread/bits/libc-lock.h libc/linuxthreads/sysdeps/pthread/bits/libc-lock.h
--- libc/linuxthreads/sysdeps/pthread/bits/libc-lock.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/pthread/bits/libc-lock.h Wed Nov 14 09:43:58 2001
@@ -63,12 +63,12 @@
initialized locks must be set to one due to the lack of normal
atomic operations.) */
-#if __LT_SPINLOCK_INIT == 0
+#ifdef __LOCK_INITIALISER_NOT_ZERO
# define __libc_lock_define_initialized(CLASS,NAME) \
- CLASS __libc_lock_t NAME;
+ CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
#else
# define __libc_lock_define_initialized(CLASS,NAME) \
- CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
+ CLASS __libc_lock_t NAME;
#endif
#define __libc_rwlock_define_initialized(CLASS,NAME) \
diff -urN libc/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h libc/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h
--- libc/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h Wed Nov 14 09:43:58 2001
@@ -22,11 +22,13 @@
#define __need_schedparam
#include <bits/sched.h>
+typedef int __atomic_lock_t;
+
/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
struct _pthread_fastlock
{
long int __status; /* "Free" or "taken" or head of waiting list */
- int __spinlock; /* Used by compare_and_swap emulation. Also,
+ __atomic_lock_t __spinlock; /* Used by compare_and_swap emulation. Also,
adaptive SMP lock stores spin count here. */
};
diff -urN libc/linuxthreads/sysdeps/s390/s390-32/pt-machine.h libc/linuxthreads/sysdeps/s390/s390-32/pt-machine.h
--- libc/linuxthreads/sysdeps/s390/s390-32/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/s390/s390-32/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -24,8 +24,8 @@
#endif
/* Spinlock implementation; required. */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock (int *spinlock)
{
int ret;
@@ -41,6 +41,7 @@
return ret;
}
+#define lock_held(p) *(p)
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame. */
diff -urN libc/linuxthreads/sysdeps/s390/s390-64/pt-machine.h libc/linuxthreads/sysdeps/s390/s390-64/pt-machine.h
--- libc/linuxthreads/sysdeps/s390/s390-64/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/s390/s390-64/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -24,7 +24,7 @@
#endif
/* Spinlock implementation; required. */
-PT_EI long int
+PT_EI int
testandset (int *spinlock)
{
int ret;
@@ -41,6 +41,7 @@
return ret;
}
+#define lock_held(p) *(p)
/* Get some notion of the current stack. Need not be exactly the top
of the stack, just something somewhere in the current frame. */
diff -urN libc/linuxthreads/sysdeps/sh/pt-machine.h libc/linuxthreads/sysdeps/sh/pt-machine.h
--- libc/linuxthreads/sysdeps/sh/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/sh/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -24,8 +24,8 @@
#endif
/* Spinlock implementation; required. */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock(__atomic_lock_t *spinlock)
{
int ret;
@@ -45,6 +45,8 @@
return (ret == 0);
}
+#define lock_held(p) *(p)
+
/* We want the OS to assign stack addresses. */
#define FLOATING_STACKS 1
diff -urN libc/linuxthreads/sysdeps/sparc/sparc32/pt-machine.h libc/linuxthreads/sysdeps/sparc/sparc32/pt-machine.h
--- libc/linuxthreads/sysdeps/sparc/sparc32/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/sparc/sparc32/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -24,8 +24,8 @@
#endif
/* Spinlock implementation; required. */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock (int *spinlock)
{
int ret;
@@ -36,6 +36,7 @@
return ret;
}
+#define lock_held(p) *(p)
/* Memory barrier; default is to do nothing */
#define MEMORY_BARRIER() __asm__ __volatile__("stbar" : : : "memory")
diff -urN libc/linuxthreads/sysdeps/sparc/sparc64/pt-machine.h libc/linuxthreads/sysdeps/sparc/sparc64/pt-machine.h
--- libc/linuxthreads/sysdeps/sparc/sparc64/pt-machine.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/sparc/sparc64/pt-machine.h Wed Nov 14 09:43:58 2001
@@ -25,8 +25,8 @@
/* Spinlock implementation; required. */
-PT_EI long int
-testandset (int *spinlock)
+PT_EI int
+try_lock (int *spinlock)
{
int ret;
@@ -36,6 +36,7 @@
return ret;
}
+#define lock_held(p) *(p)
/* Memory barrier; default is to do nothing */
#define MEMORY_BARRIER() \
diff -urN libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h
--- libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h Wed Nov 14 09:47:09 2001
+++ libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h Wed Nov 14 09:43:58 2001
@@ -19,9 +19,12 @@
/* Initial value of a spinlock. PA-RISC only implements atomic load
and clear so this must be non-zero. */
-#define __LT_SPINLOCK_INIT 1
+#define __ATOMIC_LOCK_INIT ((__atomic_lock_t) { 1 })
/* Macros for lock initializers, using the above definition. */
-#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
-#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+#define __LOCK_INITIALIZER { { 1 }, 0 }
+#define __ATOMIC_INITIALIZER { 0, { 1 } }
+
+/* Tell the generic code it can't put locks in the bss section */
+
+#define __LOCK_INITIALISER_NOT_ZERO
diff -urN libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h
--- libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h Wed Dec 31 19:00:00 1969
+++ libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h Wed Nov 14 09:43:58 2001
@@ -0,0 +1,146 @@
+/* Linuxthreads - a simple clone()-based implementation of Posix */
+/* threads for Linux. */
+/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
+/* */
+/* This program is free software; you can redistribute it and/or */
+/* modify it under the terms of the GNU Library General Public License */
+/* as published by the Free Software Foundation; either version 2 */
+/* of the License, or (at your option) any later version. */
+/* */
+/* This program is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU Library General Public License for more details. */
+
+#if !defined _BITS_TYPES_H && !defined _PTHREAD_H
+# error "Never include <bits/pthreadtypes.h> directly; use <sys/types.h> instead."
+#endif
+
+#ifndef _BITS_PTHREADTYPES_H
+#define _BITS_PTHREADTYPES_H 1
+
+#define __need_schedparam
+#include <bits/sched.h>
+
+typedef struct {
+ int lock;
+} __attribute__((aligned (16))) __atomic_lock_t;
+
+/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
+struct _pthread_fastlock
+{
+ __atomic_lock_t __spinlock; /* Used by compare_and_swap emulation. Also,
+ adaptive SMP lock stores spin count here. */
+ long int __status; /* "Free" or "taken" or head of waiting list */
+};
+
+#ifndef _PTHREAD_DESCR_DEFINED
+/* Thread descriptors */
+typedef struct _pthread_descr_struct *_pthread_descr;
+# define _PTHREAD_DESCR_DEFINED
+#endif
+
+
+/* Attributes for threads. */
+typedef struct __pthread_attr_s
+{
+ int __detachstate;
+ int __schedpolicy;
+ struct __sched_param __schedparam;
+ int __inheritsched;
+ int __scope;
+ size_t __guardsize;
+ int __stackaddr_set;
+ void *__stackaddr;
+ size_t __stacksize;
+} pthread_attr_t;
+
+
+/* Conditions (not abstract because of PTHREAD_COND_INITIALIZER */
+typedef struct
+{
+ struct _pthread_fastlock __c_lock; /* Protect against concurrent access */
+ _pthread_descr __c_waiting; /* Threads waiting on this condition */
+} pthread_cond_t;
+
+
+/* Attribute for conditionally variables. */
+typedef struct
+{
+ int __dummy;
+} pthread_condattr_t;
+
+/* Keys for thread-specific data */
+typedef unsigned int pthread_key_t;
+
+
+/* Mutexes (not abstract because of PTHREAD_MUTEX_INITIALIZER). */
+/* (The layout is unnatural to maintain binary compatibility
+ with earlier releases of LinuxThreads.) */
+typedef struct
+{
+ int __m_reserved; /* Reserved for future use */
+ int __m_count; /* Depth of recursive locking */
+ _pthread_descr __m_owner; /* Owner thread (if recursive or errcheck) */
+ int __m_kind; /* Mutex kind: fast, recursive or errcheck */
+ struct _pthread_fastlock __m_lock; /* Underlying fast lock */
+} pthread_mutex_t;
+
+
+/* Attribute for mutex. */
+typedef struct
+{
+ int __mutexkind;
+} pthread_mutexattr_t;
+
+
+/* Once-only execution */
+typedef int pthread_once_t;
+
+
+#ifdef __USE_UNIX98
+/* Read-write locks. */
+typedef struct _pthread_rwlock_t
+{
+ struct _pthread_fastlock __rw_lock; /* Lock to guarantee mutual exclusion */
+ int __rw_readers; /* Number of readers */
+ _pthread_descr __rw_writer; /* Identity of writer, or NULL if none */
+ _pthread_descr __rw_read_waiting; /* Threads waiting for reading */
+ _pthread_descr __rw_write_waiting; /* Threads waiting for writing */
+ int __rw_kind; /* Reader/Writer preference selection */
+ int __rw_pshared; /* Shared between processes or not */
+} pthread_rwlock_t;
+
+
+/* Attribute for read-write locks. */
+typedef struct
+{
+ int __lockkind;
+ int __pshared;
+} pthread_rwlockattr_t;
+#endif
+
+#ifdef __USE_XOPEN2K
+/* POSIX spinlock data type. */
+typedef volatile int pthread_spinlock_t __attribute__((aligned (16)));
+
+/* POSIX barrier. */
+typedef struct {
+ struct _pthread_fastlock __ba_lock; /* Lock to guarantee mutual exclusion */
+ int __ba_required; /* Threads needed for completion */
+ int __ba_present; /* Threads waiting */
+ _pthread_descr __ba_waiting; /* Queue of waiting threads */
+} pthread_barrier_t;
+
+/* barrier attribute */
+typedef struct {
+ int __pshared;
+} pthread_barrierattr_t;
+
+#endif
+
+
+/* Thread identifiers */
+typedef unsigned long int pthread_t;
+
+#endif /* bits/pthreadtypes.h */
diff -urN libc/sysdeps/hppa/dl-fptr.c libc/sysdeps/hppa/dl-fptr.c
--- libc/sysdeps/hppa/dl-fptr.c Wed Nov 14 09:47:09 2001
+++ libc/sysdeps/hppa/dl-fptr.c Wed Nov 14 09:43:58 2001
@@ -29,8 +29,7 @@
#ifdef _LIBC_REENTRANT
# include <pt-machine.h>
-/* Remember, we use 0 to mean that a lock is taken on PA-RISC. */
-static int __hppa_fptr_lock = 1;
+static __atomic_lock_t __hppa_fptr_lock = __ATOMIC_LOCK_INIT;
#endif
/* Because ld.so is now versioned, these functions can be in their own
@@ -66,7 +65,7 @@
#ifdef _LIBC_REENTRANT
/* Make sure we are alone. We don't need a lock during bootstrap. */
if (mem == NULL)
- while (testandset (&__hppa_fptr_lock));
+ while (try_lock(&__hppa_fptr_lock));
#endif
/* Search the sorted linked list for an existing entry for this
@@ -126,9 +125,8 @@
found:
#ifdef _LIBC_REENTRANT
- /* Release the lock. Again, remember, zero means the lock is taken! */
if (mem == NULL)
- __hppa_fptr_lock = 1;
+ __hppa_fptr_lock = __ATOMIC_LOCK_INIT;
#endif
/* Set bit 30 to indicate to $$dyncall that this is a PLABEL. */
@@ -147,7 +145,7 @@
#ifdef _LIBC_REENTRANT
/* Make sure we are alone. */
- while (testandset (&__hppa_fptr_lock));
+ while (try_lock(&__hppa_fptr_lock));
#endif
/* Search the sorted linked list for the first entry for this object. */
@@ -180,8 +178,7 @@
}
#ifdef _LIBC_REENTRANT
- /* Release the lock. */
- __hppa_fptr_lock = 1;
+ __hppa_fptr_lock = __ATOMIC_LOCK_INIT;
#endif
}
@@ -191,9 +188,11 @@
Elf32_Addr addr = (Elf32_Addr) address;
struct hppa_fptr *f;
+ address = (unsigned long)address &~ 3; /* Clear the bottom two bits. See make_fptr. */
+
#ifdef _LIBC_REENTRANT
/* Make sure we are alone. */
- while (testandset (&__hppa_fptr_lock));
+ while (try_lock(&__hppa_fptr_lock));
#endif
for (f = __fptr_root; f != NULL; f = f->next)
@@ -204,8 +203,7 @@
}
#ifdef _LIBC_REENTRANT
- /* Release the lock. */
- __hppa_fptr_lock = 1;
+ __hppa_fptr_lock = __ATOMIC_LOCK_INIT;
#endif
return addr;
diff -urN libc/sysdeps/ia64/dl-fptr.c libc/sysdeps/ia64/dl-fptr.c
--- libc/sysdeps/ia64/dl-fptr.c Wed Nov 14 09:47:09 2001
+++ libc/sysdeps/ia64/dl-fptr.c Wed Nov 14 09:43:58 2001
@@ -40,7 +40,7 @@
struct ia64_fdesc *free_list;
unsigned int npages; /* # of pages to allocate */
#ifdef _LIBC_REENTRANT
- volatile int lock;
+ __atomic_lock_t lock;
sigset_t full_sigset;
#endif
/* the next to members MUST be consecutive! */
@@ -73,7 +73,7 @@
if (!__sigismember (&(l)->full_sigset, SIGINT)) \
__sigfillset (&(l)->full_sigset); \
\
- while (testandset ((int *) &(l)->lock)) \
+ while (try_lock (&(l)->lock)) \
{ \
struct timespec ts; \
if (i > 0) \
@@ -88,7 +88,7 @@
__sigprocmask (SIG_BLOCK, &(l)->full_sigset, &_saved_set);
# define unlock(l) \
__sigprocmask (SIG_SETMASK, &_saved_set, NULL); \
- (l)->lock = 0; \
+ (l)->lock = __ATOMIC_LOCK_INIT; \
}
#else
# define lock(l)
Reply to: