
r956 - in glibc-package/trunk/debian: . patches



Author: gotom
Date: 2005-07-17 16:06:06 +0000 (Sun, 17 Jul 2005)
New Revision: 956

Added:
   glibc-package/trunk/debian/patches/glibc235-hppa-lt.dpatch
   glibc-package/trunk/debian/patches/glibc235-hppa-sysdeps.dpatch
Modified:
   glibc-package/trunk/debian/changelog
   glibc-package/trunk/debian/patches/00list
Log:
    * To make glibc-2.3.5 buildable with gcc-4.0:
      - debian/patches/00list: Drop the glibc234-hppa-full-nptl-2004-12-20.dpatch
        line (commented out for now); it is replaced by the new patches
        glibc235-hppa-sysdeps.dpatch and glibc235-hppa-lt.dpatch and will be
        removed for good once hppa unstable works nicely.
    * Jeff Bailey <jbailey@ubuntu.com>:
      - debian/patches/glibc235-hppa-sysdeps.dpatch: New file, to fix the hppa
        linuxthreads locking problem and build the hppa sysdeps correctly again.
      - debian/patches/glibc235-hppa-lt.dpatch: New file, likewise.


Modified: glibc-package/trunk/debian/changelog
===================================================================
--- glibc-package/trunk/debian/changelog	2005-07-17 15:36:05 UTC (rev 955)
+++ glibc-package/trunk/debian/changelog	2005-07-17 16:06:06 UTC (rev 956)
@@ -3,19 +3,30 @@
   * GOTO Masanori <gotom@debian.org>
 
     * Localedata update:
-      - debian/patches/cvs-localedata.dpatch: Added to update localedata
+      - debian/patches/cvs-localedata.dpatch: New file, to update localedata
         to the latest cvs.  Reported by Safir Secerovic <esafir@yahoo.com>,
         that is already available in Denis Barbier's belocs-locales-data.
 
+    * To make glibc-2.3.5 buildable with gcc-4.0:
+      - debian/patches/00list: Drop the glibc234-hppa-full-nptl-2004-12-20.dpatch
+        line (commented out for now); it is replaced by the new patches
+        glibc235-hppa-sysdeps.dpatch and glibc235-hppa-lt.dpatch and will be
+        removed for good once hppa unstable works nicely.
+  
     * Michael Banck <mbanck@debian.org>:
-      - debian/patches/hurd-enable-ldconfig.dpatch: Added to build ldconfig
+      - debian/patches/hurd-enable-ldconfig.dpatch: New file, to build ldconfig
         again on Hurd.  (Closes: #309489)
       - debian/sysdeps/gnu.mk: Hurd-i386 needs --without-tls option to build.
-      - debian/patches/hurd-libpthread-indirect-loading.dpatch: Added to
+      - debian/patches/hurd-libpthread-indirect-loading.dpatch: New file, to
         make libpthread load indirectly on Hurd.  (Closes: #312488)
-      - debian/patches/hurd-ioctl-pfinet.dpatch: Added to support part of
+      - debian/patches/hurd-ioctl-pfinet.dpatch: New file, to support part of
         SIOCGIFHWADDR for pfinet on Hurd.  (Closes: #295117)
 
+    * Jeff Bailey <jbailey@ubuntu.com>:
+      - debian/patches/glibc235-hppa-sysdeps.dpatch: New file, to fix the hppa
+        linuxthreads locking problem and build the hppa sysdeps correctly again.
+      - debian/patches/glibc235-hppa-lt.dpatch: New file, likewise.
+
  -- GOTO Masanori <gotom@debian.org>  Sun, 17 Jul 2005 17:27:30 +0900
 
 glibc (2.3.5-2) experimental; urgency=low

Modified: glibc-package/trunk/debian/patches/00list
===================================================================
--- glibc-package/trunk/debian/patches/00list	2005-07-17 15:36:05 UTC (rev 955)
+++ glibc-package/trunk/debian/patches/00list	2005-07-17 16:06:06 UTC (rev 956)
@@ -54,8 +54,8 @@
 hppa-drop-utimes
 glibc234-alpha-xstat
 glibc234-hppa-linesep
-glibc234-hppa-full-nptl-2004-12-20
-#50_glibc232-hppa-full-nptl-2003-10-22	# g: lock initializer part is not applied, cheated by glibc234-hppa-remove-mallocdef.dpatch.
+#glibc234-hppa-full-nptl-2004-12-20	# g: both hppa-full-nptl patches
+#50_glibc232-hppa-full-nptl-2003-10-22	# g: will be dropped once hppa unstable works.
 glibc234-hppa-remove-mallocdef
 linuxthreads-sizefix
 glibc232-tls-crashfix
@@ -74,3 +74,5 @@
 glibc235-gcc4-mips-sysdeps
 hurd-libpthread-indirect-loading
 hurd-ioctl-pfinet
+glibc235-hppa-lt
+glibc235-hppa-sysdeps

Added: glibc-package/trunk/debian/patches/glibc235-hppa-lt.dpatch
===================================================================
--- glibc-package/trunk/debian/patches/glibc235-hppa-lt.dpatch	2005-07-17 15:36:05 UTC (rev 955)
+++ glibc-package/trunk/debian/patches/glibc235-hppa-lt.dpatch	2005-07-17 16:06:06 UTC (rev 956)
@@ -0,0 +1,924 @@
+#! /bin/sh -e
+
+# All lines beginning with `# DP:' are a description of the patch.
+# DP: Description: Make glibc-2.3.5 compile to enable hppa linuxthreads
+#		   correctly again.
+# DP: Related bugs: 
+# DP: Dpatch author: Jeff Bailey <jbailey@ubuntu.com>
+# DP: Patch author: Carlos O'Donell
+# DP: Upstream status: Pending
+# DP: Status Details: 
+# DP: Date: 2005-07-17
+
+PATCHLEVEL=1
+
+if [ $# -ne 2 ]; then
+    echo >&2 "`basename $0`: script expects -patch|-unpatch as argument"
+    exit 1
+fi
+case "$1" in
+    -patch) patch -d "$2" -f --no-backup-if-mismatch -p$PATCHLEVEL < $0;;
+    -unpatch) patch -d "$2" -f --no-backup-if-mismatch -R -p$PATCHLEVEL < $0;;
+    *)
+	echo >&2 "`basename $0`: script expects -patch|-unpatch as argument"
+	exit 1
+esac
+exit 0
+
+# append the patch here and adjust the -p? flag in the patch calls.
+--- libc-orig/linuxthreads/descr.h	2005-02-16 10:14:12.000000000 -0500
++++ libc/linuxthreads/descr.h	2005-02-16 10:11:09.000000000 -0500
+@@ -71,7 +71,7 @@
+ /* Atomic counter made possible by compare_and_swap */
+ struct pthread_atomic {
+   long p_count;
+-  int p_spinlock;
++  __atomic_lock_t p_spinlock;
+ };
+ 
+ 
+--- libc-orig/linuxthreads/oldsemaphore.c	2004-04-25 23:01:18.000000000 -0400
++++ libc/linuxthreads/oldsemaphore.c	2004-04-25 22:51:35.000000000 -0400
+@@ -31,7 +31,7 @@
+ 
+ typedef struct {
+     long int sem_status;
+-    int sem_spinlock;
++    __atomic_lock_t sem_spinlock;
+ } old_sem_t;
+ 
+ extern int __old_sem_init (old_sem_t *__sem, int __pshared, unsigned int __value);
+--- libc-orig/linuxthreads/pt-machine.c	2002-08-26 18:39:45.000000000 -0400
++++ libc/linuxthreads/pt-machine.c	2003-12-08 21:24:59.000000000 -0500
+@@ -19,7 +19,9 @@
+ 
+ #define PT_EI
+ 
+-extern long int testandset (int *spinlock);
++#include <pthread.h>
++
++extern long int testandset (__atomic_lock_t *spinlock);
+ extern int __compare_and_swap (long int *p, long int oldval, long int newval);
+ 
+ #include <pt-machine.h>
+--- libc-orig/linuxthreads/pthread.c	2005-01-28 14:39:43.000000000 -0500
++++ libc/linuxthreads/pthread.c	2005-01-28 14:15:29.000000000 -0500
+@@ -301,7 +301,7 @@
+   pthread_descr self;
+ 
+   /* First of all init __pthread_handles[0] and [1] if needed.  */
+-# if __LT_SPINLOCK_INIT != 0
++# ifdef __LT_INITIALIZER_NOT_ZERO
+   __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
+   __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
+ # endif
+@@ -371,7 +371,7 @@
+ # endif
+   /* self->p_start_args need not be initialized, it's all zero.  */
+   self->p_userstack = 1;
+-# if __LT_SPINLOCK_INIT != 0
++# ifdef __LT_INITIALIZER_NOT_ZERO 
+   self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
+ # endif
+   self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;
+@@ -385,7 +385,7 @@
+ #else  /* USE_TLS */
+ 
+   /* First of all init __pthread_handles[0] and [1].  */
+-# if __LT_SPINLOCK_INIT != 0
++# ifdef __LT_INITIALIZER_NOT_ZERO
+   __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
+   __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
+ # endif
+@@ -688,7 +688,7 @@
+ # endif
+   mgr->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
+   mgr->p_nr = 1;
+-# if __LT_SPINLOCK_INIT != 0
++# ifdef __LT_INITIALIZER_NOT_ZERO
+   self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
+ # endif
+   mgr->p_alloca_cutoff = PTHREAD_STACK_MIN / 4;
+--- libc-orig/linuxthreads/spinlock.c	2004-01-30 14:13:45.000000000 -0500
++++ libc/linuxthreads/spinlock.c	2004-01-30 10:47:31.000000000 -0500
+@@ -24,9 +24,9 @@
+ #include "spinlock.h"
+ #include "restart.h"
+ 
+-static void __pthread_acquire(int * spinlock);
++static void __pthread_acquire(__atomic_lock_t * spinlock);
+ 
+-static inline void __pthread_release(int * spinlock)
++static inline void __pthread_release(__atomic_lock_t * spinlock)
+ {
+   WRITE_MEMORY_BARRIER();
+   *spinlock = __LT_SPINLOCK_INIT;
+@@ -269,11 +269,11 @@
+ struct wait_node {
+   struct wait_node *next;	/* Next node in null terminated linked list */
+   pthread_descr thr;		/* The thread waiting with this node */
+-  int abandoned;		/* Atomic flag */
++  __atomic_lock_t abandoned;	/* Atomic flag */
+ };
+ 
+ static long wait_node_free_list;
+-static int wait_node_free_list_spinlock;
++__pthread_lock_define_initialized(static, wait_node_free_list_spinlock);
+ 
+ /* Allocate a new node from the head of the free list using an atomic
+    operation, or else using malloc if that list is empty.  A fundamental
+@@ -376,7 +376,7 @@
+       if (self == NULL)
+ 	self = thread_self();
+ 
+-      wait_node.abandoned = 0;
++      wait_node.abandoned = __LT_SPINLOCK_INIT;
+       wait_node.next = (struct wait_node *) lock->__status;
+       wait_node.thr = self;
+       lock->__status = (long) &wait_node;
+@@ -402,7 +402,7 @@
+       wait_node.thr = self;
+       newstatus = (long) &wait_node;
+     }
+-    wait_node.abandoned = 0;
++    wait_node.abandoned = __LT_SPINLOCK_INIT;
+     wait_node.next = (struct wait_node *) oldstatus;
+     /* Make sure the store in wait_node.next completes before performing
+        the compare-and-swap */
+@@ -451,7 +451,7 @@
+       if (self == NULL)
+ 	self = thread_self();
+ 
+-      p_wait_node->abandoned = 0;
++      p_wait_node->abandoned = __LT_SPINLOCK_INIT;
+       p_wait_node->next = (struct wait_node *) lock->__status;
+       p_wait_node->thr = self;
+       lock->__status = (long) p_wait_node;
+@@ -474,7 +474,7 @@
+       p_wait_node->thr = self;
+       newstatus = (long) p_wait_node;
+     }
+-    p_wait_node->abandoned = 0;
++    p_wait_node->abandoned = __LT_SPINLOCK_INIT;
+     p_wait_node->next = (struct wait_node *) oldstatus;
+     /* Make sure the store in wait_node.next completes before performing
+        the compare-and-swap */
+@@ -574,7 +574,7 @@
+     while (p_node != (struct wait_node *) 1) {
+       int prio;
+ 
+-      if (p_node->abandoned) {
++      if (lock_held(&p_node->abandoned)) {
+ 	/* Remove abandoned node. */
+ #if defined TEST_FOR_COMPARE_AND_SWAP
+ 	if (!__pthread_has_cas)
+@@ -662,7 +662,7 @@
+ #if !defined HAS_COMPARE_AND_SWAP || defined TEST_FOR_COMPARE_AND_SWAP
+ 
+ int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
+-                               int * spinlock)
++                               __atomic_lock_t * spinlock)
+ {
+   int res;
+ 
+@@ -699,7 +699,7 @@
+    - When nanosleep() returns, we try again, doing MAX_SPIN_COUNT
+      sched_yield(), then sleeping again if needed. */
+ 
+-static void __pthread_acquire(int * spinlock)
++static void __pthread_acquire(__atomic_lock_t * spinlock)	
+ {
+   int cnt = 0;
+   struct timespec tm;
+--- libc-orig/linuxthreads/spinlock.h	2003-07-31 15:16:04.000000000 -0400
++++ libc/linuxthreads/spinlock.h	2003-12-08 21:24:59.000000000 -0500
+@@ -33,14 +33,28 @@
+ #endif
+ #endif
+ 
++/* Define lock_held for all arches that don't need a modified copy. */
++#ifndef __LT_INITIALIZER_NOT_ZERO
++# define lock_held(p) *(p)
++#endif
++
++/* Initliazers for possibly complex structures */
++#ifdef __LT_INITIALIZER_NOT_ZERO
++# define __pthread_lock_define_initialized(CLASS,NAME) \
++	CLASS __atomic_lock_t NAME = __LT_SPINLOCK_ALT_INIT
++#else
++# define __pthread_lock_define_initialized(CLASS,NAME) \
++	CLASS __atomic_lock_t NAME
++#endif
++
+ #if defined(TEST_FOR_COMPARE_AND_SWAP)
+ 
+ extern int __pthread_has_cas;
+ extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
+-                                      int * spinlock);
++                                      __atomic_lock_t * spinlock);
+ 
+ static inline int compare_and_swap(long * ptr, long oldval, long newval,
+-                                   int * spinlock)
++                                   __atomic_lock_t * spinlock)
+ {
+   if (__builtin_expect (__pthread_has_cas, 1))
+     return __compare_and_swap(ptr, oldval, newval);
+@@ -58,7 +72,7 @@
+ 
+ static inline int
+ compare_and_swap_with_release_semantics (long * ptr, long oldval,
+-					 long newval, int * spinlock)
++					 long newval, __atomic_lock_t * spinlock)
+ {
+   return __compare_and_swap_with_release_semantics (ptr, oldval,
+ 						    newval);
+@@ -67,7 +81,7 @@
+ #endif
+ 
+ static inline int compare_and_swap(long * ptr, long oldval, long newval,
+-                                   int * spinlock)
++                                   __atomic_lock_t * spinlock)
+ {
+   return __compare_and_swap(ptr, oldval, newval);
+ }
+@@ -75,10 +89,10 @@
+ #else
+ 
+ extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
+-                                      int * spinlock);
++                                      __atomic_lock_t * spinlock);
+ 
+ static inline int compare_and_swap(long * ptr, long oldval, long newval,
+-                                   int * spinlock)
++                                   __atomic_lock_t * spinlock)
+ {
+   return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
+ }
+--- libc-orig/linuxthreads/sysdeps/hppa/pspinlock.c	2002-08-26 18:39:51.000000000 -0400
++++ libc/linuxthreads/sysdeps/hppa/pspinlock.c	2004-08-15 14:22:02.000000000 -0400
+@@ -24,13 +24,10 @@
+ int
+ __pthread_spin_lock (pthread_spinlock_t *lock)
+ {
+-  unsigned int val;
++  volatile unsigned int *addr = __ldcw_align (lock);
+ 
+-  do
+-    asm volatile ("ldcw %1,%0"
+-		  : "=r" (val), "=m" (*lock)
+-		  : "m" (*lock));
+-  while (!val);
++  while (__ldcw (addr) == 0)
++    while (*addr == 0) ;
+ 
+   return 0;
+ }
+@@ -40,13 +37,9 @@
+ int
+ __pthread_spin_trylock (pthread_spinlock_t *lock)
+ {
+-  unsigned int val;
++  volatile unsigned int *a = __ldcw_align (lock);
+ 
+-  asm volatile ("ldcw %1,%0"
+-		: "=r" (val), "=m" (*lock)
+-		: "m" (*lock));
+-
+-  return val ? 0 : EBUSY;
++  return __ldcw (a) ? 0 : EBUSY;
+ }
+ weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+ 
+@@ -54,7 +47,11 @@
+ int
+ __pthread_spin_unlock (pthread_spinlock_t *lock)
+ {
+-  *lock = 1;
++  volatile unsigned int *a = __ldcw_align (lock);
++  int tmp = 1;
++  /* This should be a memory barrier to newer compilers */
++  __asm__ __volatile__ ("stw,ma %1,0(%0)"
++                        : : "r" (a), "r" (tmp) : "memory");           
+   return 0;
+ }
+ weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+@@ -66,7 +63,11 @@
+   /* We can ignore the `pshared' parameter.  Since we are busy-waiting
+      all processes which can access the memory location `lock' points
+      to can use the spinlock.  */
+-  *lock = 1;
++  volatile unsigned int *a = __ldcw_align (lock);
++  int tmp = 1;
++  /* This should be a memory barrier to newer compilers */
++  __asm__ __volatile__ ("stw,ma %1,0(%0)"
++                        : : "r" (a), "r" (tmp) : "memory");           
+   return 0;
+ }
+ weak_alias (__pthread_spin_init, pthread_spin_init)
+--- libc-orig/linuxthreads/sysdeps/hppa/pt-machine.h	2003-07-31 15:15:42.000000000 -0400
++++ libc/linuxthreads/sysdeps/hppa/pt-machine.h	2004-08-23 14:39:23.000000000 -0400
+@@ -22,41 +22,103 @@
+ #ifndef _PT_MACHINE_H
+ #define _PT_MACHINE_H   1
+ 
++#include <sys/types.h>
+ #include <bits/initspin.h>
+ 
+ #ifndef PT_EI
+ # define PT_EI extern inline __attribute__ ((always_inline))
+ #endif
+ 
+-extern long int testandset (int *spinlock);
+-extern int __compare_and_swap (long int *p, long int oldval, long int newval);
++extern inline long int testandset (__atomic_lock_t *spinlock);
++extern inline int __compare_and_swap (long int *p, long int oldval, long int newval);
++extern inline int lock_held (__atomic_lock_t *spinlock); 
++extern inline int __load_and_clear (__atomic_lock_t *spinlock);
+ 
+ /* Get some notion of the current stack.  Need not be exactly the top
+    of the stack, just something somewhere in the current frame.  */
+ #define CURRENT_STACK_FRAME  stack_pointer
+ register char * stack_pointer __asm__ ("%r30");
+ 
++/* Get/Set thread-specific pointer.  We have to call into the kernel to
++ * modify it, but we can read it in user mode.  */
++
++#define THREAD_SELF __get_cr27()
++
++static inline struct _pthread_descr_struct * __get_cr27(void)
++{
++	long cr27;
++	asm("mfctl %%cr27, %0" : "=r" (cr27) : );
++	return (struct _pthread_descr_struct *) cr27;
++}
++
++#define INIT_THREAD_SELF(descr, nr) __set_cr27(descr)
++
++static inline void __set_cr27(struct _pthread_descr_struct * cr27)
++{
++	asm(
++		"ble	0xe0(%%sr2, %%r0)\n\t"
++		"copy	%0, %%r26"
++	 : : "r" (cr27) : "r26" );
++}
++
++/* We want the OS to assign stack addresses.  */
++#define FLOATING_STACKS	1
++#define ARCH_STACK_MAX_SIZE	8*1024*1024
+ 
+ /* The hppa only has one atomic read and modify memory operation,
+    load and clear, so hppa spinlocks must use zero to signify that
+-   someone is holding the lock.  */
++   someone is holding the lock.  The address used for the ldcw
++   semaphore must be 16-byte aligned.  */
++#define __ldcw(a) ({ \
++  unsigned int __ret;							\
++  __asm__ __volatile__("ldcw 0(%1),%0"					\
++                      : "=r" (__ret) : "r" (a) : "memory");		\
++  __ret;								\
++})
++
++/* Strongly ordered lock reset */
++#define __lock_reset(lock_addr, tmp) ({						\
++	__asm__ __volatile__ ("stw,ma %1,0(%0)"					\
++				: : "r" (lock_addr), "r" (tmp) : "memory"); 	\
++    })
++
++/* Because malloc only guarantees 8-byte alignment for malloc'd data,
++   and GCC only guarantees 8-byte alignment for stack locals, we can't
++   be assured of 16-byte alignment for atomic lock data even if we
++   specify "__attribute ((aligned(16)))" in the type declaration.  So,
++   we use a struct containing an array of four ints for the atomic lock
++   type and dynamically select the 16-byte aligned int from the array
++   for the semaphore.  */
++#define __PA_LDCW_ALIGNMENT 16
++#define __ldcw_align(a) ({ \
++  volatile unsigned int __ret = (unsigned int) a;			\
++  if ((__ret & ~(__PA_LDCW_ALIGNMENT - 1)) < (unsigned int) a)		\
++    __ret = (__ret & ~(__PA_LDCW_ALIGNMENT - 1)) + __PA_LDCW_ALIGNMENT; \
++  (unsigned int *) __ret;						\
++})
+ 
+-#define xstr(s) str(s)
+-#define str(s) #s
+ /* Spinlock implementation; required.  */
+-PT_EI long int
+-testandset (int *spinlock)
++PT_EI int
++__load_and_clear (__atomic_lock_t *spinlock)
+ {
+-  int ret;
++  volatile unsigned int *a = __ldcw_align (spinlock);
+ 
+-  __asm__ __volatile__(
+-       "ldcw 0(%2),%0"
+-       : "=r"(ret), "=m"(*spinlock)
+-       : "r"(spinlock));
++  return __ldcw (a);
++}
+ 
+-  return ret == 0;
++/* Emulate testandset */
++PT_EI long int
++testandset (__atomic_lock_t *spinlock)
++{
++  return (__load_and_clear(spinlock) == 0);
+ }
+-#undef str
+-#undef xstr
+ 
++PT_EI int
++lock_held (__atomic_lock_t *spinlock)
++{
++  volatile unsigned int *a = __ldcw_align (spinlock);
++
++  return *a == 0;
++}
++		
+ #endif /* pt-machine.h */
+--- libc-orig/linuxthreads/sysdeps/pthread/bits/initspin.h	2002-08-26 18:39:44.000000000 -0400
++++ libc/linuxthreads/sysdeps/pthread/bits/initspin.h	2004-02-23 09:36:18.000000000 -0500
+@@ -23,6 +23,7 @@
+ #define __LT_SPINLOCK_INIT 0
+ 
+ /* Macros for lock initializers, using the above definition. */
+-#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
++#define __LOCK_INITIALIZER ((struct _pthread_fastlock){ 0, __LT_SPINLOCK_INIT })
++#define __LOCK_ALT_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+ #define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+ #define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+--- libc-orig/linuxthreads/sysdeps/pthread/bits/libc-lock.h	2003-09-23 00:33:20.000000000 -0400
++++ libc/linuxthreads/sysdeps/pthread/bits/libc-lock.h	2003-12-08 21:25:00.000000000 -0500
+@@ -71,12 +71,12 @@
+    initialized locks must be set to one due to the lack of normal
+    atomic operations.) */
+ 
+-#if __LT_SPINLOCK_INIT == 0
++#ifdef __LT_INITIALIZER_NOT_ZERO
+ #  define __libc_lock_define_initialized(CLASS,NAME) \
+-  CLASS __libc_lock_t NAME;
++  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
+ #else
+ #  define __libc_lock_define_initialized(CLASS,NAME) \
+-  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
++  CLASS __libc_lock_t NAME;
+ #endif
+ 
+ #define __libc_rwlock_define_initialized(CLASS,NAME) \
+--- libc-orig/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h	2004-09-17 12:24:47.000000000 -0400
++++ libc/linuxthreads/sysdeps/pthread/bits/pthreadtypes.h	2004-09-17 12:24:19.000000000 -0400
+@@ -22,12 +22,14 @@
+ #define __need_schedparam
+ #include <bits/sched.h>
+ 
++typedef int __atomic_lock_t;
++
+ /* Fast locks (not abstract because mutexes and conditions aren't abstract). */
+ struct _pthread_fastlock
+ {
+-  long int __status;   /* "Free" or "taken" or head of waiting list */
+-  int __spinlock;      /* Used by compare_and_swap emulation. Also,
+-			  adaptive SMP lock stores spin count here. */
++  long int __status;		/* "Free" or "taken" or head of waiting list */
++  __atomic_lock_t __spinlock;	/* Used by compare_and_swap emulation. Also,
++				   adaptive SMP lock stores spin count here. */
+ };
+ 
+ #ifndef _PTHREAD_DESCR_DEFINED
+--- libc-orig/linuxthreads/sysdeps/pthread/pthread.h	2004-09-21 17:55:20.000000000 -0400
++++ libc/linuxthreads/sysdeps/pthread/pthread.h	2004-09-21 17:55:03.000000000 -0400
+@@ -31,26 +31,26 @@
+ /* Initializers.  */
+ 
+ #define PTHREAD_MUTEX_INITIALIZER \
+-  {0, 0, 0, PTHREAD_MUTEX_TIMED_NP, __LOCK_INITIALIZER}
++  {0, 0, 0, PTHREAD_MUTEX_TIMED_NP, __LOCK_ALT_INITIALIZER}
+ #ifdef __USE_GNU
+ # define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
+-  {0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __LOCK_INITIALIZER}
++  {0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __LOCK_ALT_INITIALIZER}
+ # define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
+-  {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __LOCK_INITIALIZER}
++  {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __LOCK_ALT_INITIALIZER}
+ # define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
+-  {0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __LOCK_INITIALIZER}
++  {0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __LOCK_ALT_INITIALIZER}
+ #endif
+ 
+-#define PTHREAD_COND_INITIALIZER {__LOCK_INITIALIZER, 0, "", 0}
++#define PTHREAD_COND_INITIALIZER {__LOCK_ALT_INITIALIZER, 0, "", 0}
+ 
+ #if defined __USE_UNIX98 || defined __USE_XOPEN2K
+ # define PTHREAD_RWLOCK_INITIALIZER \
+-  { __LOCK_INITIALIZER, 0, NULL, NULL, NULL,				      \
++  { __LOCK_ALT_INITIALIZER, 0, NULL, NULL, NULL,			      \
+     PTHREAD_RWLOCK_DEFAULT_NP, PTHREAD_PROCESS_PRIVATE }
+ #endif
+ #ifdef __USE_GNU
+ # define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
+-  { __LOCK_INITIALIZER, 0, NULL, NULL, NULL,				      \
++  { __LOCK_ALT_INITIALIZER, 0, NULL, NULL, NULL,			      \
+     PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, PTHREAD_PROCESS_PRIVATE }
+ #endif
+ 
+--- libc-orig/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h	2002-08-26 18:39:55.000000000 -0400
++++ libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h	2004-02-23 09:35:37.000000000 -0500
+@@ -19,9 +19,23 @@
+ 
+ /* Initial value of a spinlock.  PA-RISC only implements atomic load
+    and clear so this must be non-zero. */
+-#define __LT_SPINLOCK_INIT 1
++#define __LT_SPINLOCK_INIT ((__atomic_lock_t) { { 1, 1, 1, 1 } })
++
++/* Initialize global spinlocks without cast, generally macro wrapped */
++#define __LT_SPINLOCK_ALT_INIT { { 1, 1, 1, 1 } }
++
++/* Macros for lock initializers, not using the above definition.
++   The above definition is not used in the case that static initializers
++   use this value. */
++#define __LOCK_ALT_INITIALIZER { __LT_SPINLOCK_ALT_INIT, 0 }
++
++/* Used to initialize _pthread_fastlock's in non-static case */
++#define __LOCK_INITIALIZER ((struct _pthread_fastlock){ __LT_SPINLOCK_INIT, 0 })
++
++/* Used in pthread_atomic initialization */
++#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_ALT_INIT }
++
++/* Tell the rest of the code that the initializer is non-zero without
++   explaining it's internal structure */
++#define __LT_INITIALIZER_NOT_ZERO
+ 
+-/* Macros for lock initializers, using the above definition. */
+-#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+-#define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+-#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
+--- libc-orig/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h	1969-12-31 19:00:00.000000000 -0500
++++ libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h	2003-12-08 21:25:00.000000000 -0500
+@@ -0,0 +1,160 @@
++/* Linuxthreads - a simple clone()-based implementation of Posix        */
++/* threads for Linux.                                                   */
++/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)              */
++/*                                                                      */
++/* This program is free software; you can redistribute it and/or        */
++/* modify it under the terms of the GNU Library General Public License  */
++/* as published by the Free Software Foundation; either version 2       */
++/* of the License, or (at your option) any later version.               */
++/*                                                                      */
++/* This program is distributed in the hope that it will be useful,      */
++/* but WITHOUT ANY WARRANTY; without even the implied warranty of       */
++/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        */
++/* GNU Library General Public License for more details.                 */
++
++#if !defined _BITS_TYPES_H && !defined _PTHREAD_H
++# error "Never include <bits/pthreadtypes.h> directly; use <sys/types.h> instead."
++#endif
++
++#ifndef _BITS_PTHREADTYPES_H
++#define _BITS_PTHREADTYPES_H	1
++
++#define __need_schedparam
++#include <bits/sched.h>
++
++/* We need 128-bit alignment for the ldcw semaphore.  At most, we are
++   assured of 64-bit alignment for stack locals and malloc'd data.  Thus,
++   we use a struct with four ints for the atomic lock type.  The locking
++   code will figure out which of the four to use for the ldcw semaphore.  */
++typedef volatile struct {
++  int lock[4];
++} __attribute__ ((aligned(16))) __atomic_lock_t;
++
++/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
++struct _pthread_fastlock
++{
++  __atomic_lock_t __spinlock;	/* Used by compare_and_swap emulation.  Also,
++				   adaptive SMP lock stores spin count here. */
++  long int __status;		/* "Free" or "taken" or head of waiting list */
++};
++
++#ifndef _PTHREAD_DESCR_DEFINED
++/* Thread descriptors */
++typedef struct _pthread_descr_struct *_pthread_descr;
++# define _PTHREAD_DESCR_DEFINED
++#endif
++
++
++/* Attributes for threads.  */
++typedef struct __pthread_attr_s
++{
++  int __detachstate;
++  int __schedpolicy;
++  struct __sched_param __schedparam;
++  int __inheritsched;
++  int __scope;
++  size_t __guardsize;
++  int __stackaddr_set;
++  void *__stackaddr;
++  size_t __stacksize;
++} pthread_attr_t;
++
++
++/* Conditions (not abstract because of PTHREAD_COND_INITIALIZER */
++
++#ifdef __GLIBC_HAVE_LONG_LONG
++__extension__ typedef long long __pthread_cond_align_t;
++#else
++typedef long __pthread_cond_align_t;
++#endif
++
++typedef struct
++{
++  struct _pthread_fastlock __c_lock; /* Protect against concurrent access */
++  _pthread_descr __c_waiting;        /* Threads waiting on this condition */
++  char __padding[48 - sizeof (struct _pthread_fastlock)
++		 - sizeof (_pthread_descr) - sizeof (__pthread_cond_align_t)];
++  __pthread_cond_align_t __align;
++} pthread_cond_t;
++
++
++/* Attribute for conditionally variables.  */
++typedef struct
++{
++  int __dummy;
++} pthread_condattr_t;
++
++/* Keys for thread-specific data */
++typedef unsigned int pthread_key_t;
++
++
++/* Mutexes (not abstract because of PTHREAD_MUTEX_INITIALIZER).  */
++/* (The layout is unnatural to maintain binary compatibility
++    with earlier releases of LinuxThreads.) */
++typedef struct
++{
++  int __m_reserved;               /* Reserved for future use */
++  int __m_count;                  /* Depth of recursive locking */
++  _pthread_descr __m_owner;       /* Owner thread (if recursive or errcheck) */
++  int __m_kind;                   /* Mutex kind: fast, recursive or errcheck */
++  struct _pthread_fastlock __m_lock; /* Underlying fast lock */
++} pthread_mutex_t;
++
++
++/* Attribute for mutex.  */
++typedef struct
++{
++  int __mutexkind;
++} pthread_mutexattr_t;
++
++
++/* Once-only execution */
++typedef int pthread_once_t;
++
++
++#ifdef __USE_UNIX98
++/* Read-write locks.  */
++typedef struct _pthread_rwlock_t
++{
++  struct _pthread_fastlock __rw_lock; /* Lock to guarantee mutual exclusion */
++  int __rw_readers;                   /* Number of readers */
++  _pthread_descr __rw_writer;         /* Identity of writer, or NULL if none */
++  _pthread_descr __rw_read_waiting;   /* Threads waiting for reading */
++  _pthread_descr __rw_write_waiting;  /* Threads waiting for writing */
++  int __rw_kind;                      /* Reader/Writer preference selection */
++  int __rw_pshared;                   /* Shared between processes or not */
++} pthread_rwlock_t;
++
++
++/* Attribute for read-write locks.  */
++typedef struct
++{
++  int __lockkind;
++  int __pshared;
++} pthread_rwlockattr_t;
++#endif
++
++#ifdef __USE_XOPEN2K
++/* POSIX spinlock data type.  */
++typedef __atomic_lock_t pthread_spinlock_t;
++
++/* POSIX barrier. */
++typedef struct {
++  struct _pthread_fastlock __ba_lock; /* Lock to guarantee mutual exclusion */
++  int __ba_required;                  /* Threads needed for completion */
++  int __ba_present;                   /* Threads waiting */
++  _pthread_descr __ba_waiting;        /* Queue of waiting threads */
++} pthread_barrier_t;
++
++/* barrier attribute */
++typedef struct {
++  int __pshared;
++} pthread_barrierattr_t;
++
++#endif
++
++
++/* Thread identifiers */
++typedef unsigned long int pthread_t;
++
++#endif	/* bits/pthreadtypes.h */
+--- libc-orig/linuxthreads/sysdeps/unix/sysv/linux/hppa/sysdep-cancel.h	2003-10-10 21:28:08.000000000 -0400
++++ libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/sysdep-cancel.h	2004-09-22 20:05:32.000000000 -0400
+@@ -29,61 +29,109 @@
+ #  define NO_ERROR -0x1000
+ # endif
+ 
++/* The syscall cancellation mechanism requires userspace
++   assistance, the following code does roughly this:
++
++   	do arguments (read arg5 and arg6 to registers)
++	setup frame
++	
++	check if there are threads, yes jump to pseudo_cancel
++	
++	unthreaded:
++		syscall
++		check syscall return (jump to pre_end)
++		set errno
++		set return to -1
++		(jump to pre_end)
++		
++	pseudo_cancel:
++		cenable
++		syscall
++		cdisable
++		check syscall return (jump to pre_end)
++		set errno
++		set return to -1
++		
++	pre_end
++		restore stack
++	
++	It is expected that 'ret' and 'END' macros will
++	append an 'undo arguments' and 'return' to the 
++	this PSEUDO macro. */
++   
+ # undef PSEUDO
+ # define PSEUDO(name, syscall_name, args)				\
+-  ENTRY (name)								\
+-    SINGLE_THREAD_P					ASM_LINE_SEP	\
+-    cmpib,<> 0,%ret0,Lpseudo_cancel			ASM_LINE_SEP	\
+-    nop							ASM_LINE_SEP	\
+-    DO_CALL(syscall_name, args)				ASM_LINE_SEP	\
+-    /* DONE! */						ASM_LINE_SEP	\
+-    bv 0(2)						ASM_LINE_SEP	\
+-    nop							ASM_LINE_SEP	\
+-  Lpseudo_cancel:					ASM_LINE_SEP	\
+-    /* store return ptr */				ASM_LINE_SEP	\
+-    stw %rp, -20(%sr0,%sp)				ASM_LINE_SEP	\
+-    /* save syscall args */				ASM_LINE_SEP	\
+-    PUSHARGS_##args /* MACRO */				ASM_LINE_SEP	\
+-    STW_PIC						ASM_LINE_SEP	\
+-    CENABLE /* FUNC CALL */				ASM_LINE_SEP	\
+-    ldo 64(%sp), %sp					ASM_LINE_SEP	\
+-    ldo -64(%sp), %sp					ASM_LINE_SEP	\
+-    LDW_PIC						ASM_LINE_SEP	\
+-    /* restore syscall args */				ASM_LINE_SEP	\
+-    POPARGS_##args					ASM_LINE_SEP	\
+-    /* save r4 in arg0 stack slot */			ASM_LINE_SEP	\
+-    stw %r4, -36(%sr0,%sp)				ASM_LINE_SEP	\
+-    /* save mask from cenable */			ASM_LINE_SEP	\
+-    copy %ret0, %r4					ASM_LINE_SEP	\
+-    ble 0x100(%sr2,%r0)					ASM_LINE_SEP    \
+-    ldi SYS_ify (syscall_name), %r20			ASM_LINE_SEP	\
+-    LDW_PIC						ASM_LINE_SEP	\
+-    /* pass mask as arg0 to cdisable */			ASM_LINE_SEP	\
+-    copy %r4, %r26					ASM_LINE_SEP	\
+-    copy %ret0, %r4					ASM_LINE_SEP	\
+-    CDISABLE						ASM_LINE_SEP	\
+-    ldo 64(%sp), %sp					ASM_LINE_SEP	\
+-    ldo -64(%sp), %sp					ASM_LINE_SEP	\
+-    LDW_PIC						ASM_LINE_SEP	\
+-    /* compare error */					ASM_LINE_SEP	\
+-    ldi NO_ERROR,%r1					ASM_LINE_SEP	\
+-    /* branch if no error */				ASM_LINE_SEP	\
+-    cmpb,>>=,n %r1,%r4,Lpre_end				ASM_LINE_SEP	\
+-    nop							ASM_LINE_SEP	\
+-    SYSCALL_ERROR_HANDLER				ASM_LINE_SEP	\
+-    ldo 64(%sp), %sp					ASM_LINE_SEP	\
+-    ldo -64(%sp), %sp					ASM_LINE_SEP	\
+-    /* No need to LDW_PIC */				ASM_LINE_SEP	\
+-    /* make syscall res value positive */		ASM_LINE_SEP	\
+-    sub %r0, %r4, %r4					ASM_LINE_SEP	\
+-    /* store into errno location */			ASM_LINE_SEP	\
+-    stw %r4, 0(%sr0,%ret0)				ASM_LINE_SEP	\
+-    /* return -1 */					ASM_LINE_SEP	\
+-    ldo -1(%r0), %ret0					ASM_LINE_SEP	\
+-  Lpre_end:						ASM_LINE_SEP	\
+-    ldw -20(%sr0,%sp), %rp             			ASM_LINE_SEP	\
+-    /* No need to LDW_PIC */				ASM_LINE_SEP	\
+-    ldw -36(%sr0,%sp), %r4				ASM_LINE_SEP
++	ENTRY (name)							\
++	DOARGS_##args					ASM_LINE_SEP	\
++	copy TREG, %r1					ASM_LINE_SEP	\
++	copy %sp, TREG					ASM_LINE_SEP	\
++	stwm %r1, 64(%sp)				ASM_LINE_SEP	\
++	stw %rp, -20(%sp)				ASM_LINE_SEP	\
++	stw TREG, -4(%sp)				ASM_LINE_SEP	\
++	/* Done setting up frame, continue... */	ASM_LINE_SEP	\
++	SINGLE_THREAD_P					ASM_LINE_SEP	\
++	cmpib,<>,n 0,%ret0,L(pseudo_cancel)		ASM_LINE_SEP	\
++L(unthreaded):						ASM_LINE_SEP	\
++	/* Save r19 */					ASM_LINE_SEP	\
++	SAVE_PIC(TREG)					ASM_LINE_SEP	\
++	/* Do syscall, delay loads # */			ASM_LINE_SEP	\
++	ble  0x100(%sr2,%r0)				ASM_LINE_SEP	\
++	ldi SYS_ify (syscall_name), %r20 /* delay */	ASM_LINE_SEP	\
++	ldi NO_ERROR,%r1				ASM_LINE_SEP	\
++	cmpb,>>=,n %r1,%ret0,L(pre_end)			ASM_LINE_SEP	\
++	/* Restore r19 from TREG */			ASM_LINE_SEP	\
++	LOAD_PIC(TREG) /* delay */			ASM_LINE_SEP	\
++	SYSCALL_ERROR_HANDLER				ASM_LINE_SEP	\
++	/* Use TREG for temp storage */			ASM_LINE_SEP	\
++	copy %ret0, TREG /* delay */			ASM_LINE_SEP	\
++	/* OPTIMIZE: Don't reload r19 */		ASM_LINE_SEP	\
++	/* do a -1*syscall_ret0 */			ASM_LINE_SEP	\
++	sub %r0, TREG, TREG				ASM_LINE_SEP	\
++	/* Store into errno location */			ASM_LINE_SEP	\
++	stw TREG, 0(%sr0,%ret0)				ASM_LINE_SEP	\
++	b L(pre_end)					ASM_LINE_SEP	\
++	/* return -1 as error */			ASM_LINE_SEP	\
++	ldo -1(%r0), %ret0 /* delay */			ASM_LINE_SEP	\
++L(pseudo_cancel):					ASM_LINE_SEP	\
++	PUSHARGS_##args /* Save args */			ASM_LINE_SEP	\
++	/* Save r19 into TREG */			ASM_LINE_SEP	\
++	CENABLE /* FUNC CALL */				ASM_LINE_SEP	\
++	SAVE_PIC(TREG) /* delay */			ASM_LINE_SEP	\
++	/* restore syscall args */			ASM_LINE_SEP	\
++	POPARGS_##args					ASM_LINE_SEP	\
++	/* save mask from cenable (use stub rp slot) */	ASM_LINE_SEP	\
++	stw %ret0, -24(%sp)				ASM_LINE_SEP	\
++	/* ... SYSCALL ... */				ASM_LINE_SEP	\
++	ble 0x100(%sr2,%r0)				ASM_LINE_SEP    \
++	ldi SYS_ify (syscall_name), %r20 /* delay */	ASM_LINE_SEP	\
++	/* ............... */				ASM_LINE_SEP	\
++	LOAD_PIC(TREG)					ASM_LINE_SEP	\
++	/* pass mask as arg0 to cdisable */		ASM_LINE_SEP	\
++	ldw -24(%sp), %r26				ASM_LINE_SEP	\
++	CDISABLE					ASM_LINE_SEP	\
++	stw %ret0, -24(%sp) /* delay */			ASM_LINE_SEP	\
++	/* Restore syscall return (use arg regs) */	ASM_LINE_SEP	\
++	ldw -24(%sp), %r26				ASM_LINE_SEP	\
++	/* compare error */				ASM_LINE_SEP	\
++	ldi NO_ERROR,%r1				ASM_LINE_SEP	\
++	/* branch if no error */			ASM_LINE_SEP	\
++	cmpb,>>=,n %r1,%r26,L(pre_end)			ASM_LINE_SEP	\
++	LOAD_PIC(TREG)	/* cond. nullify */		ASM_LINE_SEP	\
++	copy %r26, TREG	/* save syscall return */	ASM_LINE_SEP	\
++	SYSCALL_ERROR_HANDLER				ASM_LINE_SEP	\
++	/* make syscall res value positive */		ASM_LINE_SEP	\
++	sub %r0, TREG, TREG	/* delay */		ASM_LINE_SEP	\
++	/* No need to LOAD_PIC */			ASM_LINE_SEP	\
++	/* store into errno location */			ASM_LINE_SEP	\
++	stw TREG, 0(%sr0,%ret0)				ASM_LINE_SEP	\
++	/* return -1 */					ASM_LINE_SEP	\
++	ldo -1(%r0), %ret0				ASM_LINE_SEP	\
++L(pre_end):						ASM_LINE_SEP	\
++	/* Restore rp before exit */			ASM_LINE_SEP	\
++	ldw -84(%sr0,%sp), %rp				ASM_LINE_SEP	\
++	/* Undo frame */				ASM_LINE_SEP	\
++	ldwm -64(%sp),TREG				ASM_LINE_SEP	\
++	/* No need to LOAD_PIC */			ASM_LINE_SEP
+ 
+ /* Save arguments into our frame */
+ # define PUSHARGS_0	/* nothing to do */
+@@ -91,8 +139,8 @@
+ # define PUSHARGS_2	PUSHARGS_1 stw %r25, -40(%sr0,%sp)	ASM_LINE_SEP
+ # define PUSHARGS_3	PUSHARGS_2 stw %r24, -44(%sr0,%sp)	ASM_LINE_SEP
+ # define PUSHARGS_4	PUSHARGS_3 stw %r23, -48(%sr0,%sp)	ASM_LINE_SEP
+-# define PUSHARGS_5	PUSHARGS_4 /* Args are on the stack... */
+-# define PUSHARGS_6	PUSHARGS_5
++# define PUSHARGS_5	PUSHARGS_4 stw %r22, -52(%sr0,%sp)	ASM_LINE_SEP 
++# define PUSHARGS_6	PUSHARGS_5 stw %r21, -56(%sr0,%sp)	ASM_LINE_SEP
+ 
+ /* Bring them back from the stack */
+ # define POPARGS_0	/* nothing to do */
+@@ -101,7 +149,7 @@
+ # define POPARGS_3	POPARGS_2 ldw -44(%sr0,%sp), %r24	ASM_LINE_SEP
+ # define POPARGS_4	POPARGS_3 ldw -48(%sr0,%sp), %r23	ASM_LINE_SEP
+ # define POPARGS_5	POPARGS_4 ldw -52(%sr0,%sp), %r22	ASM_LINE_SEP
+-# define POPARGS_6	POPARGS_5 ldw -54(%sr0,%sp), %r21	ASM_LINE_SEP
++# define POPARGS_6	POPARGS_5 ldw -56(%sr0,%sp), %r21	ASM_LINE_SEP
+ 
+ # ifdef IS_IN_libpthread
+ #  ifdef PIC
+@@ -163,10 +211,10 @@
+ /* This ALT version requires newer kernel support */
+ #  define SINGLE_THREAD_P_MFCTL						\
+ 	mfctl %cr27, %ret0					ASM_LINE_SEP	\
+-	cmpib,= NO_THREAD_CR27,%ret0,Lstp			ASM_LINE_SEP	\
++	cmpib,= NO_THREAD_CR27,%ret0,L(stp)			ASM_LINE_SEP	\
+ 	nop							ASM_LINE_SEP	\
+ 	ldw MULTIPLE_THREADS_OFFSET(%sr0,%ret0),%ret0		ASM_LINE_SEP	\
+- Lstp:								ASM_LINE_SEP
++L(stp):								ASM_LINE_SEP
+ #  ifdef PIC
+ /* Slower version uses GOT to get value of __local_multiple_threads */
+ #   define SINGLE_THREAD_P							\
+@@ -174,7 +222,7 @@
+ 	ldw RT%__local_multiple_threads(%sr0,%r1), %ret0	ASM_LINE_SEP	\
+ 	ldw 0(%sr0,%ret0), %ret0 				ASM_LINE_SEP
+ #  else
+-  /* Slow non-pic version using DP */
++/* Slow non-pic version using DP */
+ #   define SINGLE_THREAD_P								\
+ 	addil LR%__local_multiple_threads-$global$,%r27  		ASM_LINE_SEP	\
+ 	ldw RR%__local_multiple_threads-$global$(%sr0,%r1),%ret0	ASM_LINE_SEP
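
For context on what glibc235-hppa-lt.dpatch is working around: ldcw (load and
clear word) is the only atomic read-modify-write instruction on PA-RISC, it
needs a 16-byte aligned operand, and it clears the word it reads, so zero means
"lock held".  Because malloc and stack locals only guarantee 8-byte alignment,
the patch turns __atomic_lock_t into a four-word struct and has __ldcw_align()
pick the one word inside it that falls on a 16-byte boundary.  A minimal C
sketch of that address selection, assuming only the 8-byte minimum alignment
the patch comments describe (names simplified, not code from the patch):

  #include <stdint.h>

  #define LDCW_ALIGNMENT 16

  /* 16 bytes of lock storage; only 8-byte alignment is assumed, so the
     16-byte boundary may fall at the start or in the middle of it. */
  typedef volatile struct { unsigned int lock[4]; } atomic_lock_t;

  /* Return the 16-byte aligned word inside the lock.  Rounding up never
     leaves the four-word array, because the array is 16 bytes long and
     its base address is at least 8-byte aligned. */
  static volatile unsigned int *ldcw_align(atomic_lock_t *p)
  {
      uintptr_t addr = (uintptr_t) p;
      uintptr_t aligned = addr & ~((uintptr_t) (LDCW_ALIGNMENT - 1));
      if (aligned < addr)
          aligned += LDCW_ALIGNMENT;      /* round up to the next boundary */
      return (volatile unsigned int *) aligned;
  }

The spinlock code in the patch then issues ldcw on that aligned word; since
ldcw writes zero, the initializers set all four words to 1 and lock_held()
simply tests the aligned word for zero.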

Added: glibc-package/trunk/debian/patches/glibc235-hppa-sysdeps.dpatch
===================================================================
--- glibc-package/trunk/debian/patches/glibc235-hppa-sysdeps.dpatch	2005-07-17 15:36:05 UTC (rev 955)
+++ glibc-package/trunk/debian/patches/glibc235-hppa-sysdeps.dpatch	2005-07-17 16:06:06 UTC (rev 956)
@@ -0,0 +1,951 @@
+#! /bin/sh -e
+
+# All lines beginning with `# DP:' are a description of the patch.
+# DP: Description: Make glibc-2.3.5 compile to enable hppa sysdeps
+#		   and linuxthreads correctly again.
+# DP: Related bugs: 
+# DP: Dpatch author: Jeff Bailey <jbailey@ubuntu.com>
+# DP: Patch author: Carlos O'Donell
+# DP: Upstream status: Pending
+# DP: Status Details: 
+# DP: Date: 2005-07-17
+
+PATCHLEVEL=1
+
+if [ $# -ne 2 ]; then
+    echo >&2 "`basename $0`: script expects -patch|-unpatch as argument"
+    exit 1
+fi
+case "$1" in
+    -patch) patch -d "$2" -f --no-backup-if-mismatch -p$PATCHLEVEL < $0;;
+    -unpatch) patch -d "$2" -f --no-backup-if-mismatch -R -p$PATCHLEVEL < $0;;
+    *)
+	echo >&2 "`basename $0`: script expects -patch|-unpatch as argument"
+	exit 1
+esac
+exit 0
+
+# append the patch here and adjust the -p? flag in the patch calls.
+--- libc-orig/sysdeps/generic/dl-sysdep.c	2004-12-14 15:30:41.000000000 -0500
++++ libc/sysdeps/generic/dl-sysdep.c	2004-12-14 15:30:29.000000000 -0500
+@@ -89,7 +89,7 @@
+ #else
+   uid_t uid = 0;
+   gid_t gid = 0;
+-  unsigned int seen = 0;
++  int seen = 0;
+ # define set_seen_secure() (seen = -1)
+ # ifdef HAVE_AUX_XID
+ #  define set_seen(tag) (tag)	/* Evaluate for the side effects.  */
+--- libc-orig/sysdeps/hppa/atomicity.h	1969-12-31 19:00:00.000000000 -0500
++++ libc/sysdeps/hppa/atomicity.h	2003-12-08 21:25:00.000000000 -0500
+@@ -0,0 +1,55 @@
++/* Low-level functions for atomic operations.  HP-PARISC version.
++   Copyright (C) 1997,2001 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, write to the Free
++   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++   02111-1307 USA.  */
++
++#ifndef _ATOMICITY_H
++#define _ATOMICITY_H	1
++
++#include <inttypes.h>
++
++#warning stub atomicity functions are not atomic
++#warning CAO This will get implemented soon
++
++static inline int
++__attribute__ ((unused))
++exchange_and_add (volatile uint32_t *mem, int val)
++{
++  int result = *mem;
++  *mem += val;
++  return result;
++}
++
++static inline void
++__attribute__ ((unused))
++atomic_add (volatile uint32_t *mem, int val)
++{
++  *mem += val;
++}
++
++static inline int
++__attribute__ ((unused))
++compare_and_swap (volatile long int *p, long int oldval, long int newval)
++{
++  if (*p != oldval)
++    return 0;
++
++  *p = newval;
++  return 1;
++}
++
++#endif /* atomicity.h */
+--- libc-orig/sysdeps/hppa/bits/link.h	2005-01-28 14:39:49.000000000 -0500
++++ libc/sysdeps/hppa/bits/link.h	2005-03-01 13:42:58.000000000 -0500
+@@ -0,0 +1,63 @@
++/* Copyright (C) 2005 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, write to the Free
++   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++   02111-1307 USA.  */
++
++#ifndef	_LINK_H
++# error "Never include <bits/link.h> directly; use <link.h> instead."
++#endif
++
++/* Registers for entry into PLT on hppa.  */
++typedef struct La_hppa_regs
++{
++  uint32_t lr_r8;
++  uint32_t lr_r9;
++  uint32_t lr_r10;
++  uint32_t lr_r11;
++  uint32_t lr_gr [8];
++  double lr_fr [8];
++  uint32_t lr_unat;
++  uint32_t lr_sp;
++} La_hppa_regs;
++
++/* Return values for calls from PLT on hppa.  */
++typedef struct La_hppa_retval
++{
++  uint32_t lrv_r8;
++  uint32_t lrv_r9;
++  uint32_t lrv_r10;
++  uint32_t lrv_r11;
++  double lr_fr [8];
++} La_hppa_retval;
++
++
++__BEGIN_DECLS
++
++extern Elf32_Addr la_hppa_gnu_pltenter (Elf32_Sym *__sym, unsigned int __ndx,
++				       uintptr_t *__refcook,
++				       uintptr_t *__defcook,
++				       La_hppa_regs *__regs,
++				       unsigned int *__flags,
++				       const char *__symname,
++				       long int *__framesizep);
++extern unsigned int la_hppa_gnu_pltexit (Elf32_Sym *__sym, unsigned int __ndx,
++					uintptr_t *__refcook,
++					uintptr_t *__defcook,
++					const La_hppa_regs *__inregs,
++					La_hppa_retval *__outregs,
++					const char *symname);
++
++__END_DECLS
+--- libc-orig/sysdeps/hppa/dl-trampoline.S	1969-12-31 19:00:00.000000000 -0500
++++ libc/sysdeps/hppa/dl-trampoline.S	2005-04-05 15:51:55.000000000 -0400
+@@ -0,0 +1,194 @@
++/* PLT trampolines. hppa version.
++   Copyright (C) 2005 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++
++   The GNU C Library is free software; you can redistribute it and/or
++   modify it under the terms of the GNU Lesser General Public
++   License as published by the Free Software Foundation; either
++   version 2.1 of the License, or (at your option) any later version.
++
++   The GNU C Library is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++   Lesser General Public License for more details.
++
++   You should have received a copy of the GNU Lesser General Public
++   License along with the GNU C Library; if not, write to the Free
++   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++   02111-1307 USA.  */
++
++#include <sysdep.h>
++
++/* This code gets called via the .plt stub, and is used in
++   dl-runtime.c to call the `_dl_fixup' function and then redirect 
++   to the    address it returns. `_dl_fixup' takes two
++   arguments, however `_dl_profile_fixup' takes a number of 
++   parameters for use with library auditing (LA).
++   
++   WARNING: This template is also used by gcc's __cffc, and expects
++   that the "bl" for _dl_runtime_resolve exist at a particular offset.
++   Do not change this template without changing gcc, while the prefix
++   "bl" should fix everything so gcc finds the right spot, it will
++   slow down __cffc when it attempts to call fixup to resolve function
++   descriptor references. Please refer to gcc/gcc/config/pa/fptr.c
++   
++   Enter with r19 = reloc offset, r20 = got-8, r21 = fixup ltp.  */
++
++	/* FAKE bl to provide gcc's __cffc with fixup loc. */
++	.text
++	bl	_dl_fixup, %r2
++        .text
++        .align 4
++        .global _dl_runtime_resolve
++        .type _dl_runtime_resolve,@function
++_dl_runtime_resolve:
++        .PROC
++        .CALLINFO FRAME=128,CALLS,SAVE_RP,ENTRY_GR=3
++        .ENTRY
++        /* SAVE_RP says we do */
++        stw %rp, -20(%sp)
++
++	/* Save static link register */
++	stw	%r29,-16(%sp)
++ 	/* Save argument registers in the call stack frame. */
++	stw	%r26,-36(%sp)
++	stw	%r25,-40(%sp)
++	stw	%r24,-44(%sp)
++	stw	%r23,-48(%sp)
++
++	/* Build a call frame, and save structure pointer. */
++	copy	%sp, %r26	/* Copy previous sp */
++	/* Save function result address (on entry) */
++	stwm	%r28,128(%sp)
++
++	/* Save floating point argument registers */
++	ldo	-56(%sp),%r26	
++	fstd,ma	%fr4,-8(%r26)
++	fstd,ma	%fr5,-8(%r26)
++	fstd,ma	%fr6,-8(%r26)
++	fstd	%fr7,0(%r26)
++
++	/* Fillin some frame info to follow ABI */
++	stw	%r21,-32(%sp)	/* PIC register value */
++	stw	%r26,-4(%sp)	/* Previous sp */
++
++ 	/* Set up args to fixup func, needs only two arguments  */
++	ldw	8+4(%r20),%r26		/* (1) got[1] == struct link_map */
++	copy	%r19,%r25		/* (2) reloc offset  */
++
++ 	/* Call the real address resolver. */
++	bl	_dl_fixup,%rp
++	copy	%r21,%r19		/* set fixup func ltp */
++
++	/* Load up the returned func descriptor */
++	copy	%ret0, %r22
++	copy	%ret1, %r19
++
++	/* Reload arguments fp args */
++	ldo	-80(%sp),%r26
++	fldd,ma	8(%r26),%fr7
++	fldd,ma	8(%r26),%fr6
++	fldd,ma	8(%r26),%fr5
++	fldd	0(%r26),%fr4
++
++	/* Adjust sp, and restore function result address*/
++	ldwm	-128(%sp),%r28
++
++	/* Reload static link register */
++	ldw	-16(%sp),%r29
++	/* Reload general args */
++	ldw	-36(%sp),%r26
++	ldw	-40(%sp),%r25
++	ldw	-44(%sp),%r24
++	ldw	-48(%sp),%r23
++
++	/* Jump to new function, but return to previous function */
++	bv	%r0(%r22)
++	ldw	-20(%sp),%rp
++        .EXIT
++        .PROCEND
++	.size   _dl_runtime_resolve, . - _dl_runtime_resolve
++
++
++	/* FIXME:
++		Need to largely rewrite the bottom half of
++		this code in order to save and restore the
++		LA struct from the stack along with
++		interpreted parameters.
++	*/
++        .text
++        .align 4
++        .global _dl_runtime_profile
++        .type _dl_runtime_profile,@function
++_dl_runtime_profile:
++        .PROC
++        .CALLINFO FRAME=128,CALLS,SAVE_RP,ENTRY_GR=3
++        .ENTRY
++
++        /* SAVE_RP says we do */
++        stw %rp, -20(%sp)
++
++	/* Save static link register */
++	stw	%r29,-16(%sp)
++ 	/* Save argument registers in the call stack frame. */
++	stw	%r26,-36(%sp)
++	stw	%r25,-40(%sp)
++	stw	%r24,-44(%sp)
++	stw	%r23,-48(%sp)
++
++	/* Build a call frame, and save structure pointer. */
++	copy	%sp, %r26	/* Copy previous sp */
++	/* Save function result address (on entry) */
++	stwm	%r28,128(%sp)
++
++	/* Save floating point argument registers */
++	ldo	-56(%sp),%r26	
++	fstd,ma	%fr4,-8(%r26)
++	fstd,ma	%fr5,-8(%r26)
++	fstd,ma	%fr6,-8(%r26)
++	fstd	%fr7,0(%r26)
++
++	/* Fillin some frame info to follow ABI */
++	stw	%r21,-32(%sp)	/* PIC register value */
++	stw	%r26,-4(%sp)	/* Previous sp */
++
++ 	/* Set up args to fixup func, needs three arguments  */
++	ldw	8+4(%r20),%r26		/* (1) got[1] == struct link_map */
++	copy	%r19,%r25		/* (2) reloc offset  */
++	copy    %rp,%r24		/* (3) profile_fixup needs rp */
++
++ 	/* Call the real address resolver. */
++	bl	_dl_profile_fixup,%rp
++	copy	%r21,%r19		/* set fixup func ltp */
++
++	/* Load up the returned func descriptor */
++	copy	%ret0, %r22
++	copy	%ret1, %r19
++
++	/* Reload arguments fp args */
++	ldo	-80(%sp),%r26
++	fldd,ma	8(%r26),%fr7
++	fldd,ma	8(%r26),%fr6
++	fldd,ma	8(%r26),%fr5
++	fldd	0(%r26),%fr4
++
++	/* Adjust sp, and restore function result address*/
++	ldwm	-128(%sp),%r28
++
++	/* Reload static link register */
++	ldw	-16(%sp),%r29
++	/* Reload general args */
++	ldw	-36(%sp),%r26
++	ldw	-40(%sp),%r25
++	ldw	-44(%sp),%r24
++	ldw	-48(%sp),%r23
++
++	/* Jump to new function, but return to previous function */
++	bv	%r0(%r22)
++	ldw	-20(%sp),%rp
++        .EXIT
++        .PROCEND
++	.size   _dl_runtime_profile, . - _dl_runtime_profile
++
++
++
+--- libc-orig/sysdeps/hppa/fpu/libm-test-ulps	2003-03-22 19:52:10.000000000 -0500
++++ libc/sysdeps/hppa/fpu/libm-test-ulps	2004-09-29 23:50:16.000000000 -0400
+@@ -1,6 +1,9 @@
+ # Begin of automatic generation
+ 
+ # atan2
++Test "atan2 (-0.00756827042671106339, -.001792735857538728036) == -1.80338464113663849327153994380":
++float: 6
++ifloat: 6
+ Test "atan2 (-0.75, -1.0) == -2.49809154479650885165983415456218025":
+ float: 3
+ ifloat: 3
+@@ -258,9 +261,6 @@
+ ifloat: 1
+ 
+ # ctan
+-Test "Real part of: ctan (-2 - 3 i) == 0.376402564150424829275122113032269084e-2 - 1.00323862735360980144635859782192726 i":
+-double: 1
+-idouble: 1
+ Test "Imaginary part of: ctan (0.75 + 1.25 i) == 0.160807785916206426725166058173438663 + 0.975363285031235646193581759755216379 i":
+ double: 1
+ idouble: 1
+@@ -479,6 +479,11 @@
+ float: 1
+ ifloat: 1
+ 
++# lround
++Test "lround (1071930.0008) == 1071930":
++double: -214511494
++idouble: -214511494
++
+ # sincos
+ Test "sincos (M_PI_6l*2.0, &sin_res, &cos_res) puts 0.5 in cos_res":
+ double: 1
+@@ -640,8 +645,8 @@
+ 
+ # Maximal error of functions:
+ Function: "atan2":
+-float: 3
+-ifloat: 3
++float: 6
++ifloat: 6
+ 
+ Function: "atanh":
+ float: 1
+@@ -777,10 +782,6 @@
+ float: 1
+ ifloat: 1
+ 
+-Function: Real part of "ctan":
+-double: 1
+-idouble: 1
+-
+ Function: Imaginary part of "ctan":
+ double: 1
+ idouble: 1
+--- libc-orig/sysdeps/hppa/sysdep.h	2003-10-15 01:31:42.000000000 -0400
++++ libc/sysdeps/hppa/sysdep.h	2004-11-01 01:51:58.000000000 -0500
+@@ -22,9 +22,8 @@
+ #include <sys/syscall.h>
+ #include "config.h"
+ 
+-#ifndef ASM_LINE_SEP
+-#define ASM_LINE_SEP ;
+-#endif
++#undef ASM_LINE_SEP
++#define ASM_LINE_SEP ! 
+ 
+ #ifdef	__ASSEMBLER__
+ 
+@@ -51,13 +50,9 @@
+ #define END(name)							      \
+   .PROCEND
+ 
+-
+-/* If compiled for profiling, call `mcount' at the start of each function.  */
++/* GCC does everything for us. */
+ #ifdef	PROF
+-/* The mcount code relies on a normal frame pointer being on the stack
+-   to locate our caller, so push one just for its benefit.  */
+-#define CALL_MCOUNT \
+-  XXX	ASM_LINE_SEP
++#define CALL_MCOUNT 
+ #else
+ #define CALL_MCOUNT		/* Do nothing.  */
+ #endif
+--- libc-orig/sysdeps/unix/sysv/linux/hppa/sysdep.c	2003-10-15 01:45:16.000000000 -0400
++++ libc/sysdeps/unix/sysv/linux/hppa/sysdep.c	2004-09-20 14:04:24.000000000 -0400
+@@ -46,13 +46,13 @@
+   {
+     register unsigned long int __res asm("r28");
+     LOAD_ARGS_6 (arg0, arg1, arg2, arg3, arg4, arg5)
+-    asm volatile (STW_ASM_PIC
++    asm volatile (SAVE_ASM_PIC
+ 		  "	ble  0x100(%%sr2, %%r0)	\n"
+ 		  "	copy %1, %%r20		\n"
+-		  LDW_ASM_PIC
++		  LOAD_ASM_PIC
+ 		  : "=r" (__res)
+ 		  : "r" (sysnum) ASM_ARGS_6
+-		  : CALL_CLOB_REGS CLOB_ARGS_6);
++		  : "memory", CALL_CLOB_REGS CLOB_ARGS_6);
+     __sys_res = __res;
+   }
+   if ((unsigned long int) __sys_res >= (unsigned long int) -4095)
+--- libc-orig/sysdeps/unix/sysv/linux/hppa/sysdep.h	2003-11-03 12:18:38.000000000 -0500
++++ libc/sysdeps/unix/sysv/linux/hppa/sysdep.h	2005-04-05 03:40:47.000000000 -0400
+@@ -24,26 +24,33 @@
+ #include <sys/syscall.h>
+ #include "config.h"
+ 
+-#ifndef ASM_LINE_SEP
+-# define ASM_LINE_SEP ;
+-#endif
++#undef ASM_LINE_SEP
++#define ASM_LINE_SEP ! 
+ 
+ #undef SYS_ify
+ #define SYS_ify(syscall_name)	(__NR_##syscall_name)
+ 
++/* WARNING: TREG must be a callee saves register so 
++   that it doesn't have to be restored after a call 
++   to another function */
+ #ifdef PIC
+-/* WARNING: CANNOT BE USED IN A NOP! */
+-# define STW_PIC stw %r19, -32(%sr0, %sp) ASM_LINE_SEP
+-# define LDW_PIC ldw -32(%sr0, %sp), %r19 ASM_LINE_SEP
+-# define STW_ASM_PIC	"       copy %%r19, %%r4\n"
+-# define LDW_ASM_PIC	"       copy %%r4, %%r19\n"
+-# define USING_GR4	"%r4",
++# define TREG %r3
++# define SAVE_PIC(SREG) copy %r19, SREG ASM_LINE_SEP
++# define LOAD_PIC(LREG) copy LREG, %r19 ASM_LINE_SEP
++/* Inline assembly defines */
++# define TREG_ASM "%r4" /* Cant clobber r3, it holds framemarker */
++# define SAVE_ASM_PIC	"       copy %%r19, %" TREG_ASM "\n"
++# define LOAD_ASM_PIC	"       copy %" TREG_ASM ", %%r19\n"
++# define USING_TREG	TREG_ASM,
+ #else
+-# define STW_PIC ASM_LINE_SEP
+-# define LDW_PIC ASM_LINE_SEP
+-# define STW_ASM_PIC	" \n"
+-# define LDW_ASM_PIC	" \n"
+-# define USING_GR4
++# define TREG %r3
++# define SAVE_PIC(SREG) nop ASM_LINE_SEP
++# define LOAD_PIC(LREG) nop ASM_LINE_SEP
++/* Inline assembly defines */
++# define TREG_ASM 
++# define SAVE_ASM_PIC	"nop \n"
++# define LOAD_ASM_PIC	"nop \n"
++# define USING_TREG
+ #endif
+ 
+ #ifdef __ASSEMBLER__
+@@ -76,31 +83,73 @@
+ 
+ /* We don't want the label for the error handle to be global when we define
+    it here.  */
+-#ifdef PIC
++/*#ifdef PIC
+ # define SYSCALL_ERROR_LABEL 0f
+ #else
+ # define SYSCALL_ERROR_LABEL syscall_error
+-#endif
++#endif*/
++
++/* Argument manipulation from the stack for preparing to
++   make a syscall */
++
++#define DOARGS_0 /* nothing */
++#define DOARGS_1 /* nothing */
++#define DOARGS_2 /* nothing */
++#define DOARGS_3 /* nothing */
++#define DOARGS_4 /* nothing */
++#define DOARGS_5 ldw -52(%sp), %r22		ASM_LINE_SEP
++#define DOARGS_6 DOARGS_5 ldw -56(%sp), %r21	ASM_LINE_SEP
++
++#define UNDOARGS_0 /* nothing */
++#define UNDOARGS_1 /* nothing */
++#define UNDOARGS_2 /* nothing */
++#define UNDOARGS_3 /* nothing */
++#define UNDOARGS_4 /* nothing */
++#define UNDOARGS_5 /* nothing */
++#define UNDOARGS_6 /* nothing */
+ 
+ /* Define an entry point visible from C.
+ 
+    There is currently a bug in gdb which prevents us from specifying
+    incomplete stabs information.  Fake some entries here which specify
+    the current source file.  */
+-#define	ENTRY(name)						\
+-	.text					ASM_LINE_SEP	\
+-	.export C_SYMBOL_NAME(name)		ASM_LINE_SEP	\
+-	.type	C_SYMBOL_NAME(name),@function	ASM_LINE_SEP	\
+-	C_LABEL(name)				ASM_LINE_SEP	\
+-	CALL_MCOUNT				ASM_LINE_SEP
++#define	ENTRY(name)							\
++	.text						ASM_LINE_SEP	\
++	.align ALIGNARG(4)				ASM_LINE_SEP	\
++	.export C_SYMBOL_NAME(name)			ASM_LINE_SEP	\
++	.type	C_SYMBOL_NAME(name),@function		ASM_LINE_SEP	\
++	C_LABEL(name)					ASM_LINE_SEP	\
++	.PROC						ASM_LINE_SEP	\
++	.CALLINFO FRAME=64,CALLS,SAVE_RP,ENTRY_GR=3	ASM_LINE_SEP	\
++	.ENTRY						ASM_LINE_SEP	\
++	/* SAVE_RP says we do */			ASM_LINE_SEP	\
++	stw %rp, -20(%sr0,%sp)				ASM_LINE_SEP	\
++	/* FIXME: Call mcount? (careful with stack!) */
++
++/* Some syscall wrappers do not call other functions, and
++   hence are classified as leaf, so add NO_CALLS for gdb */
++#define	ENTRY_LEAF(name)						\
++	.text						ASM_LINE_SEP	\
++	.align ALIGNARG(4)				ASM_LINE_SEP	\
++	.export C_SYMBOL_NAME(name)			ASM_LINE_SEP	\
++	.type	C_SYMBOL_NAME(name),@function		ASM_LINE_SEP	\
++	C_LABEL(name)					ASM_LINE_SEP	\
++	.PROC						ASM_LINE_SEP	\
++	.CALLINFO FRAME=64,NO_CALLS,SAVE_RP,ENTRY_GR=3	ASM_LINE_SEP	\
++	.ENTRY						ASM_LINE_SEP	\
++	/* SAVE_RP says we do */			ASM_LINE_SEP	\
++	stw %rp, -20(%sr0,%sp)				ASM_LINE_SEP	\
++	/* FIXME: Call mcount? (careful with stack!) */
+ 
+ #undef	END
+ #define END(name)							\
+-1:							ASM_LINE_SEP	\
+-.size	C_SYMBOL_NAME(name),1b-C_SYMBOL_NAME(name)	ASM_LINE_SEP
+-
+-/* If compiled for profiling, call `mcount' at the start of each function.  */
+-/* No, don't bother.  gcc will put the call in for us.  */
++  	.EXIT						ASM_LINE_SEP	\
++	.PROCEND					ASM_LINE_SEP	\
++.size	C_SYMBOL_NAME(name), .-C_SYMBOL_NAME(name)	ASM_LINE_SEP
++
++/* If compiled for profiling, call `mcount' at the start 
++   of each function. No, don't bother.  gcc will put the 
++   call in for us.  */
+ #define CALL_MCOUNT		/* Do nothing.  */
+ 
+ /* syscall wrappers consist of
+@@ -118,14 +167,16 @@
+ */
+ 
+ #define	PSEUDO(name, syscall_name, args)			\
+-  ENTRY (name)							\
+-  DO_CALL(syscall_name, args)			ASM_LINE_SEP	\
++  ENTRY (name)					ASM_LINE_SEP	\
++  /* If necessary, load args from stack */	ASM_LINE_SEP	\
++  DOARGS_##args					ASM_LINE_SEP	\
++  DO_CALL (syscall_name, args)			ASM_LINE_SEP	\
++  UNDOARGS_##args				ASM_LINE_SEP	\
+   nop						ASM_LINE_SEP
+ 
+ #define ret \
+-	/* Return value set by ERRNO code */	ASM_LINE_SEP	\
+-	bv 0(2)					ASM_LINE_SEP	\
+-	nop					ASM_LINE_SEP
++  /* Return value set by ERRNO code */		ASM_LINE_SEP	\
++  bv,n 0(2)					ASM_LINE_SEP
+ 
+ #undef	PSEUDO_END
+ #define	PSEUDO_END(name)					\
+@@ -133,8 +184,10 @@
+ 
+ /* We don't set the errno on the return from the syscall */
+ #define	PSEUDO_NOERRNO(name, syscall_name, args)		\
+-  ENTRY (name)							\
+-  DO_CALL_NOERRNO(syscall_name, args)		ASM_LINE_SEP	\
++  ENTRY_LEAF (name)				ASM_LINE_SEP	\
++  DOARGS_##args					ASM_LINE_SEP	\
++  DO_CALL_NOERRNO (syscall_name, args)		ASM_LINE_SEP	\
++  UNDOARGS_##args				ASM_LINE_SEP	\
+   nop						ASM_LINE_SEP
+ 
+ #define ret_NOERRNO ret
+@@ -146,9 +199,11 @@
+ /* This has to return the error value */
+ #undef  PSEUDO_ERRVAL
+ #define PSEUDO_ERRVAL(name, syscall_name, args)			\
+-	ENTRY(name)						\
+-	DO_CALL_ERRVAL(syscall_name, args)	ASM_LINE_SEP	\
+-	nop					ASM_LINE_SEP
++  ENTRY_LEAF (name)				ASM_LINE_SEP	\
++  DOARGS_##args					ASM_LINE_SEP	\
++  DO_CALL_ERRVAL (syscall_name, args)		ASM_LINE_SEP	\
++  UNDOARGS_##args				ASM_LINE_SEP	\
++  nop						ASM_LINE_SEP
+ 
+ #define ret_ERRVAL ret
+ 
+@@ -161,7 +216,8 @@
+ #define SYSCALL_PIC_SETUP	/* Nothing.  */
+ 
+ 
+-/* All the syscall assembly macros rely on finding the approriate
++/* FIXME: This comment is not true.
++ * All the syscall assembly macros rely on finding the appropriate
+    SYSCALL_ERROR_LABEL or rather HANDLER. */
+ 
+ /* int * __errno_location(void) so you have to store your value
+@@ -209,8 +265,8 @@
+ 	arg 2		gr25
+ 	arg 3		gr24
+ 	arg 4		gr23
+-	arg 5		-52(gr30)
+-	arg 6		-56(gr30)
++	arg 5		-52(sp)
++	arg 6		-56(sp)
+ 
+    gr22 and gr21 are caller-saves, so we can just load the arguments
+    there and generally be happy. */
+@@ -219,46 +275,48 @@
+  * is intended to mimic the if (__sys_res...)
+  * code inside INLINE_SYSCALL
+  */
++#define NO_ERROR -0x1000
+ 
+ #undef	DO_CALL
+ #define DO_CALL(syscall_name, args)				\
+-	DOARGS_##args				ASM_LINE_SEP	\
+-	STW_PIC					ASM_LINE_SEP	\
++  	copy TREG,%r1				ASM_LINE_SEP	\
++	copy %sp,TREG				ASM_LINE_SEP	\
++	/* Create a frame */			ASM_LINE_SEP	\
++	stwm %r1, 64(%sp)			ASM_LINE_SEP	\
++	stw %rp, -20(%sp)			ASM_LINE_SEP	\
++	stw TREG, -4(%sp)			ASM_LINE_SEP	\
++	/* Save r19 */				ASM_LINE_SEP	\
++	SAVE_PIC(TREG)				ASM_LINE_SEP	\
+ 	/* Do syscall, delay loads # */		ASM_LINE_SEP	\
+ 	ble  0x100(%sr2,%r0)			ASM_LINE_SEP	\
+ 	ldi SYS_ify (syscall_name), %r20	ASM_LINE_SEP	\
+-	ldi -0x1000,%r1				ASM_LINE_SEP	\
+-	cmpb,>>=,n %r1,%ret0,0f			ASM_LINE_SEP	\
+-	/* save rp or we get lost */		ASM_LINE_SEP	\
+-	stw %rp, -20(%sr0,%sp)			ASM_LINE_SEP	\
+-	/* Restore r19 from frame */		ASM_LINE_SEP	\
+-	LDW_PIC					ASM_LINE_SEP	\
+-	stw %ret0, -24(%sr0,%sp)		ASM_LINE_SEP	\
++	ldi NO_ERROR,%r1			ASM_LINE_SEP	\
++	cmpb,>>=,n %r1,%ret0,L(pre_end)		ASM_LINE_SEP	\
++	/* Restore r19 from TREG */		ASM_LINE_SEP	\
++	LOAD_PIC(TREG) /* delay */		ASM_LINE_SEP	\
+ 	SYSCALL_ERROR_HANDLER			ASM_LINE_SEP	\
+-	/* create frame */			ASM_LINE_SEP	\
+-	ldo 64(%sp), %sp			ASM_LINE_SEP	\
+-	ldo -64(%sp), %sp			ASM_LINE_SEP	\
++	/* Use TREG for temp storage */		ASM_LINE_SEP	\
++	copy %ret0, TREG /* delay */		ASM_LINE_SEP	\
+ 	/* OPTIMIZE: Don't reload r19 */	ASM_LINE_SEP	\
+ 	/* do a -1*syscall_ret0 */		ASM_LINE_SEP	\
+-	ldw -24(%sr0,%sp), %r26			ASM_LINE_SEP	\
+-	sub %r0, %r26, %r26			ASM_LINE_SEP	\
++	sub %r0, TREG, TREG			ASM_LINE_SEP	\
+ 	/* Store into errno location */		ASM_LINE_SEP	\
+-	stw %r26, 0(%sr0,%ret0)			ASM_LINE_SEP	\
++	stw TREG, 0(%sr0,%ret0)			ASM_LINE_SEP	\
+ 	/* return -1 as error */		ASM_LINE_SEP	\
+ 	ldo -1(%r0), %ret0			ASM_LINE_SEP	\
+-	ldw -20(%sr0,%sp), %rp			ASM_LINE_SEP	\
+-0:						ASM_LINE_SEP	\
+-	UNDOARGS_##args				ASM_LINE_SEP
++L(pre_end):					ASM_LINE_SEP	\
++	/* Restore return pointer */		ASM_LINE_SEP	\
++	ldw -84(%sp),%rp			ASM_LINE_SEP	\
++	/* Restore our frame, restoring TREG */	ASM_LINE_SEP	\
++	ldwm -64(%sp), TREG			ASM_LINE_SEP
+ 
+ /* We do nothing with the return, except hand it back to someone else */
+ #undef  DO_CALL_NOERRNO
+ #define DO_CALL_NOERRNO(syscall_name, args)			\
+-	DOARGS_##args                                           \
+ 	/* No need to store r19 */		ASM_LINE_SEP	\
+ 	ble  0x100(%sr2,%r0)                    ASM_LINE_SEP    \
+ 	ldi SYS_ify (syscall_name), %r20        ASM_LINE_SEP    \
+-	/* Caller will restore r19 */		ASM_LINE_SEP	\
+-	UNDOARGS_##args
++	/* Caller will restore r19 */		ASM_LINE_SEP
+ 
+ /* Here, we return the ERRVAL in assembly, note we don't call the
+    error handler function, but we do 'negate' the return _IF_
+@@ -266,34 +324,15 @@
+ 
+ #undef	DO_CALL_ERRVAL
+ #define DO_CALL_ERRVAL(syscall_name, args)			\
+-	DOARGS_##args				ASM_LINE_SEP	\
+ 	/* No need to store r19 */		ASM_LINE_SEP	\
+ 	ble  0x100(%sr2,%r0)			ASM_LINE_SEP	\
+ 	ldi SYS_ify (syscall_name), %r20	ASM_LINE_SEP	\
+ 	/* Caller will restore r19 */		ASM_LINE_SEP	\
+-	ldi -0x1000,%r1				ASM_LINE_SEP	\
++	ldi NO_ERROR,%r1			ASM_LINE_SEP	\
+ 	cmpb,>>=,n %r1,%ret0,0f			ASM_LINE_SEP	\
+ 	sub %r0, %ret0, %ret0			ASM_LINE_SEP	\
+-0:						ASM_LINE_SEP	\
+-	UNDOARGS_##args				ASM_LINE_SEP
++0:						ASM_LINE_SEP
+ 
+-#define DOARGS_0 /* nothing */
+-#define DOARGS_1 /* nothing */
+-#define DOARGS_2 /* nothing */
+-#define DOARGS_3 /* nothing */
+-#define DOARGS_4 /* nothing */
+-#define DOARGS_5 ldw -52(%r30), %r22		ASM_LINE_SEP
+-#define DOARGS_6 ldw -52(%r30), %r22		ASM_LINE_SEP	\
+-		 ldw -56(%r30), %r21		ASM_LINE_SEP
+-
+-
+-#define UNDOARGS_0 /* nothing */
+-#define UNDOARGS_1 /* nothing */
+-#define UNDOARGS_2 /* nothing */
+-#define UNDOARGS_3 /* nothing */
+-#define UNDOARGS_4 /* nothing */
+-#define UNDOARGS_5 /* nothing */
+-#define UNDOARGS_6 /* nothing */
+ 
+ #else
+ 
+@@ -305,27 +344,28 @@
+    registers r20 -> r26 will conflict with the list so they
+    are treated specially. Although r19 is clobbered by the syscall
+    we cannot say this because it would violate ABI, thus we say
+-   r4 is clobbered and use that register to save/restore r19
++   TREG is clobbered and use that register to save/restore r19
+    across the syscall. */
+ 
+-#define CALL_CLOB_REGS	"%r1", "%r2", USING_GR4 \
++#define CALL_CLOB_REGS	"%r1", "%r2", USING_TREG \
+ 		 	"%r20", "%r29", "%r31"
+ 
+ #undef INLINE_SYSCALL
+-#define INLINE_SYSCALL(name, nr, args...)	({			\
++#define INLINE_SYSCALL(name, nr, args...)				\
++({									\
+ 	long __sys_res;							\
+ 	{								\
+ 		register unsigned long __res asm("r28");		\
+ 		LOAD_ARGS_##nr(args)					\
+-		/* FIXME: HACK stw/ldw r19 around syscall */		\
++		/* FIXME: HACK save/load r19 around syscall */		\
+ 		asm volatile(						\
+-			STW_ASM_PIC					\
++			SAVE_ASM_PIC					\
+ 			"	ble  0x100(%%sr2, %%r0)\n"		\
+ 			"	ldi %1, %%r20\n"			\
+-			LDW_ASM_PIC					\
++			LOAD_ASM_PIC					\
+ 			: "=r" (__res)					\
+ 			: "i" (SYS_ify(name)) ASM_ARGS_##nr		\
+-			: CALL_CLOB_REGS CLOB_ARGS_##nr			\
++			: "memory", CALL_CLOB_REGS CLOB_ARGS_##nr	\
+ 		);							\
+ 		__sys_res = (long)__res;				\
+ 	}								\
+@@ -339,8 +379,8 @@
+ /* INTERNAL_SYSCALL_DECL - Allows us to setup some function static
+    value to use within the context of the syscall
+    INTERNAL_SYSCALL_ERROR_P - Returns 0 if it wasn't an error, 1 otherwise
+-   You are allowed to use the syscall result (val) and the DECL error variable
+-   to determine what went wrong.
++   You are allowed to use the syscall result (val) and the DECL error 
++   variable to determine what went wrong.
+    INTERLAL_SYSCALL_ERRNO - Munges the val/err pair into the error number.
+    In our case we just flip the sign. */
+ 
+@@ -357,46 +397,46 @@
+ 
+ /* Similar to INLINE_SYSCALL but we don't set errno */
+ #undef INTERNAL_SYSCALL
+-#define INTERNAL_SYSCALL(name, err, nr, args...) 		\
+-({								\
+-	long __sys_res;						\
+-	{							\
+-		register unsigned long __res asm("r28");	\
+-		LOAD_ARGS_##nr(args)				\
+-		/* FIXME: HACK stw/ldw r19 around syscall */	\
+-		asm volatile(					\
+-			STW_ASM_PIC				\
+-			"	ble  0x100(%%sr2, %%r0)\n"	\
+-			"	ldi %1, %%r20\n"		\
+-			LDW_ASM_PIC				\
+-			: "=r" (__res)				\
+-			: "i" (SYS_ify(name)) ASM_ARGS_##nr	\
+-			: CALL_CLOB_REGS CLOB_ARGS_##nr		\
+-		);						\
+-		__sys_res = (long)__res;			\
+-	}							\
+-	__sys_res;						\
++#define INTERNAL_SYSCALL(name, err, nr, args...) 			\
++({									\
++	long __sys_res;							\
++	{								\
++		register unsigned long __res asm("r28");		\
++		LOAD_ARGS_##nr(args)					\
++		/* FIXME: HACK save/load r19 around syscall */		\
++		asm volatile(						\
++			SAVE_ASM_PIC					\
++			"	ble  0x100(%%sr2, %%r0)\n"		\
++			"	ldi %1, %%r20\n"			\
++			LOAD_ASM_PIC					\
++			: "=r" (__res)					\
++			: "i" (SYS_ify(name)) ASM_ARGS_##nr		\
++			: "memory", CALL_CLOB_REGS CLOB_ARGS_##nr	\
++		);							\
++		__sys_res = (long)__res;				\
++	}								\
++	__sys_res;							\
+  })
+ 
+ #define LOAD_ARGS_0()
+-#define LOAD_ARGS_1(r26)					\
+-	register unsigned long __r26 __asm__("r26") = (unsigned long)(r26);   \
+-	LOAD_ARGS_0()
+-#define LOAD_ARGS_2(r26,r25)					\
+-	register unsigned long __r25 __asm__("r25") = (unsigned long)(r25);   \
+-	LOAD_ARGS_1(r26)
+-#define LOAD_ARGS_3(r26,r25,r24)				\
+-	register unsigned long __r24 __asm__("r24") = (unsigned long)(r24);   \
+-	LOAD_ARGS_2(r26,r25)
+-#define LOAD_ARGS_4(r26,r25,r24,r23)				\
+-	register unsigned long __r23 __asm__("r23") = (unsigned long)(r23);   \
+-	LOAD_ARGS_3(r26,r25,r24)
+-#define LOAD_ARGS_5(r26,r25,r24,r23,r22)			\
+-	register unsigned long __r22 __asm__("r22") = (unsigned long)(r22);   \
+-	LOAD_ARGS_4(r26,r25,r24,r23)
+-#define LOAD_ARGS_6(r26,r25,r24,r23,r22,r21)			\
+-	register unsigned long __r21 __asm__("r21") = (unsigned long)(r21);   \
+-	LOAD_ARGS_5(r26,r25,r24,r23,r22)
++#define LOAD_ARGS_1(r26)						\
++  register unsigned long __r26 __asm__("r26") = (unsigned long)(r26);	\
++  LOAD_ARGS_0()
++#define LOAD_ARGS_2(r26,r25)						\
++  register unsigned long __r25 __asm__("r25") = (unsigned long)(r25);	\
++  LOAD_ARGS_1(r26)
++#define LOAD_ARGS_3(r26,r25,r24)					\
++  register unsigned long __r24 __asm__("r24") = (unsigned long)(r24);	\
++  LOAD_ARGS_2(r26,r25)
++#define LOAD_ARGS_4(r26,r25,r24,r23)					\
++  register unsigned long __r23 __asm__("r23") = (unsigned long)(r23);	\
++  LOAD_ARGS_3(r26,r25,r24)
++#define LOAD_ARGS_5(r26,r25,r24,r23,r22)				\
++  register unsigned long __r22 __asm__("r22") = (unsigned long)(r22);	\
++  LOAD_ARGS_4(r26,r25,r24,r23)
++#define LOAD_ARGS_6(r26,r25,r24,r23,r22,r21)				\
++  register unsigned long __r21 __asm__("r21") = (unsigned long)(r21);	\
++  LOAD_ARGS_5(r26,r25,r24,r23,r22)
+ 
+ /* Even with zero args we use r20 for the syscall number */
+ #define ASM_ARGS_0
+--- libc-orig/sysdeps/unix/sysv/linux/posix_fadvise.c	2003-08-16 20:36:22.000000000 -0400
++++ libc/sysdeps/unix/sysv/linux/posix_fadvise.c	2004-09-26 22:45:33.000000000 -0400
+@@ -35,6 +35,19 @@
+     return INTERNAL_SYSCALL_ERRNO (ret, err);
+   return 0;
+ #else
++# ifdef __NR_fadvise64_64
++  INTERNAL_SYSCALL_DECL (err);  
++  int ret = INTERNAL_SYSCALL (fadvise64_64, err, 6, fd,
++			      __LONG_LONG_PAIR ((long) (offset >> 31),
++						(long) offset),
++			      __LONG_LONG_PAIR ((long) (len >> 31),
++						(long) len),
++			      advise);
++  if (INTERNAL_SYSCALL_ERROR_P (ret, err))
++    return INTERNAL_SYSCALL_ERRNO (ret, err);
++  return 0;
++# else
+   return ENOSYS;
++# endif
+ #endif
+ }
+--- glibc-2.3.5.old/sysdeps/hppa/dl-lookupcfg.h	2005-05-15 20:36:10.383193082 -0600
++++ glibc-2.3.5/sysdeps/hppa/dl-lookupcfg.h	2005-05-15 21:44:07.543952666 -0600
+@@ -67,3 +69,15 @@
+ #define DL_DT_FINI_ADDRESS(map, addr) \
+   ((Elf32_Addr)(addr) & 2 ? (addr) : DL_AUTO_FUNCTION_ADDRESS (map, addr))
+ 
++/* The type of the return value of fixup/profile_fixup */
++#define DL_FIXUP_VALUE_TYPE struct fdesc
++
++/* Construct a fixup value from the address and linkmap */
++#define DL_FIXUP_MAKE_VALUE(map, addr) \
++   ((struct fdesc) { (addr), (map)->l_info[DT_PLTGOT]->d_un.d_ptr })
++
++/* Extract the code address from a fixup value */
++#define DL_FIXUP_VALUE_CODE_ADDR(value) ((value).ip)
++#define DL_FIXUP_VALUE_ADDR(value) ((uintptr_t) &(value))
++#define DL_FIXUP_ADDR_VALUE(addr) (*(struct fdesc *) (addr))
++
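As a reading aid (not part of the patch), here is a minimal sketch of what
the INLINE_SYSCALL path above amounts to for a one-argument call on hppa,
in the non-PIC case where SAVE_ASM_PIC/LOAD_ASM_PIC are just nops.  The
wrapper name my_close is hypothetical, <asm/unistd.h> is assumed to provide
__NR_close, and the clobber list only approximates what CALL_CLOB_REGS and
CLOB_ARGS_1 expand to:

    /* Illustrative only -- non-PIC hppa, one syscall argument.  */
    #include <errno.h>
    #include <asm/unistd.h>		/* assumed to provide __NR_close */

    static long my_close (int fd)
    {
      long sys_res;
      {
        register unsigned long res asm ("r28");	/* kernel return value */
        register unsigned long a0  asm ("r26") = (unsigned long) fd;
        asm volatile (
          "	ble  0x100(%%sr2, %%r0)\n"	/* trap into the syscall gateway */
          "	ldi  %1, %%r20\n"		/* syscall number, in the delay slot */
          : "=r" (res)
          : "i" (__NR_close), "r" (a0)
          : "memory", "%r1", "%r2", "%r20", "%r29", "%r31",
            "%r21", "%r22", "%r23", "%r24", "%r25");
        sys_res = (long) res;
      }
      /* The kernel returns -errno in [-4095, -1] on failure; this plays the
         same role as the NO_ERROR (-0x1000) comparison in DO_CALL.  */
      if ((unsigned long) sys_res >= (unsigned long) -4095)
        {
          errno = -sys_res;
          sys_res = -1;
        }
      return sys_res;
    }

The "memory" clobber added by this patch is what stops gcc from keeping
values cached in registers across the trap; without it the compiler may
reorder or elide loads and stores around the syscall.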


