
r1516 - in glibc-package/branches/glibc-2.4/debian: . patches



Author: schizo
Date: 2006-05-23 01:52:58 +0000 (Tue, 23 May 2006)
New Revision: 1516

Modified:
   glibc-package/branches/glibc-2.4/debian/changelog
   glibc-package/branches/glibc-2.4/debian/patches/glibc235-hppa-lt.diff
   glibc-package/branches/glibc-2.4/debian/patches/linuxthreads-sizefix.diff
Log:
Update linuxthreads tarball to 5/5 HEAD
    - Update linuxthreads-sizefix.diff.
    - Update glibc235-hppa-lt.diff.


Modified: glibc-package/branches/glibc-2.4/debian/changelog
===================================================================
--- glibc-package/branches/glibc-2.4/debian/changelog	2006-05-23 01:47:32 UTC (rev 1515)
+++ glibc-package/branches/glibc-2.4/debian/changelog	2006-05-23 01:52:58 UTC (rev 1516)
@@ -15,7 +15,8 @@
     - Remove glibc235-gcc4-elf.diff (merged upstream).
     - Remove siginfo_h.diff (merged upstream).
     - Remove everything to do with nscd_nischeck.
-    - Update linuxthreads-sizefix.diff for 2.4.
+    - Update linuxthreads-sizefix.diff.
+    - Update glibc235-hppa-lt.diff.
     - debian/shlibver: Bump up to 2.4-1.
     - Add any/local-bashisms.diff: fix invalid test operator (==)
       in run-iconv-test.sh

Modified: glibc-package/branches/glibc-2.4/debian/patches/glibc235-hppa-lt.diff
===================================================================
--- glibc-package/branches/glibc-2.4/debian/patches/glibc235-hppa-lt.diff	2006-05-23 01:47:32 UTC (rev 1515)
+++ glibc-package/branches/glibc-2.4/debian/patches/glibc235-hppa-lt.diff	2006-05-23 01:52:58 UTC (rev 1516)
@@ -238,188 +238,6 @@
  {
    return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
  }
---- libc-orig/linuxthreads/sysdeps/hppa/pspinlock.c	2002-08-26 18:39:51.000000000 -0400
-+++ libc/linuxthreads/sysdeps/hppa/pspinlock.c	2004-08-15 14:22:02.000000000 -0400
-@@ -24,13 +24,10 @@
- int
- __pthread_spin_lock (pthread_spinlock_t *lock)
- {
--  unsigned int val;
-+  volatile unsigned int *addr = __ldcw_align (lock);
- 
--  do
--    asm volatile ("ldcw %1,%0"
--		  : "=r" (val), "=m" (*lock)
--		  : "m" (*lock));
--  while (!val);
-+  while (__ldcw (addr) == 0)
-+    while (*addr == 0) ;
- 
-   return 0;
- }
-@@ -40,13 +37,9 @@
- int
- __pthread_spin_trylock (pthread_spinlock_t *lock)
- {
--  unsigned int val;
-+  volatile unsigned int *a = __ldcw_align (lock);
- 
--  asm volatile ("ldcw %1,%0"
--		: "=r" (val), "=m" (*lock)
--		: "m" (*lock));
--
--  return val ? 0 : EBUSY;
-+  return __ldcw (a) ? 0 : EBUSY;
- }
- weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
- 
-@@ -54,7 +47,11 @@
- int
- __pthread_spin_unlock (pthread_spinlock_t *lock)
- {
--  *lock = 1;
-+  volatile unsigned int *a = __ldcw_align (lock);
-+  int tmp = 1;
-+  /* This should act as a memory barrier for newer compilers */
-+  __asm__ __volatile__ ("stw,ma %1,0(%0)"
-+                        : : "r" (a), "r" (tmp) : "memory");           
-   return 0;
- }
- weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
-@@ -66,7 +63,11 @@
-   /* We can ignore the `pshared' parameter.  Since we are busy-waiting
-      all processes which can access the memory location `lock' points
-      to can use the spinlock.  */
--  *lock = 1;
-+  volatile unsigned int *a = __ldcw_align (lock);
-+  int tmp = 1;
-+  /* This should act as a memory barrier for newer compilers */
-+  __asm__ __volatile__ ("stw,ma %1,0(%0)"
-+                        : : "r" (a), "r" (tmp) : "memory");           
-   return 0;
- }
- weak_alias (__pthread_spin_init, pthread_spin_init)
---- libc-orig/linuxthreads/sysdeps/hppa/pt-machine.h	2003-07-31 15:15:42.000000000 -0400
-+++ libc/linuxthreads/sysdeps/hppa/pt-machine.h	2004-08-23 14:39:23.000000000 -0400
-@@ -22,41 +22,103 @@
- #ifndef _PT_MACHINE_H
- #define _PT_MACHINE_H   1
- 
-+#include <sys/types.h>
- #include <bits/initspin.h>
- 
- #ifndef PT_EI
- # define PT_EI extern inline __attribute__ ((always_inline))
- #endif
- 
--extern long int testandset (int *spinlock);
--extern int __compare_and_swap (long int *p, long int oldval, long int newval);
-+extern inline long int testandset (__atomic_lock_t *spinlock);
-+extern inline int __compare_and_swap (long int *p, long int oldval, long int newval);
-+extern inline int lock_held (__atomic_lock_t *spinlock); 
-+extern inline int __load_and_clear (__atomic_lock_t *spinlock);
- 
- /* Get some notion of the current stack.  Need not be exactly the top
-    of the stack, just something somewhere in the current frame.  */
- #define CURRENT_STACK_FRAME  stack_pointer
- register char * stack_pointer __asm__ ("%r30");
- 
-+/* Get/Set thread-specific pointer.  We have to call into the kernel to
-+ * modify it, but we can read it in user mode.  */
-+
-+#define THREAD_SELF __get_cr27()
-+
-+static inline struct _pthread_descr_struct * __get_cr27(void)
-+{
-+	long cr27;
-+	asm("mfctl %%cr27, %0" : "=r" (cr27) : );
-+	return (struct _pthread_descr_struct *) cr27;
-+}
-+
-+#define INIT_THREAD_SELF(descr, nr) __set_cr27(descr)
-+
-+static inline void __set_cr27(struct _pthread_descr_struct * cr27)
-+{
-+	asm(
-+		"ble	0xe0(%%sr2, %%r0)\n\t"
-+		"copy	%0, %%r26"
-+	 : : "r" (cr27) : "r26" );
-+}
-+
-+/* We want the OS to assign stack addresses.  */
-+#define FLOATING_STACKS	1
-+#define ARCH_STACK_MAX_SIZE	8*1024*1024
- 
- /* The hppa only has one atomic read and modify memory operation,
-    load and clear, so hppa spinlocks must use zero to signify that
--   someone is holding the lock.  */
-+   someone is holding the lock.  The address used for the ldcw
-+   semaphore must be 16-byte aligned.  */
-+#define __ldcw(a) ({ \
-+  unsigned int __ret;							\
-+  __asm__ __volatile__("ldcw 0(%1),%0"					\
-+                      : "=r" (__ret) : "r" (a) : "memory");		\
-+  __ret;								\
-+})
-+
-+/* Strongly ordered lock reset */
-+#define __lock_reset(lock_addr, tmp) ({						\
-+	__asm__ __volatile__ ("stw,ma %1,0(%0)"					\
-+				: : "r" (lock_addr), "r" (tmp) : "memory"); 	\
-+    })
-+
-+/* Because malloc only guarantees 8-byte alignment for malloc'd data,
-+   and GCC only guarantees 8-byte alignment for stack locals, we can't
-+   be assured of 16-byte alignment for atomic lock data even if we
-+   specify "__attribute ((aligned(16)))" in the type declaration.  So,
-+   we use a struct containing an array of four ints for the atomic lock
-+   type and dynamically select the 16-byte aligned int from the array
-+   for the semaphore.  */
-+#define __PA_LDCW_ALIGNMENT 16
-+#define __ldcw_align(a) ({ \
-+  volatile unsigned int __ret = (unsigned int) a;			\
-+  if ((__ret & ~(__PA_LDCW_ALIGNMENT - 1)) < (unsigned int) a)		\
-+    __ret = (__ret & ~(__PA_LDCW_ALIGNMENT - 1)) + __PA_LDCW_ALIGNMENT; \
-+  (unsigned int *) __ret;						\
-+})
- 
--#define xstr(s) str(s)
--#define str(s) #s
- /* Spinlock implementation; required.  */
--PT_EI long int
--testandset (int *spinlock)
-+PT_EI int
-+__load_and_clear (__atomic_lock_t *spinlock)
- {
--  int ret;
-+  volatile unsigned int *a = __ldcw_align (spinlock);
- 
--  __asm__ __volatile__(
--       "ldcw 0(%2),%0"
--       : "=r"(ret), "=m"(*spinlock)
--       : "r"(spinlock));
-+  return __ldcw (a);
-+}
- 
--  return ret == 0;
-+/* Emulate testandset */
-+PT_EI long int
-+testandset (__atomic_lock_t *spinlock)
-+{
-+  return (__load_and_clear(spinlock) == 0);
- }
--#undef str
--#undef xstr
- 
-+PT_EI int
-+lock_held (__atomic_lock_t *spinlock)
-+{
-+  volatile unsigned int *a = __ldcw_align (spinlock);
-+
-+  return *a == 0;
-+}
-+		
- #endif /* pt-machine.h */
 --- libc-orig/linuxthreads/sysdeps/pthread/bits/initspin.h	2002-08-26 18:39:44.000000000 -0400
 +++ libc/linuxthreads/sysdeps/pthread/bits/initspin.h	2004-02-23 09:36:18.000000000 -0500
 @@ -23,6 +23,7 @@
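
The pspinlock.c and pt-machine.h changes dropped from the patch above
(presumably because the 5/5 linuxthreads tarball now carries them) revolve
around one fact: PA-RISC's only atomic read-modify-write instruction, ldcw
(load word and clear), requires a 16-byte-aligned operand, while malloc and
GCC stack locals only guarantee 8-byte alignment.  Hence the four-word
__atomic_lock_t and the runtime selection of the aligned word.  A minimal,
host-compilable sketch of that alignment trick (illustrative names, no real
atomics; the round-up expression is equivalent to the patch's __ldcw_align,
which rounds down and then bumps by 16 when misaligned):

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  #define PA_LDCW_ALIGNMENT 16

  /* Four words guarantee that one of them is 16-byte aligned even when
     the containing object is only 8-byte (or merely 4-byte) aligned.  */
  typedef struct { volatile unsigned int lock[4]; } atomic_lock_t;

  static volatile unsigned int *ldcw_align (atomic_lock_t *l)
  {
    uintptr_t a = (uintptr_t) l->lock;
    /* Round up to the next 16-byte boundary unless already aligned.  */
    a = (a + PA_LDCW_ALIGNMENT - 1) & ~(uintptr_t) (PA_LDCW_ALIGNMENT - 1);
    return (volatile unsigned int *) a;
  }

  int main (void)
  {
    atomic_lock_t l;
    volatile unsigned int *w = ldcw_align (&l);
    assert (((uintptr_t) w & (PA_LDCW_ALIGNMENT - 1)) == 0);
    assert (w >= l.lock && w < l.lock + 4);   /* stays inside the array */
    printf ("aligned lock word at %p in [%p, %p)\n",
            (void *) w, (void *) l.lock, (void *) (l.lock + 4));
    return 0;
  }

On real hardware the selected word is then passed to ldcw, which atomically
loads it and stores zero, so only the one thread that read a nonzero value
has acquired the lock.
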
@@ -505,37 +323,6 @@
      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, PTHREAD_PROCESS_PRIVATE }
  #endif
  
---- libc-orig/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h	2002-08-26 18:39:55.000000000 -0400
-+++ libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/initspin.h	2004-02-23 09:35:37.000000000 -0500
-@@ -19,9 +19,23 @@
- 
- /* Initial value of a spinlock.  PA-RISC only implements atomic load
-    and clear so this must be non-zero. */
--#define __LT_SPINLOCK_INIT 1
-+#define __LT_SPINLOCK_INIT ((__atomic_lock_t) { { 1, 1, 1, 1 } })
-+
-+/* Initialize global spinlocks without cast, generally macro wrapped */
-+#define __LT_SPINLOCK_ALT_INIT { { 1, 1, 1, 1 } }
-+
-+/* Macros for lock initializers, not using the above definition.
-+   The above definition is not used in the case that static initializers
-+   use this value. */
-+#define __LOCK_ALT_INITIALIZER { __LT_SPINLOCK_ALT_INIT, 0 }
-+
-+/* Used to initialize _pthread_fastlock's in non-static case */
-+#define __LOCK_INITIALIZER ((struct _pthread_fastlock){ __LT_SPINLOCK_INIT, 0 })
-+
-+/* Used in pthread_atomic initialization */
-+#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_ALT_INIT }
-+
-+/* Tell the rest of the code that the initializer is non-zero without
-+   explaining its internal structure */
-+#define __LT_INITIALIZER_NOT_ZERO
- 
--/* Macros for lock initializers, using the above definition. */
--#define __LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
--#define __ALT_LOCK_INITIALIZER { 0, __LT_SPINLOCK_INIT }
--#define __ATOMIC_INITIALIZER { 0, __LT_SPINLOCK_INIT }
 --- libc-orig/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h	1969-12-31 19:00:00.000000000 -0500
 +++ libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/bits/pthreadtypes.h	2003-12-08 21:25:00.000000000 -0500
 @@ -0,0 +1,160 @@
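
The initspin.h section removed above follows from the same constraint:
under ldcw, zero means "held", so every word of the four-word lock is
initialized to 1, and whichever word __ldcw_align() later selects is
already in the unlocked state.  A standalone illustration (sketch types
and macro names, not glibc's actual declarations):

  #include <assert.h>

  /* 0 = held, 1 = free under ldcw.  */
  typedef struct { volatile unsigned int lock[4]; } atomic_lock_t;

  /* All four words start free; the aligned one becomes the semaphore.  */
  #define LT_SPINLOCK_INIT_SKETCH { { 1, 1, 1, 1 } }

  static atomic_lock_t initialized_lock = LT_SPINLOCK_INIT_SKETCH;

  int main (void)
  {
    for (int i = 0; i < 4; i++)
      assert (initialized_lock.lock[i] == 1);
    return 0;
  }
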
@@ -699,210 +486,3 @@
 +typedef unsigned long int pthread_t;
 +
 +#endif	/* bits/pthreadtypes.h */
---- libc-orig/linuxthreads/sysdeps/unix/sysv/linux/hppa/sysdep-cancel.h	2003-10-10 21:28:08.000000000 -0400
-+++ libc/linuxthreads/sysdeps/unix/sysv/linux/hppa/sysdep-cancel.h	2004-09-22 20:05:32.000000000 -0400
-@@ -29,61 +29,109 @@
- #  define NO_ERROR -0x1000
- # endif
- 
-+/* The syscall cancellation mechanism requires userspace
-+   assistance; the following code does roughly this:
-+
-+   	do arguments (read arg5 and arg6 to registers)
-+	setup frame
-+	
-+	check if there are threads, yes jump to pseudo_cancel
-+	
-+	unthreaded:
-+		syscall
-+		check syscall return (jump to pre_end)
-+		set errno
-+		set return to -1
-+		(jump to pre_end)
-+		
-+	pseudo_cancel:
-+		cenable
-+		syscall
-+		cdisable
-+		check syscall return (jump to pre_end)
-+		set errno
-+		set return to -1
-+		
-+	pre_end
-+		restore stack
-+	
-+	It is expected that 'ret' and 'END' macros will
-+	append an 'undo arguments' and 'return' to
-+	this PSEUDO macro. */
-+   
- # undef PSEUDO
- # define PSEUDO(name, syscall_name, args)				\
--  ENTRY (name)								\
--    SINGLE_THREAD_P					ASM_LINE_SEP	\
--    cmpib,<> 0,%ret0,Lpseudo_cancel			ASM_LINE_SEP	\
--    nop							ASM_LINE_SEP	\
--    DO_CALL(syscall_name, args)				ASM_LINE_SEP	\
--    /* DONE! */						ASM_LINE_SEP	\
--    bv 0(2)						ASM_LINE_SEP	\
--    nop							ASM_LINE_SEP	\
--  Lpseudo_cancel:					ASM_LINE_SEP	\
--    /* store return ptr */				ASM_LINE_SEP	\
--    stw %rp, -20(%sr0,%sp)				ASM_LINE_SEP	\
--    /* save syscall args */				ASM_LINE_SEP	\
--    PUSHARGS_##args /* MACRO */				ASM_LINE_SEP	\
--    STW_PIC						ASM_LINE_SEP	\
--    CENABLE /* FUNC CALL */				ASM_LINE_SEP	\
--    ldo 64(%sp), %sp					ASM_LINE_SEP	\
--    ldo -64(%sp), %sp					ASM_LINE_SEP	\
--    LDW_PIC						ASM_LINE_SEP	\
--    /* restore syscall args */				ASM_LINE_SEP	\
--    POPARGS_##args					ASM_LINE_SEP	\
--    /* save r4 in arg0 stack slot */			ASM_LINE_SEP	\
--    stw %r4, -36(%sr0,%sp)				ASM_LINE_SEP	\
--    /* save mask from cenable */			ASM_LINE_SEP	\
--    copy %ret0, %r4					ASM_LINE_SEP	\
--    ble 0x100(%sr2,%r0)					ASM_LINE_SEP    \
--    ldi SYS_ify (syscall_name), %r20			ASM_LINE_SEP	\
--    LDW_PIC						ASM_LINE_SEP	\
--    /* pass mask as arg0 to cdisable */			ASM_LINE_SEP	\
--    copy %r4, %r26					ASM_LINE_SEP	\
--    copy %ret0, %r4					ASM_LINE_SEP	\
--    CDISABLE						ASM_LINE_SEP	\
--    ldo 64(%sp), %sp					ASM_LINE_SEP	\
--    ldo -64(%sp), %sp					ASM_LINE_SEP	\
--    LDW_PIC						ASM_LINE_SEP	\
--    /* compare error */					ASM_LINE_SEP	\
--    ldi NO_ERROR,%r1					ASM_LINE_SEP	\
--    /* branch if no error */				ASM_LINE_SEP	\
--    cmpb,>>=,n %r1,%r4,Lpre_end				ASM_LINE_SEP	\
--    nop							ASM_LINE_SEP	\
--    SYSCALL_ERROR_HANDLER				ASM_LINE_SEP	\
--    ldo 64(%sp), %sp					ASM_LINE_SEP	\
--    ldo -64(%sp), %sp					ASM_LINE_SEP	\
--    /* No need to LDW_PIC */				ASM_LINE_SEP	\
--    /* make syscall res value positive */		ASM_LINE_SEP	\
--    sub %r0, %r4, %r4					ASM_LINE_SEP	\
--    /* store into errno location */			ASM_LINE_SEP	\
--    stw %r4, 0(%sr0,%ret0)				ASM_LINE_SEP	\
--    /* return -1 */					ASM_LINE_SEP	\
--    ldo -1(%r0), %ret0					ASM_LINE_SEP	\
--  Lpre_end:						ASM_LINE_SEP	\
--    ldw -20(%sr0,%sp), %rp             			ASM_LINE_SEP	\
--    /* No need to LDW_PIC */				ASM_LINE_SEP	\
--    ldw -36(%sr0,%sp), %r4				ASM_LINE_SEP
-+	ENTRY (name)							\
-+	DOARGS_##args					ASM_LINE_SEP	\
-+	copy TREG, %r1					ASM_LINE_SEP	\
-+	copy %sp, TREG					ASM_LINE_SEP	\
-+	stwm %r1, 64(%sp)				ASM_LINE_SEP	\
-+	stw %rp, -20(%sp)				ASM_LINE_SEP	\
-+	stw TREG, -4(%sp)				ASM_LINE_SEP	\
-+	/* Done setting up frame, continue... */	ASM_LINE_SEP	\
-+	SINGLE_THREAD_P					ASM_LINE_SEP	\
-+	cmpib,<>,n 0,%ret0,L(pseudo_cancel)		ASM_LINE_SEP	\
-+L(unthreaded):						ASM_LINE_SEP	\
-+	/* Save r19 */					ASM_LINE_SEP	\
-+	SAVE_PIC(TREG)					ASM_LINE_SEP	\
-+	/* Do syscall, delay loads # */			ASM_LINE_SEP	\
-+	ble  0x100(%sr2,%r0)				ASM_LINE_SEP	\
-+	ldi SYS_ify (syscall_name), %r20 /* delay */	ASM_LINE_SEP	\
-+	ldi NO_ERROR,%r1				ASM_LINE_SEP	\
-+	cmpb,>>=,n %r1,%ret0,L(pre_end)			ASM_LINE_SEP	\
-+	/* Restore r19 from TREG */			ASM_LINE_SEP	\
-+	LOAD_PIC(TREG) /* delay */			ASM_LINE_SEP	\
-+	SYSCALL_ERROR_HANDLER				ASM_LINE_SEP	\
-+	/* Use TREG for temp storage */			ASM_LINE_SEP	\
-+	copy %ret0, TREG /* delay */			ASM_LINE_SEP	\
-+	/* OPTIMIZE: Don't reload r19 */		ASM_LINE_SEP	\
-+	/* do a -1*syscall_ret0 */			ASM_LINE_SEP	\
-+	sub %r0, TREG, TREG				ASM_LINE_SEP	\
-+	/* Store into errno location */			ASM_LINE_SEP	\
-+	stw TREG, 0(%sr0,%ret0)				ASM_LINE_SEP	\
-+	b L(pre_end)					ASM_LINE_SEP	\
-+	/* return -1 as error */			ASM_LINE_SEP	\
-+	ldo -1(%r0), %ret0 /* delay */			ASM_LINE_SEP	\
-+L(pseudo_cancel):					ASM_LINE_SEP	\
-+	PUSHARGS_##args /* Save args */			ASM_LINE_SEP	\
-+	/* Save r19 into TREG */			ASM_LINE_SEP	\
-+	CENABLE /* FUNC CALL */				ASM_LINE_SEP	\
-+	SAVE_PIC(TREG) /* delay */			ASM_LINE_SEP	\
-+	/* restore syscall args */			ASM_LINE_SEP	\
-+	POPARGS_##args					ASM_LINE_SEP	\
-+	/* save mask from cenable (use stub rp slot) */	ASM_LINE_SEP	\
-+	stw %ret0, -24(%sp)				ASM_LINE_SEP	\
-+	/* ... SYSCALL ... */				ASM_LINE_SEP	\
-+	ble 0x100(%sr2,%r0)				ASM_LINE_SEP    \
-+	ldi SYS_ify (syscall_name), %r20 /* delay */	ASM_LINE_SEP	\
-+	/* ............... */				ASM_LINE_SEP	\
-+	LOAD_PIC(TREG)					ASM_LINE_SEP	\
-+	/* pass mask as arg0 to cdisable */		ASM_LINE_SEP	\
-+	ldw -24(%sp), %r26				ASM_LINE_SEP	\
-+	CDISABLE					ASM_LINE_SEP	\
-+	stw %ret0, -24(%sp) /* delay */			ASM_LINE_SEP	\
-+	/* Restore syscall return */			ASM_LINE_SEP	\
-+	ldw -24(%sp), %ret0				ASM_LINE_SEP	\
-+	/* compare error */				ASM_LINE_SEP	\
-+	ldi NO_ERROR,%r1				ASM_LINE_SEP	\
-+	/* branch if no error */			ASM_LINE_SEP	\
-+	cmpb,>>=,n %r1,%ret0,L(pre_end)			ASM_LINE_SEP	\
-+	LOAD_PIC(TREG)	/* cond. nullify */		ASM_LINE_SEP	\
-+	copy %ret0, TREG /* save syscall return */	ASM_LINE_SEP	\
-+	SYSCALL_ERROR_HANDLER				ASM_LINE_SEP	\
-+	/* make syscall res value positive */		ASM_LINE_SEP	\
-+	sub %r0, TREG, TREG	/* delay */		ASM_LINE_SEP	\
-+	/* No need to LOAD_PIC */			ASM_LINE_SEP	\
-+	/* store into errno location */			ASM_LINE_SEP	\
-+	stw TREG, 0(%sr0,%ret0)				ASM_LINE_SEP	\
-+	/* return -1 */					ASM_LINE_SEP	\
-+	ldo -1(%r0), %ret0				ASM_LINE_SEP	\
-+L(pre_end):						ASM_LINE_SEP	\
-+	/* Restore rp before exit */			ASM_LINE_SEP	\
-+	ldw -84(%sr0,%sp), %rp				ASM_LINE_SEP	\
-+	/* Undo frame */				ASM_LINE_SEP	\
-+	ldwm -64(%sp),TREG				ASM_LINE_SEP	\
-+	/* No need to LOAD_PIC */			ASM_LINE_SEP
- 
- /* Save arguments into our frame */
- # define PUSHARGS_0	/* nothing to do */
-@@ -91,8 +139,8 @@
- # define PUSHARGS_2	PUSHARGS_1 stw %r25, -40(%sr0,%sp)	ASM_LINE_SEP
- # define PUSHARGS_3	PUSHARGS_2 stw %r24, -44(%sr0,%sp)	ASM_LINE_SEP
- # define PUSHARGS_4	PUSHARGS_3 stw %r23, -48(%sr0,%sp)	ASM_LINE_SEP
--# define PUSHARGS_5	PUSHARGS_4 /* Args are on the stack... */
--# define PUSHARGS_6	PUSHARGS_5
-+# define PUSHARGS_5	PUSHARGS_4 stw %r22, -52(%sr0,%sp)	ASM_LINE_SEP 
-+# define PUSHARGS_6	PUSHARGS_5 stw %r21, -56(%sr0,%sp)	ASM_LINE_SEP
- 
- /* Bring them back from the stack */
- # define POPARGS_0	/* nothing to do */
-@@ -101,7 +149,7 @@
- # define POPARGS_3	POPARGS_2 ldw -44(%sr0,%sp), %r24	ASM_LINE_SEP
- # define POPARGS_4	POPARGS_3 ldw -48(%sr0,%sp), %r23	ASM_LINE_SEP
- # define POPARGS_5	POPARGS_4 ldw -52(%sr0,%sp), %r22	ASM_LINE_SEP
--# define POPARGS_6	POPARGS_5 ldw -54(%sr0,%sp), %r21	ASM_LINE_SEP
-+# define POPARGS_6	POPARGS_5 ldw -56(%sr0,%sp), %r21	ASM_LINE_SEP
- 
- # ifdef IS_IN_libpthread
- #  ifdef PIC
-@@ -163,10 +211,10 @@
- /* This ALT version requires newer kernel support */
- #  define SINGLE_THREAD_P_MFCTL						\
- 	mfctl %cr27, %ret0					ASM_LINE_SEP	\
--	cmpib,= NO_THREAD_CR27,%ret0,Lstp			ASM_LINE_SEP	\
-+	cmpib,= NO_THREAD_CR27,%ret0,L(stp)			ASM_LINE_SEP	\
- 	nop							ASM_LINE_SEP	\
- 	ldw MULTIPLE_THREADS_OFFSET(%sr0,%ret0),%ret0		ASM_LINE_SEP	\
-- Lstp:								ASM_LINE_SEP
-+L(stp):								ASM_LINE_SEP
- #  ifdef PIC
- /* Slower version uses GOT to get value of __local_multiple_threads */
- #   define SINGLE_THREAD_P							\
-@@ -174,7 +222,7 @@
- 	ldw RT%__local_multiple_threads(%sr0,%r1), %ret0	ASM_LINE_SEP	\
- 	ldw 0(%sr0,%ret0), %ret0 				ASM_LINE_SEP
- #  else
--  /* Slow non-pic version using DP */
-+/* Slow non-pic version using DP */
- #   define SINGLE_THREAD_P								\
- 	addil LR%__local_multiple_threads-$global$,%r27  		ASM_LINE_SEP	\
- 	ldw RR%__local_multiple_threads-$global$(%sr0,%r1),%ret0	ASM_LINE_SEP
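
The sysdep-cancel.h rewrite shown in the dropped hunk implements the usual
cancellable-syscall shape in PA-RISC assembly: take a fast path when
single-threaded, otherwise bracket the raw syscall with CENABLE/CDISABLE so
the thread can only be cancelled while blocked in the kernel, then turn a
-errno return (between -1 and NO_ERROR = -0x1000) into errno plus -1.  A
C-level analogue with stand-in helpers (hypothetical names; libc's
syscall() already performs the errno conversion the assembly does by hand):

  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int multiple_threads;                 /* what SINGLE_THREAD_P tests */

  static int  enable_async_cancel (void)  { return 0; }  /* CENABLE stand-in  */
  static void disable_async_cancel (int o) { (void) o; } /* CDISABLE stand-in */

  static long cancellable_syscall (long nr)
  {
    if (!multiple_threads)
      return syscall (nr);         /* L(unthreaded): no cancellation window */

    /* L(pseudo_cancel): open the window only for the duration of the call. */
    int old = enable_async_cancel ();
    long ret = syscall (nr);
    disable_async_cancel (old);
    return ret;
  }

  int main (void)
  {
    multiple_threads = 1;
    printf ("getpid() via wrapper: %ld\n", cancellable_syscall (SYS_getpid));
    return 0;
  }

The PUSHARGS_5/PUSHARGS_6 and POPARGS_6 changes in the same hunk fix the
spill slots for the fifth and sixth syscall arguments (the old POPARGS_6
reloaded from -54 instead of -56, a misaligned slot).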

Modified: glibc-package/branches/glibc-2.4/debian/patches/linuxthreads-sizefix.diff
===================================================================
--- glibc-package/branches/glibc-2.4/debian/patches/linuxthreads-sizefix.diff	2006-05-23 01:47:32 UTC (rev 1515)
+++ glibc-package/branches/glibc-2.4/debian/patches/linuxthreads-sizefix.diff	2006-05-23 01:52:58 UTC (rev 1516)
@@ -188,26 +188,6 @@
  
  /* Alignment requirements for the TCB.  */
  #  define TLS_TCB_ALIGN __alignof__ (struct _pthread_descr_struct)
-Index: glibc-2.4/linuxthreads/sysdeps/ia64/tls.h
-===================================================================
---- glibc-2.4.orig/linuxthreads/sysdeps/ia64/tls.h	2005-01-09 15:01:13.000000000 -0500
-+++ glibc-2.4/linuxthreads/sysdeps/ia64/tls.h	2006-03-11 01:00:38.000000000 -0500
-@@ -65,7 +65,14 @@
- #  define TLS_TCB_SIZE sizeof (tcbhead_t)
- 
- /* This is the size we need before TCB.  */
--#  define TLS_PRE_TCB_SIZE sizeof (struct _pthread_descr_struct)
-+#  ifndef IS_IN_rtld
-+#   define TLS_PRE_TCB_SIZE sizeof (struct _pthread_descr_struct)
-+#  else
-+#   include <nptl-struct-pthread.h>
-+#   define TLS_PRE_TCB_SIZE \
-+  (sizeof (struct _pthread_descr_struct) > NPTL_STRUCT_PTHREAD_SIZE	\
-+   ? sizeof (struct _pthread_descr_struct) : NPTL_STRUCT_PTHREAD_SIZE)
-+#  endif
- 
- /* Alignment requirements for the TCB.  */
- #  define TLS_TCB_ALIGN __alignof__ (struct _pthread_descr_struct)
 Index: glibc-2.4/linuxthreads/sysdeps/powerpc/tls.h
 ===================================================================
 --- glibc-2.4.orig/linuxthreads/sysdeps/powerpc/tls.h	2005-01-09 15:01:14.000000000 -0500


