[glibc] 01/01: patches/kfreebsd/local-fbtl.diff: update to revision 5954 (from glibc-bsd).
This is an automated email from the git hooks/post-receive script.
aurel32 pushed a commit to branch glibc-2.23
in repository glibc.
commit b4715454881d4b9834bac787b9c289b1226bc217
Author: Aurelien Jarno <aurelien@aurel32.net>
Date: Sun Mar 20 12:21:00 2016 +0100
patches/kfreebsd/local-fbtl.diff: update to revision 5954 (from glibc-bsd).
---
debian/changelog | 2 +
debian/patches/kfreebsd/local-fbtl.diff | 2315 ++++++++++++++--------------
debian/patches/kfreebsd/local-sysdeps.diff | 27 +-
3 files changed, 1158 insertions(+), 1186 deletions(-)
diff --git a/debian/changelog b/debian/changelog
index 52cdcc2..3659a8b 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -5,6 +5,8 @@ glibc (2.23-0experimental1) UNRELEASED; urgency=medium
tst-malloc-thread-exit as xfail.
* debian/testsuite-xfail-debian.mk (ppc64) mark tst-malloc-thread-exit
test as xfail, it is a known issue and not a regression.
+ * patches/kfreebsd/local-fbtl.diff: update to revision 5954 (from
+ glibc-bsd).
[ Samuel Thibault ]
* hurd-i386/cvs-libpthread.diff: More updates to glibc-2.23.
diff --git a/debian/patches/kfreebsd/local-fbtl.diff b/debian/patches/kfreebsd/local-fbtl.diff
index 952dce8..ff6387c 100644
--- a/debian/patches/kfreebsd/local-fbtl.diff
+++ b/debian/patches/kfreebsd/local-fbtl.diff
@@ -16882,7 +16882,7 @@
+hidden_def (__nptl_death_event)
--- /dev/null
+++ b/fbtl/forward.c
-@@ -0,0 +1,218 @@
+@@ -0,0 +1,219 @@
+/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -17082,8 +17082,9 @@
+FORWARD2 (pthread_self, pthread_t, (void), (), return 0)
+
+
-+FORWARD (pthread_setcancelstate, (int state, int *oldstate), (state, oldstate),
-+ 0)
++FORWARD (__pthread_setcancelstate, (int state, int *oldstate),
++ (state, oldstate), 0)
++strong_alias (__pthread_setcancelstate, pthread_setcancelstate)
+
+FORWARD (pthread_setcanceltype, (int type, int *oldtype), (type, oldtype), 0)
+
@@ -17411,7 +17412,7 @@
+ .ptr_pthread_mutex_lock = __pthread_mutex_lock,
+ .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
+ .ptr_pthread_self = __pthread_self,
-+ .ptr_pthread_setcancelstate = __pthread_setcancelstate,
++ .ptr___pthread_setcancelstate = __pthread_setcancelstate,
+ .ptr_pthread_setcanceltype = __pthread_setcanceltype,
+ .ptr___pthread_cleanup_upto = __pthread_cleanup_upto,
+ .ptr___pthread_once = __pthread_once,
@@ -19149,7 +19150,7 @@
+EWOULDBLOCK EWOULDBLOCK
--- /dev/null
+++ b/fbtl/pthreadP.h
-@@ -0,0 +1,660 @@
+@@ -0,0 +1,661 @@
+/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19659,6 +19660,7 @@
+hidden_proto (__pthread_getspecific)
+hidden_proto (__pthread_setspecific)
+hidden_proto (__pthread_once)
++hidden_proto (__pthread_setcancelstate)
+#endif
+
+extern int __pthread_cond_broadcast_2_0 (pthread_cond_2_0_t *cond);
@@ -27926,7 +27928,7 @@
+}
--- /dev/null
+++ b/fbtl/pthread_setcancelstate.c
-@@ -0,0 +1,72 @@
+@@ -0,0 +1,73 @@
+/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -27999,6 +28001,7 @@
+ return 0;
+}
+strong_alias (__pthread_setcancelstate, pthread_setcancelstate)
++hidden_def (__pthread_setcancelstate)
--- /dev/null
+++ b/fbtl/pthread_setcanceltype.c
@@ -0,0 +1,75 @@
@@ -30654,7 +30657,7 @@
+#ifndef __ASSUME_PRIVATE_FUTEX
+ int private_futex;
+#else
-+ int __unused1;
++ int __glibc_reserved1;
+#endif
+ /* Reservation of some values for the TM ABI. */
+ void *__private_tm[4];
@@ -31226,10 +31229,10 @@
+ || __builtin_expect (__libc_alloca_cutoff (size), 1));
+}
--- /dev/null
-+++ b/fbtl/sysdeps/pthread/bits/libc-lock.h
-@@ -0,0 +1,187 @@
-+/* libc-internal interface for mutex locks. NPTL version.
-+ Copyright (C) 1996-2013 Free Software Foundation, Inc.
++++ b/fbtl/sysdeps/pthread/bits/sigthread.h
+@@ -0,0 +1,43 @@
++/* Signal handling function for threaded programs.
++ Copyright (C) 1998-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
@@ -31246,186 +31249,42 @@
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, see <http://www.gnu.org/licenses/>. */
+
-+#ifndef _BITS_LIBC_LOCK_H
-+#define _BITS_LIBC_LOCK_H 1
-+
-+#include <pthread.h>
-+#define __need_NULL
-+#include <stddef.h>
-+
-+
-+/* Mutex type. */
-+#if defined _LIBC || defined _IO_MTSAFE_IO
-+# if (!IS_IN (libc) && !IS_IN (libpthread)) || !defined _LIBC
-+typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
-+# else
-+typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
-+# endif
-+#else
-+typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
-+#endif
-+
-+/* Define a lock variable NAME with storage class CLASS. The lock must be
-+ initialized with __libc_lock_init before it can be used (or define it
-+ with __libc_lock_define_initialized, below). Use `extern' for CLASS to
-+ declare a lock defined in another module. In public structure
-+ definitions you must use a pointer to the lock structure (i.e., NAME
-+ begins with a `*'), because its storage size will not be known outside
-+ of libc. */
-+#define __libc_lock_define_recursive(CLASS,NAME) \
-+ CLASS __libc_lock_recursive_t NAME;
-+
-+/* Define an initialized recursive lock variable NAME with storage
-+ class CLASS. */
-+#if defined _LIBC && (IS_IN (libc) || IS_IN (libpthread))
-+# define __libc_lock_define_initialized_recursive(CLASS,NAME) \
-+ CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
-+# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
-+ { LLL_LOCK_INITIALIZER, 0, NULL }
-+#else
-+# define __libc_lock_define_initialized_recursive(CLASS,NAME) \
-+ CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
-+# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
-+ {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
-+#endif
-+
-+/* Initialize a recursive mutex. */
-+#if defined _LIBC && (IS_IN (libc) || IS_IN (libpthread))
-+# define __libc_lock_init_recursive(NAME) \
-+ ((void) ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER))
-+#else
-+# define __libc_lock_init_recursive(NAME) \
-+ do { \
-+ if (__pthread_mutex_init != NULL) \
-+ { \
-+ pthread_mutexattr_t __attr; \
-+ __pthread_mutexattr_init (&__attr); \
-+ __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
-+ __pthread_mutex_init (&(NAME).mutex, &__attr); \
-+ __pthread_mutexattr_destroy (&__attr); \
-+ } \
-+ } while (0)
-+#endif
-+
-+/* Finalize recursive named lock. */
-+#if defined _LIBC && (IS_IN (libc) || IS_IN (libpthread))
-+# define __libc_lock_fini_recursive(NAME) ((void) 0)
-+#else
-+# define __libc_lock_fini_recursive(NAME) \
-+ __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
-+#endif
-+
-+/* Lock the recursive named lock variable. */
-+#if defined _LIBC && (IS_IN (libc) || IS_IN (libpthread))
-+# define __libc_lock_lock_recursive(NAME) \
-+ do { \
-+ void *self = THREAD_SELF; \
-+ if ((NAME).owner != self) \
-+ { \
-+ lll_lock ((NAME).lock, LLL_PRIVATE); \
-+ (NAME).owner = self; \
-+ } \
-+ ++(NAME).cnt; \
-+ } while (0)
-+#else
-+# define __libc_lock_lock_recursive(NAME) \
-+ __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
-+#endif
-+
-+/* Try to lock the recursive named lock variable. */
-+#if defined _LIBC && (IS_IN (libc) || IS_IN (libpthread))
-+# define __libc_lock_trylock_recursive(NAME) \
-+ ({ \
-+ int result = 0; \
-+ void *self = THREAD_SELF; \
-+ if ((NAME).owner != self) \
-+ { \
-+ if (lll_trylock ((NAME).lock) == 0) \
-+ { \
-+ (NAME).owner = self; \
-+ (NAME).cnt = 1; \
-+ } \
-+ else \
-+ result = EBUSY; \
-+ } \
-+ else \
-+ ++(NAME).cnt; \
-+ result; \
-+ })
-+#else
-+# define __libc_lock_trylock_recursive(NAME) \
-+ __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
-+#endif
++#ifndef _BITS_SIGTHREAD_H
++#define _BITS_SIGTHREAD_H 1
+
-+/* Unlock the recursive named lock variable. */
-+#if defined _LIBC && (IS_IN (libc) || IS_IN (libpthread))
-+/* We do no error checking here. */
-+# define __libc_lock_unlock_recursive(NAME) \
-+ do { \
-+ if (--(NAME).cnt == 0) \
-+ { \
-+ (NAME).owner = NULL; \
-+ lll_unlock ((NAME).lock, LLL_PRIVATE); \
-+ } \
-+ } while (0)
-+#else
-+# define __libc_lock_unlock_recursive(NAME) \
-+ __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
++#if !defined _SIGNAL_H && !defined _PTHREAD_H
++# error "Never include this file directly. Use <pthread.h> instead"
+#endif
+
-+/* Note that for I/O cleanup handling we are using the old-style
-+ cancel handling. It does not have to be integrated with C++ since
-+ no C++ code is called in the middle. The old-style handling is
-+ faster and the support is not going away. */
-+extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
-+ void (*routine) (void *), void *arg);
-+extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
-+ int execute);
-+
-+/* Start critical region with cleanup. */
-+#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
-+ { struct _pthread_cleanup_buffer _buffer; \
-+ int _avail; \
-+ if (DOIT) { \
-+ _avail = PTFAVAIL (_pthread_cleanup_push_defer); \
-+ if (_avail) { \
-+ __libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT, \
-+ ARG)); \
-+ } else { \
-+ _buffer.__routine = (FCT); \
-+ _buffer.__arg = (ARG); \
-+ } \
-+ } else { \
-+ _avail = 0; \
-+ }
++/* Functions for handling signals. */
+
-+/* End critical region with cleanup. */
-+#define __libc_cleanup_region_end(DOIT) \
-+ if (_avail) { \
-+ __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
-+ } else if (DOIT) \
-+ _buffer.__routine (_buffer.__arg); \
-+ }
++/* Modify the signal mask for the calling thread. The arguments have
++ the same meaning as for sigprocmask(2). */
++extern int pthread_sigmask (int __how,
++ const __sigset_t *__restrict __newmask,
++ __sigset_t *__restrict __oldmask)__THROW;
+
++/* Send signal SIGNO to the given thread. */
++extern int pthread_kill (pthread_t __threadid, int __signo) __THROW;
+
-+/* Hide the definitions which are only supposed to be used inside libc in
-+ a separate file. This file is not present in the installation! */
-+#ifdef _LIBC
-+# include "libc-lockP.h"
++#ifdef __USE_GNU
++/* Queue signal and data to a thread. */
++extern int pthread_sigqueue (pthread_t __threadid, int __signo,
++ const union sigval __value) __THROW;
+#endif
+
-+#endif /* bits/libc-lock.h */
++#endif /* bits/sigthread.h */
--- /dev/null
-+++ b/fbtl/sysdeps/pthread/bits/libc-lockP.h
-@@ -0,0 +1,436 @@
-+/* Private libc-internal interface for mutex locks. NPTL version.
-+ Copyright (C) 1996-2013 Free Software Foundation, Inc.
++++ b/fbtl/sysdeps/pthread/createthread.c
+@@ -0,0 +1,298 @@
++/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
-+ modify it under the terms of the GNU Lesser General Public License as
-+ published by the Free Software Foundation; either version 2.1 of the
-+ License, or (at your option) any later version.
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -31433,752 +31292,154 @@
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
-+ License along with the GNU C Library; see the file COPYING.LIB. If
-+ not, see <http://www.gnu.org/licenses/>. */
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
+
-+#ifndef _BITS_LIBC_LOCKP_H
-+#define _BITS_LIBC_LOCKP_H 1
++#include <sched.h>
++#include <setjmp.h>
++#include <signal.h>
++#include <stdlib.h>
++#include <atomic.h>
++#include <ldsodefs.h>
++#include <tls.h>
++#include <stdint.h>
+
-+#include <pthread.h>
-+#define __need_NULL
-+#include <stddef.h>
++#include "kernel-features.h"
+
+
-+/* Fortunately Linux now has a mean to do locking which is realtime
-+ safe without the aid of the thread library. We also need no fancy
-+ options like error checking mutexes etc. We only need simple
-+ locks, maybe recursive. This can be easily and cheaply implemented
-+ using futexes. We will use them everywhere except in ld.so since
-+ ld.so might be used on old kernels with a different libc.so. */
-+#include <lowlevellock.h>
-+#include <tls.h>
-+#include <pthread-functions.h>
++#define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD)
+
-+#if IS_IN (libpthread)
-+/* This gets us the declarations of the __pthread_* internal names,
-+ and hidden_proto for them. */
-+# include <fbtl/pthreadP.h>
++/* Unless otherwise specified, the thread "register" is going to be
++ initialized with a pointer to the TCB. */
++#ifndef TLS_VALUE
++# define TLS_VALUE pd
+#endif
+
-+/* Mutex type. */
-+#if !IS_IN (libc) && !IS_IN (libpthread)
-+typedef pthread_mutex_t __libc_lock_t;
-+#else
-+typedef int __libc_lock_t;
++#ifndef ARCH_CLONE
++# define ARCH_CLONE __clone
+#endif
-+typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
-+typedef pthread_rwlock_t __libc_rwlock_t;
-+
-+/* Type for key to thread-specific data. */
-+typedef pthread_key_t __libc_key_t;
-+
-+/* Define a lock variable NAME with storage class CLASS. The lock must be
-+ initialized with __libc_lock_init before it can be used (or define it
-+ with __libc_lock_define_initialized, below). Use `extern' for CLASS to
-+ declare a lock defined in another module. In public structure
-+ definitions you must use a pointer to the lock structure (i.e., NAME
-+ begins with a `*'), because its storage size will not be known outside
-+ of libc. */
-+#define __libc_lock_define(CLASS,NAME) \
-+ CLASS __libc_lock_t NAME;
-+#define __libc_rwlock_define(CLASS,NAME) \
-+ CLASS __libc_rwlock_t NAME;
-+#define __rtld_lock_define_recursive(CLASS,NAME) \
-+ CLASS __rtld_lock_recursive_t NAME;
-+
-+/* Define an initialized lock variable NAME with storage class CLASS.
+
-+ For the C library we take a deeper look at the initializer. For
-+ this implementation all fields are initialized to zero. Therefore
-+ we don't initialize the variable which allows putting it into the
-+ BSS section. (Except on PA-RISC and other odd architectures, where
-+ initialized locks must be set to one due to the lack of normal
-+ atomic operations.) */
+
-+#if IS_IN (libc) || IS_IN (libpthread)
-+# if LLL_LOCK_INITIALIZER == 0
-+# define __libc_lock_define_initialized(CLASS,NAME) \
-+ CLASS __libc_lock_t NAME;
-+# else
-+# define __libc_lock_define_initialized(CLASS,NAME) \
-+ CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
-+# endif
-+#else
-+# define __libc_lock_define_initialized(CLASS,NAME) \
-+ CLASS __libc_lock_t NAME;
++#ifndef TLS_MULTIPLE_THREADS_IN_TCB
++/* Pointer to the corresponding variable in libc. */
++int *__libc_multiple_threads_ptr attribute_hidden;
+#endif
+
-+#define __libc_rwlock_define_initialized(CLASS,NAME) \
-+ CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
+
-+#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
-+ CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
-+#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
-+ {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
++struct rtprio;
++struct thr_param {
++ void (*start_func)(void *); /* thread entry function. */
++ void *arg; /* argument for entry function. */
++ char *stack_base; /* stack base address. */
++ size_t stack_size; /* stack size. */
++ char *tls_base; /* tls base address. */
++ size_t tls_size; /* tls size. */
++ long *child_tid; /* address to store new TID. */
++ long *parent_tid; /* parent accesses the new TID here. */
++ int flags; /* thread flags. */
++ struct rtprio *rtp; /* Real-time scheduling priority */
++ void *spare[3]; /* TODO: cpu affinity mask etc. */
++};
+
-+#define __rtld_lock_initialize(NAME) \
-+ (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
+
-+/* If we check for a weakly referenced symbol and then perform a
-+ normal jump to it te code generated for some platforms in case of
-+ PIC is unnecessarily slow. What would happen is that the function
-+ is first referenced as data and then it is called indirectly
-+ through the PLT. We can make this a direct jump. */
-+#ifdef __PIC__
-+# define __libc_maybe_call(FUNC, ARGS, ELSE) \
-+ (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
-+ _fn != NULL ? (*_fn) ARGS : ELSE; }))
-+#else
-+# define __libc_maybe_call(FUNC, ARGS, ELSE) \
-+ (FUNC != NULL ? FUNC ARGS : ELSE)
++static int
++do_clone (struct pthread *pd, const struct pthread_attr *attr,
++ int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
++ int stopped)
++{
++#ifdef PREPARE_CREATE
++ PREPARE_CREATE;
+#endif
+
-+/* Call thread functions through the function pointer table. */
-+#if defined SHARED && IS_IN (libc)
-+# define PTFAVAIL(NAME) __libc_pthread_functions_init
-+# define __libc_ptf_call(FUNC, ARGS, ELSE) \
-+ (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
-+# define __libc_ptf_call_always(FUNC, ARGS) \
-+ PTHFCT_CALL (ptr_##FUNC, ARGS)
-+#elif IS_IN (libpthread)
-+# define PTFAVAIL(NAME) 1
-+# define __libc_ptf_call(FUNC, ARGS, ELSE) \
-+ FUNC ARGS
-+# define __libc_ptf_call_always(FUNC, ARGS) \
-+ FUNC ARGS
-+#else
-+# define PTFAVAIL(NAME) (NAME != NULL)
-+# define __libc_ptf_call(FUNC, ARGS, ELSE) \
-+ __libc_maybe_call (FUNC, ARGS, ELSE)
-+# define __libc_ptf_call_always(FUNC, ARGS) \
-+ FUNC ARGS
-+#endif
++ struct thr_param p;
+
++ if (__builtin_expect (stopped != 0, 0))
++ /* We make sure the thread does not run far by forcing it to get a
++ lock. We lock it here too so that the new thread cannot continue
++ until we tell it to. */
++ lll_lock (pd->lock, LLL_PRIVATE);
+
-+/* Initialize the named lock variable, leaving it in a consistent, unlocked
-+ state. */
-+#if IS_IN (libc) || IS_IN (libpthread)
-+# define __libc_lock_init(NAME) \
-+ ((void) ((NAME) = LLL_LOCK_INITIALIZER))
-+#else
-+# define __libc_lock_init(NAME) \
-+ __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
-+#endif
-+#if defined SHARED && IS_IN (libc)
-+/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER) is inefficient. */
-+# define __libc_rwlock_init(NAME) \
-+ ((void) __builtin_memset (&(NAME), '\0', sizeof (NAME)))
-+#else
-+# define __libc_rwlock_init(NAME) \
-+ __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
-+#endif
++ /* One more thread. We cannot have the thread do this itself, since it
++ might exist but not have been scheduled yet by the time we've returned
++ and need to check the value to behave correctly. We must do it before
++ creating the thread, in case it does get scheduled first and then
++ might mistakenly think it was the only thread. In the failure case,
++ we momentarily store a false value; this doesn't matter because there
++ is no kosher thing a signal handler interrupting us right here can do
++ that cares whether the thread count is correct. */
++ atomic_increment (&__nptl_nthreads);
++#if 0
++ int rc = ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
++ pd, &pd->tid, TLS_VALUE, &pd->tid);
+
-+/* Finalize the named lock variable, which must be locked. It cannot be
-+ used again until __libc_lock_init is called again on it. This must be
-+ called on a lock variable before the containing storage is reused. */
-+#if IS_IN (libc) || IS_IN (libpthread)
-+# define __libc_lock_fini(NAME) ((void) 0)
+#else
-+# define __libc_lock_fini(NAME) \
-+ __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
-+#endif
-+#if defined SHARED && IS_IN (libc)
-+# define __libc_rwlock_fini(NAME) ((void) 0)
-+#else
-+# define __libc_rwlock_fini(NAME) \
-+ __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
++ memset(&p, 0, sizeof(p));
++ p.start_func = fct;
++ p.arg = pd;
++ p.stack_base = stackaddr; /* first in STACK_VARIABLES_ARGS */
++ p.stack_size = stacksize; /* second in STACK_VARIABLES_ARGS */
++ p.tls_base = (char*)pd;
++ p.child_tid = &(pd->ktid);
++
++ int rc = INLINE_SYSCALL(thr_new, 2, &p, sizeof(p));
++
++ if (rc)
++ {
++ errno = rc;
++ rc = -1;;
++ }
+#endif
+
-+/* Lock the named lock variable. */
-+#if IS_IN (libc) || IS_IN (libpthread)
-+# ifndef __libc_lock_lock
-+# define __libc_lock_lock(NAME) \
-+ ({ lll_lock (NAME, LLL_PRIVATE); 0; })
-+# endif
-+#else
-+# undef __libc_lock_lock
-+# define __libc_lock_lock(NAME) \
-+ __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
-+#endif
-+#define __libc_rwlock_rdlock(NAME) \
-+ __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
-+#define __libc_rwlock_wrlock(NAME) \
-+ __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)
+
-+/* Try to lock the named lock variable. */
-+#if IS_IN (libc) || IS_IN (libpthread)
-+# ifndef __libc_lock_trylock
-+# define __libc_lock_trylock(NAME) \
-+ lll_trylock (NAME)
-+# endif
-+#else
-+# undef __libc_lock_trylock
-+# define __libc_lock_trylock(NAME) \
-+ __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
-+#endif
-+#define __libc_rwlock_tryrdlock(NAME) \
-+ __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
-+#define __libc_rwlock_trywrlock(NAME) \
-+ __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)
++ if (__builtin_expect (rc == -1, 0))
++ {
++ atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second. */
++ pd->ktid = 0;
+
-+#define __rtld_lock_trylock_recursive(NAME) \
-+ __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
++ /* Perhaps a thread wants to change the IDs and if waiting
++ for this stillborn thread. */
++ if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0)
++ == -2, 0))
++ lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);
+
-+/* Unlock the named lock variable. */
-+#if IS_IN (libc) || IS_IN (libpthread)
-+# define __libc_lock_unlock(NAME) \
-+ lll_unlock (NAME, LLL_PRIVATE)
-+#else
-+# define __libc_lock_unlock(NAME) \
-+ __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
-+#endif
-+#define __libc_rwlock_unlock(NAME) \
-+ __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)
++ /* Free the resources. */
++ __deallocate_stack (pd);
+
-+#ifdef SHARED
-+# define __rtld_lock_default_lock_recursive(lock) \
-+ ++((pthread_mutex_t *)(lock))->__data.__count;
++ /* We have to translate error codes. */
++ return errno == ENOMEM ? EAGAIN : errno;
++ }
++#warning set scheduling parameters
++#if 0
++ /* Now we have the possibility to set scheduling parameters etc. */
++ if (__builtin_expect (stopped != 0, 0))
++ {
++ INTERNAL_SYSCALL_DECL (err);
++ int res = 0;
+
-+# define __rtld_lock_default_unlock_recursive(lock) \
-+ --((pthread_mutex_t *)(lock))->__data.__count;
++ /* Set the affinity mask if necessary. */
++ if (attr->cpuset != NULL)
++ {
++ res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
++ attr->cpusetsize, attr->cpuset);
+
-+# define __rtld_lock_lock_recursive(NAME) \
-+ GL(dl_rtld_lock_recursive) (&(NAME).mutex)
++ if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
++ {
++ /* The operation failed. We have to kill the thread. First
++ send it the cancellation signal. */
++ INTERNAL_SYSCALL_DECL (err2);
++ err_out:
++ (void) INTERNAL_SYSCALL (tgkill, err2, 3,
++ THREAD_GETMEM (THREAD_SELF, pid),
++ pd->tid, SIGCANCEL);
+
-+# define __rtld_lock_unlock_recursive(NAME) \
-+ GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
-+#else
-+# define __rtld_lock_lock_recursive(NAME) \
-+ __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
++ /* We do not free the stack here because the canceled thread
++ itself will do this. */
+
-+# define __rtld_lock_unlock_recursive(NAME) \
-+ __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
-+#endif
-+
-+/* Define once control variable. */
-+#if PTHREAD_ONCE_INIT == 0
-+/* Special case for static variables where we can avoid the initialization
-+ if it is zero. */
-+# define __libc_once_define(CLASS, NAME) \
-+ CLASS pthread_once_t NAME
-+#else
-+# define __libc_once_define(CLASS, NAME) \
-+ CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
-+#endif
-+
-+/* Call handler iff the first call. */
-+#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
-+ do { \
-+ if (PTFAVAIL (__pthread_once)) \
-+ __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL), \
-+ INIT_FUNCTION)); \
-+ else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) { \
-+ INIT_FUNCTION (); \
-+ (ONCE_CONTROL) |= 2; \
-+ } \
-+ } while (0)
-+
-+/* Get once control variable. */
-+#define __libc_once_get(ONCE_CONTROL) ((ONCE_CONTROL) != PTHREAD_ONCE_INIT)
-+
-+/* Note that for I/O cleanup handling we are using the old-style
-+ cancel handling. It does not have to be integrated with C++ snce
-+ no C++ code is called in the middle. The old-style handling is
-+ faster and the support is not going away. */
-+extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
-+ void (*routine) (void *), void *arg);
-+extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
-+ int execute);
-+extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
-+ void (*routine) (void *), void *arg);
-+extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
-+ int execute);
-+
-+/* Sometimes we have to exit the block in the middle. */
-+#define __libc_cleanup_end(DOIT) \
-+ if (_avail) { \
-+ __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
-+ } else if (DOIT) \
-+ _buffer.__routine (_buffer.__arg)
-+
-+
-+/* Normal cleanup handling, based on C cleanup attribute. */
-+__extern_inline void
-+__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
-+{
-+ if (f->__do_it)
-+ f->__cancel_routine (f->__cancel_arg);
-+}
-+
-+#define __libc_cleanup_push(fct, arg) \
-+ do { \
-+ struct __pthread_cleanup_frame __clframe \
-+ __attribute__ ((__cleanup__ (__libc_cleanup_routine))) \
-+ = { .__cancel_routine = (fct), .__cancel_arg = (arg), \
-+ .__do_it = 1 };
-+
-+#define __libc_cleanup_pop(execute) \
-+ __clframe.__do_it = (execute); \
-+ } while (0)
-+
-+
-+/* Create thread-specific key. */
-+#define __libc_key_create(KEY, DESTRUCTOR) \
-+ __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)
-+
-+/* Get thread-specific data. */
-+#define __libc_getspecific(KEY) \
-+ __libc_ptf_call (__pthread_getspecific, (KEY), NULL)
-+
-+/* Set thread-specific data. */
-+#define __libc_setspecific(KEY, VALUE) \
-+ __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
-+
-+
-+/* Register handlers to execute before and after `fork'. Note that the
-+ last parameter is NULL. The handlers registered by the libc are
-+ never removed so this is OK. */
-+#define __libc_atfork(PREPARE, PARENT, CHILD) \
-+ __register_atfork (PREPARE, PARENT, CHILD, NULL)
-+extern int __register_atfork (void (*__prepare) (void),
-+ void (*__parent) (void),
-+ void (*__child) (void),
-+ void *__dso_handle);
-+
-+/* Functions that are used by this file and are internal to the GNU C
-+ library. */
-+
-+extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
-+ const pthread_mutexattr_t *__mutex_attr);
-+
-+extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);
-+
-+extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);
-+
-+extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);
-+
-+extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);
-+
-+extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);
-+
-+extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);
-+
-+extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
-+ int __kind);
-+
-+extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
-+ const pthread_rwlockattr_t *__attr);
-+
-+extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);
-+
-+extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);
-+
-+extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);
-+
-+extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);
-+
-+extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);
-+
-+extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
-+
-+extern int __pthread_key_create (pthread_key_t *__key,
-+ void (*__destr_function) (void *));
-+
-+extern int __pthread_setspecific (pthread_key_t __key,
-+ const void *__pointer);
-+
-+extern void *__pthread_getspecific (pthread_key_t __key);
-+
-+extern int __pthread_once (pthread_once_t *__once_control,
-+ void (*__init_routine) (void));
-+
-+extern int __pthread_atfork (void (*__prepare) (void),
-+ void (*__parent) (void),
-+ void (*__child) (void));
-+
-+
-+
-+/* Make the pthread functions weak so that we can elide them from
-+ single-threaded processes. */
-+#ifndef __NO_WEAK_PTHREAD_ALIASES
-+# ifdef weak_extern
-+weak_extern (__pthread_mutex_init)
-+weak_extern (__pthread_mutex_destroy)
-+weak_extern (__pthread_mutex_lock)
-+weak_extern (__pthread_mutex_trylock)
-+weak_extern (__pthread_mutex_unlock)
-+weak_extern (__pthread_mutexattr_init)
-+weak_extern (__pthread_mutexattr_destroy)
-+weak_extern (__pthread_mutexattr_settype)
-+weak_extern (__pthread_rwlock_init)
-+weak_extern (__pthread_rwlock_destroy)
-+weak_extern (__pthread_rwlock_rdlock)
-+weak_extern (__pthread_rwlock_tryrdlock)
-+weak_extern (__pthread_rwlock_wrlock)
-+weak_extern (__pthread_rwlock_trywrlock)
-+weak_extern (__pthread_rwlock_unlock)
-+weak_extern (__pthread_key_create)
-+weak_extern (__pthread_setspecific)
-+weak_extern (__pthread_getspecific)
-+weak_extern (__pthread_once)
-+weak_extern (__pthread_initialize)
-+weak_extern (__pthread_atfork)
-+weak_extern (_pthread_cleanup_push_defer)
-+weak_extern (_pthread_cleanup_pop_restore)
-+weak_extern (pthread_setcancelstate)
-+# else
-+# pragma weak __pthread_mutex_init
-+# pragma weak __pthread_mutex_destroy
-+# pragma weak __pthread_mutex_lock
-+# pragma weak __pthread_mutex_trylock
-+# pragma weak __pthread_mutex_unlock
-+# pragma weak __pthread_mutexattr_init
-+# pragma weak __pthread_mutexattr_destroy
-+# pragma weak __pthread_mutexattr_settype
-+# pragma weak __pthread_rwlock_destroy
-+# pragma weak __pthread_rwlock_rdlock
-+# pragma weak __pthread_rwlock_tryrdlock
-+# pragma weak __pthread_rwlock_wrlock
-+# pragma weak __pthread_rwlock_trywrlock
-+# pragma weak __pthread_rwlock_unlock
-+# pragma weak __pthread_key_create
-+# pragma weak __pthread_setspecific
-+# pragma weak __pthread_getspecific
-+# pragma weak __pthread_once
-+# pragma weak __pthread_initialize
-+# pragma weak __pthread_atfork
-+# pragma weak _pthread_cleanup_push_defer
-+# pragma weak _pthread_cleanup_pop_restore
-+# pragma weak pthread_setcancelstate
-+# endif
-+#endif
-+
-+#endif /* bits/libc-lockP.h */
---- /dev/null
-+++ b/fbtl/sysdeps/pthread/bits/sigthread.h
-@@ -0,0 +1,43 @@
-+/* Signal handling function for threaded programs.
-+ Copyright (C) 1998-2013 Free Software Foundation, Inc.
-+ This file is part of the GNU C Library.
-+
-+ The GNU C Library is free software; you can redistribute it and/or
-+ modify it under the terms of the GNU Lesser General Public License as
-+ published by the Free Software Foundation; either version 2.1 of the
-+ License, or (at your option) any later version.
-+
-+ The GNU C Library is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ Lesser General Public License for more details.
-+
-+ You should have received a copy of the GNU Lesser General Public
-+ License along with the GNU C Library; see the file COPYING.LIB. If
-+ not, see <http://www.gnu.org/licenses/>. */
-+
-+#ifndef _BITS_SIGTHREAD_H
-+#define _BITS_SIGTHREAD_H 1
-+
-+#if !defined _SIGNAL_H && !defined _PTHREAD_H
-+# error "Never include this file directly. Use <pthread.h> instead"
-+#endif
-+
-+/* Functions for handling signals. */
-+
-+/* Modify the signal mask for the calling thread. The arguments have
-+ the same meaning as for sigprocmask(2). */
-+extern int pthread_sigmask (int __how,
-+ const __sigset_t *__restrict __newmask,
-+ __sigset_t *__restrict __oldmask)__THROW;
-+
-+/* Send signal SIGNO to the given thread. */
-+extern int pthread_kill (pthread_t __threadid, int __signo) __THROW;
-+
-+#ifdef __USE_GNU
-+/* Queue signal and data to a thread. */
-+extern int pthread_sigqueue (pthread_t __threadid, int __signo,
-+ const union sigval __value) __THROW;
-+#endif
-+
-+#endif /* bits/sigthread.h */
---- /dev/null
-+++ b/fbtl/sysdeps/pthread/bits/stdio-lock.h
-@@ -0,0 +1,110 @@
-+/* Thread package specific definitions of stream lock type. NPTL version.
-+ Copyright (C) 2000-2013 Free Software Foundation, Inc.
-+ This file is part of the GNU C Library.
-+
-+ The GNU C Library is free software; you can redistribute it and/or
-+ modify it under the terms of the GNU Lesser General Public
-+ License as published by the Free Software Foundation; either
-+ version 2.1 of the License, or (at your option) any later version.
-+
-+ The GNU C Library is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ Lesser General Public License for more details.
-+
-+ You should have received a copy of the GNU Lesser General Public
-+ License along with the GNU C Library; if not, see
-+ <http://www.gnu.org/licenses/>. */
-+
-+#ifndef _BITS_STDIO_LOCK_H
-+#define _BITS_STDIO_LOCK_H 1
-+
-+#include <bits/libc-lock.h>
-+#include <lowlevellock.h>
-+
-+
-+/* The locking here is very inexpensive, even for inlining. */
-+#define _IO_lock_inexpensive 1
-+
-+typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
-+
-+#define _IO_lock_initializer { LLL_LOCK_INITIALIZER, 0, NULL }
-+
-+#define _IO_lock_init(_name) \
-+ ((void) ((_name) = (_IO_lock_t) _IO_lock_initializer))
-+
-+#define _IO_lock_fini(_name) \
-+ ((void) 0)
-+
-+#define _IO_lock_lock(_name) \
-+ do { \
-+ void *__self = THREAD_SELF; \
-+ if ((_name).owner != __self) \
-+ { \
-+ lll_lock ((_name).lock, LLL_PRIVATE); \
-+ (_name).owner = __self; \
-+ } \
-+ ++(_name).cnt; \
-+ } while (0)
-+
-+#define _IO_lock_trylock(_name) \
-+ ({ \
-+ int __result = 0; \
-+ void *__self = THREAD_SELF; \
-+ if ((_name).owner != __self) \
-+ { \
-+ if (lll_trylock ((_name).lock) == 0) \
-+ { \
-+ (_name).owner = __self; \
-+ (_name).cnt = 1; \
-+ } \
-+ else \
-+ __result = EBUSY; \
-+ } \
-+ else \
-+ ++(_name).cnt; \
-+ __result; \
-+ })
-+
-+#define _IO_lock_unlock(_name) \
-+ do { \
-+ if (--(_name).cnt == 0) \
-+ { \
-+ (_name).owner = NULL; \
-+ lll_unlock ((_name).lock, LLL_PRIVATE); \
-+ } \
-+ } while (0)
-+
-+
-+
-+#define _IO_cleanup_region_start(_fct, _fp) \
-+ __libc_cleanup_region_start (((_fp)->_flags & _IO_USER_LOCK) == 0, _fct, _fp)
-+#define _IO_cleanup_region_start_noarg(_fct) \
-+ __libc_cleanup_region_start (1, _fct, NULL)
-+#define _IO_cleanup_region_end(_doit) \
-+ __libc_cleanup_region_end (_doit)
-+
-+#if defined _LIBC && IS_IN (libc)
-+
-+# ifdef __EXCEPTIONS
-+# define _IO_acquire_lock(_fp) \
-+ do { \
-+ _IO_FILE *_IO_acquire_lock_file \
-+ __attribute__((cleanup (_IO_acquire_lock_fct))) \
-+ = (_fp); \
-+ _IO_flockfile (_IO_acquire_lock_file);
-+# define _IO_acquire_lock_clear_flags2(_fp) \
-+ do { \
-+ _IO_FILE *_IO_acquire_lock_file \
-+ __attribute__((cleanup (_IO_acquire_lock_clear_flags2_fct))) \
-+ = (_fp); \
-+ _IO_flockfile (_IO_acquire_lock_file);
-+# else
-+# define _IO_acquire_lock(_fp) _IO_acquire_lock_needs_exceptions_enabled
-+# define _IO_acquire_lock_clear_flags2(_fp) _IO_acquire_lock (_fp)
-+# endif
-+# define _IO_release_lock(_fp) ; } while (0)
-+
-+#endif
-+
-+#endif /* bits/stdio-lock.h */
---- /dev/null
-+++ b/fbtl/sysdeps/pthread/createthread.c
-@@ -0,0 +1,298 @@
-+/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
-+ This file is part of the GNU C Library.
-+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-+
-+ The GNU C Library is free software; you can redistribute it and/or
-+ modify it under the terms of the GNU Lesser General Public
-+ License as published by the Free Software Foundation; either
-+ version 2.1 of the License, or (at your option) any later version.
-+
-+ The GNU C Library is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ Lesser General Public License for more details.
-+
-+ You should have received a copy of the GNU Lesser General Public
-+ License along with the GNU C Library; if not, see
-+ <http://www.gnu.org/licenses/>. */
-+
-+#include <sched.h>
-+#include <setjmp.h>
-+#include <signal.h>
-+#include <stdlib.h>
-+#include <atomic.h>
-+#include <ldsodefs.h>
-+#include <tls.h>
-+#include <stdint.h>
-+
-+#include "kernel-features.h"
-+
-+
-+#define CLONE_SIGNAL (CLONE_SIGHAND | CLONE_THREAD)
-+
-+/* Unless otherwise specified, the thread "register" is going to be
-+ initialized with a pointer to the TCB. */
-+#ifndef TLS_VALUE
-+# define TLS_VALUE pd
-+#endif
-+
-+#ifndef ARCH_CLONE
-+# define ARCH_CLONE __clone
-+#endif
-+
-+
-+#ifndef TLS_MULTIPLE_THREADS_IN_TCB
-+/* Pointer to the corresponding variable in libc. */
-+int *__libc_multiple_threads_ptr attribute_hidden;
-+#endif
-+
-+
-+struct rtprio;
-+struct thr_param {
-+ void (*start_func)(void *); /* thread entry function. */
-+ void *arg; /* argument for entry function. */
-+ char *stack_base; /* stack base address. */
-+ size_t stack_size; /* stack size. */
-+ char *tls_base; /* tls base address. */
-+ size_t tls_size; /* tls size. */
-+ long *child_tid; /* address to store new TID. */
-+ long *parent_tid; /* parent accesses the new TID here. */
-+ int flags; /* thread flags. */
-+ struct rtprio *rtp; /* Real-time scheduling priority */
-+ void *spare[3]; /* TODO: cpu affinity mask etc. */
-+};
-+
-+
-+static int
-+do_clone (struct pthread *pd, const struct pthread_attr *attr,
-+ int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
-+ int stopped)
-+{
-+#ifdef PREPARE_CREATE
-+ PREPARE_CREATE;
-+#endif
-+
-+ struct thr_param p;
-+
-+ if (__builtin_expect (stopped != 0, 0))
-+ /* We make sure the thread does not run far by forcing it to get a
-+ lock. We lock it here too so that the new thread cannot continue
-+ until we tell it to. */
-+ lll_lock (pd->lock, LLL_PRIVATE);
-+
-+ /* One more thread. We cannot have the thread do this itself, since it
-+ might exist but not have been scheduled yet by the time we've returned
-+ and need to check the value to behave correctly. We must do it before
-+ creating the thread, in case it does get scheduled first and then
-+ might mistakenly think it was the only thread. In the failure case,
-+ we momentarily store a false value; this doesn't matter because there
-+ is no kosher thing a signal handler interrupting us right here can do
-+ that cares whether the thread count is correct. */
-+ atomic_increment (&__nptl_nthreads);
-+#if 0
-+ int rc = ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
-+ pd, &pd->tid, TLS_VALUE, &pd->tid);
-+
-+#else
-+ memset(&p, 0, sizeof(p));
-+ p.start_func = fct;
-+ p.arg = pd;
-+ p.stack_base = stackaddr; /* first in STACK_VARIABLES_ARGS */
-+ p.stack_size = stacksize; /* second in STACK_VARIABLES_ARGS */
-+ p.tls_base = (char*)pd;
-+ p.child_tid = &(pd->ktid);
-+
-+ int rc = INLINE_SYSCALL(thr_new, 2, &p, sizeof(p));
-+
-+ if (rc)
-+ {
-+ errno = rc;
-+ rc = -1;;
-+ }
-+#endif
-+
-+
-+ if (__builtin_expect (rc == -1, 0))
-+ {
-+ atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second. */
-+ pd->ktid = 0;
-+
-+ /* Perhaps a thread wants to change the IDs and if waiting
-+ for this stillborn thread. */
-+ if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0)
-+ == -2, 0))
-+ lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);
-+
-+ /* Free the resources. */
-+ __deallocate_stack (pd);
-+
-+ /* We have to translate error codes. */
-+ return errno == ENOMEM ? EAGAIN : errno;
-+ }
-+#warning set scheduling parameters
-+#if 0
-+ /* Now we have the possibility to set scheduling parameters etc. */
-+ if (__builtin_expect (stopped != 0, 0))
-+ {
-+ INTERNAL_SYSCALL_DECL (err);
-+ int res = 0;
-+
-+ /* Set the affinity mask if necessary. */
-+ if (attr->cpuset != NULL)
-+ {
-+ res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
-+ attr->cpusetsize, attr->cpuset);
-+
-+ if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
-+ {
-+ /* The operation failed. We have to kill the thread. First
-+ send it the cancellation signal. */
-+ INTERNAL_SYSCALL_DECL (err2);
-+ err_out:
-+ (void) INTERNAL_SYSCALL (tgkill, err2, 3,
-+ THREAD_GETMEM (THREAD_SELF, pid),
-+ pd->tid, SIGCANCEL);
-+
-+ /* We do not free the stack here because the canceled thread
-+ itself will do this. */
-+
-+ return (INTERNAL_SYSCALL_ERROR_P (res, err)
-+ ? INTERNAL_SYSCALL_ERRNO (res, err)
-+ : 0);
-+ }
-+ }
++ return (INTERNAL_SYSCALL_ERROR_P (res, err)
++ ? INTERNAL_SYSCALL_ERRNO (res, err)
++ : 0);
++ }
++ }
+
+ /* Set the scheduling parameters. */
+ if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
@@ -32286,263 +31547,894 @@
+ lll_unlock (pd->lock, LLL_PRIVATE);
+ }
+
-+ return res;
-+ }
-+ }
++ return res;
++ }
++ }
++
++#ifdef NEED_DL_SYSINFO
++ assert (THREAD_SELF_SYSINFO == THREAD_SYSINFO (pd));
++#endif
++
++ /* Determine whether the newly created threads has to be started
++ stopped since we have to set the scheduling parameters or set the
++ affinity. */
++ bool stopped = false;
++ if (attr != NULL && (attr->cpuset != NULL
++ || (attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0))
++ stopped = true;
++ pd->stopped_start = stopped;
++ pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);
++
++ /* Actually create the thread. */
++ int res = do_clone (pd, attr, clone_flags, start_thread,
++ STACK_VARIABLES_ARGS, stopped);
++
++ if (res == 0 && stopped)
++ /* And finally restart the new thread. */
++ lll_unlock (pd->lock, LLL_PRIVATE);
++
++ return res;
++}
+--- /dev/null
++++ b/fbtl/sysdeps/pthread/flockfile.c
+@@ -0,0 +1,32 @@
++/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include <pthread.h>
++#include <stdio.h>
++#include <libio.h>
++#include <stdio-lock.h>
++
++
++void
++__flockfile (stream)
++ FILE *stream;
++{
++ _IO_lock_lock (*stream->_lock);
++}
++strong_alias (__flockfile, _IO_flockfile)
++weak_alias (__flockfile, flockfile)
+--- /dev/null
++++ b/fbtl/sysdeps/pthread/ftrylockfile.c
+@@ -0,0 +1,32 @@
++/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include <errno.h>
++#include <pthread.h>
++#include <stdio.h>
++#include <stdio-lock.h>
++
++
++int
++__ftrylockfile (stream)
++ FILE *stream;
++{
++ return _IO_lock_trylock (*stream->_lock);
++}
++strong_alias (__ftrylockfile, _IO_ftrylockfile)
++weak_alias (__ftrylockfile, ftrylockfile)
+--- /dev/null
++++ b/fbtl/sysdeps/pthread/funlockfile.c
+@@ -0,0 +1,32 @@
++/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#include <pthread.h>
++#include <stdio.h>
++#include <libio.h>
++#include <stdio-lock.h>
++
++
++void
++__funlockfile (stream)
++ FILE *stream;
++{
++ _IO_lock_unlock (*stream->_lock);
++}
++strong_alias (__funlockfile, _IO_funlockfile)
++weak_alias (__funlockfile, funlockfile)
+--- /dev/null
++++ b/fbtl/sysdeps/pthread/gai_misc.h
+@@ -0,0 +1,121 @@
++/* Copyright (C) 2006-2013 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++/* We define a special synchronization primitive for AIO. POSIX
++ conditional variables would be ideal but the pthread_cond_*wait
++ operations do not return on EINTR. This is a requirement for
++ correct aio_suspend and lio_listio implementations. */
++
++#include <assert.h>
++#include <signal.h>
++#include <pthreadP.h>
++#include <lowlevellock.h>
++
++#define DONT_NEED_GAI_MISC_COND 1
++
++#define GAI_MISC_NOTIFY(waitlist) \
++ do { \
++ if (*waitlist->counterp > 0 && --*waitlist->counterp == 0) \
++ lll_futex_wake ((unsigned int *) waitlist->counterp, 1, LLL_PRIVATE); \
++ } while (0)
++
++#define GAI_MISC_WAIT(result, futex, timeout, cancel) \
++ do { \
++ volatile int *futexaddr = &futex; \
++ int oldval = futex; \
++ \
++ if (oldval != 0) \
++ { \
++ pthread_mutex_unlock (&__gai_requests_mutex); \
++ \
++ int oldtype; \
++ if (cancel) \
++ oldtype = LIBC_CANCEL_ASYNC (); \
++ \
++ int status; \
++ do \
++ { \
++ status = lll_futex_timed_wait ((unsigned int *) futexaddr, oldval,\
++ timeout, LLL_PRIVATE); \
++ if (status != EWOULDBLOCK) \
++ break; \
++ \
++ oldval = *futexaddr; \
++ } \
++ while (oldval != 0); \
++ \
++ if (cancel) \
++ LIBC_CANCEL_RESET (oldtype); \
++ \
++ if (status == EINTR) \
++ result = EINTR; \
++ else if (status == ETIMEDOUT) \
++ result = EAGAIN; \
++ else \
++ assert (status == 0 || status == EWOULDBLOCK); \
++ \
++ pthread_mutex_lock (&__gai_requests_mutex); \
++ } \
++ } while (0)
++
++
++#define gai_start_notify_thread __gai_start_notify_thread
++#define gai_create_helper_thread __gai_create_helper_thread
++
++extern inline void
++__gai_start_notify_thread (void)
++{
++ sigset_t ss;
++ sigemptyset (&ss);
++ INTERNAL_SYSCALL_DECL (err);
++ INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &ss, NULL, _NSIG / 8);
++}
++
++extern inline int
++__gai_create_helper_thread (pthread_t *threadp, void *(*tf) (void *),
++ void *arg)
++{
++ pthread_attr_t attr;
++
++ /* Make sure the thread is created detached. */
++ pthread_attr_init (&attr);
++ pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
++
++ /* The helper thread needs only very little resources. */
++ (void) pthread_attr_setstacksize (&attr,
++ __pthread_get_minstack (&attr)
++ + 4 * PTHREAD_STACK_MIN);
++
++ /* Block all signals in the helper thread. To do this thoroughly we
++ temporarily have to block all signals here. */
++ sigset_t ss;
++ sigset_t oss;
++ sigfillset (&ss);
++ INTERNAL_SYSCALL_DECL (err);
++ INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &ss, &oss, _NSIG / 8);
++
++ int ret = pthread_create (threadp, &attr, tf, arg);
++
++ /* Restore the signal mask. */
++ INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &oss, NULL,
++ _NSIG / 8);
++
++ (void) pthread_attr_destroy (&attr);
++ return ret;
++}
++
++#include_next <gai_misc.h>
+--- /dev/null
++++ b/fbtl/sysdeps/pthread/libc-lock.h
+@@ -0,0 +1,187 @@
++/* libc-internal interface for mutex locks. NPTL version.
++ Copyright (C) 1996-2013 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public License as
++ published by the Free Software Foundation; either version 2.1 of the
++ License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; see the file COPYING.LIB. If
++ not, see <http://www.gnu.org/licenses/>. */
++
++#ifndef _BITS_LIBC_LOCK_H
++#define _BITS_LIBC_LOCK_H 1
++
++#include <pthread.h>
++#define __need_NULL
++#include <stddef.h>
++
++
++/* Mutex type. */
++#if defined _LIBC || defined _IO_MTSAFE_IO
++# if (!IS_IN (libc) && !IS_IN (libpthread)) || !defined _LIBC
++typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
++# else
++typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
++# endif
++#else
++typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
++#endif
++
++/* Define a lock variable NAME with storage class CLASS. The lock must be
++ initialized with __libc_lock_init before it can be used (or define it
++ with __libc_lock_define_initialized, below). Use `extern' for CLASS to
++ declare a lock defined in another module. In public structure
++ definitions you must use a pointer to the lock structure (i.e., NAME
++ begins with a `*'), because its storage size will not be known outside
++ of libc. */
++#define __libc_lock_define_recursive(CLASS,NAME) \
++ CLASS __libc_lock_recursive_t NAME;
++
++/* Define an initialized recursive lock variable NAME with storage
++ class CLASS. */
++#if defined _LIBC && (IS_IN (libc) || IS_IN (libpthread))
++# define __libc_lock_define_initialized_recursive(CLASS,NAME) \
++ CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
++# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
++ { LLL_LOCK_INITIALIZER, 0, NULL }
++#else
++# define __libc_lock_define_initialized_recursive(CLASS,NAME) \
++ CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
++# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
++ {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
++#endif
++
++/* Initialize a recursive mutex. */
++#if defined _LIBC && (IS_IN (libc) || IS_IN (libpthread))
++# define __libc_lock_init_recursive(NAME) \
++ ((void) ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER))
++#else
++# define __libc_lock_init_recursive(NAME) \
++ do { \
++ if (__pthread_mutex_init != NULL) \
++ { \
++ pthread_mutexattr_t __attr; \
++ __pthread_mutexattr_init (&__attr); \
++ __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
++ __pthread_mutex_init (&(NAME).mutex, &__attr); \
++ __pthread_mutexattr_destroy (&__attr); \
++ } \
++ } while (0)
++#endif
++
++/* Finalize recursive named lock. */
++#if defined _LIBC && (IS_IN (libc) || IS_IN (libpthread))
++# define __libc_lock_fini_recursive(NAME) ((void) 0)
++#else
++# define __libc_lock_fini_recursive(NAME) \
++ __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
++#endif
++
++/* Lock the recursive named lock variable. */
++#if defined _LIBC && (IS_IN (libc) || IS_IN (libpthread))
++# define __libc_lock_lock_recursive(NAME) \
++ do { \
++ void *self = THREAD_SELF; \
++ if ((NAME).owner != self) \
++ { \
++ lll_lock ((NAME).lock, LLL_PRIVATE); \
++ (NAME).owner = self; \
++ } \
++ ++(NAME).cnt; \
++ } while (0)
++#else
++# define __libc_lock_lock_recursive(NAME) \
++ __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
++#endif
++
++/* Try to lock the recursive named lock variable. */
++#if defined _LIBC && (IS_IN (libc) || IS_IN (libpthread))
++# define __libc_lock_trylock_recursive(NAME) \
++ ({ \
++ int result = 0; \
++ void *self = THREAD_SELF; \
++ if ((NAME).owner != self) \
++ { \
++ if (lll_trylock ((NAME).lock) == 0) \
++ { \
++ (NAME).owner = self; \
++ (NAME).cnt = 1; \
++ } \
++ else \
++ result = EBUSY; \
++ } \
++ else \
++ ++(NAME).cnt; \
++ result; \
++ })
++#else
++# define __libc_lock_trylock_recursive(NAME) \
++ __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
++#endif
++
++/* Unlock the recursive named lock variable. */
++#if defined _LIBC && (IS_IN (libc) || IS_IN (libpthread))
++/* We do no error checking here. */
++# define __libc_lock_unlock_recursive(NAME) \
++ do { \
++ if (--(NAME).cnt == 0) \
++ { \
++ (NAME).owner = NULL; \
++ lll_unlock ((NAME).lock, LLL_PRIVATE); \
++ } \
++ } while (0)
++#else
++# define __libc_lock_unlock_recursive(NAME) \
++ __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
++#endif
++
++/* Note that for I/O cleanup handling we are using the old-style
++ cancel handling. It does not have to be integrated with C++ since
++ no C++ code is called in the middle. The old-style handling is
++ faster and the support is not going away. */
++extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
++ void (*routine) (void *), void *arg);
++extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
++ int execute);
++
++/* Start critical region with cleanup. */
++#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
++ { struct _pthread_cleanup_buffer _buffer; \
++ int _avail; \
++ if (DOIT) { \
++ _avail = PTFAVAIL (_pthread_cleanup_push_defer); \
++ if (_avail) { \
++ __libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT, \
++ ARG)); \
++ } else { \
++ _buffer.__routine = (FCT); \
++ _buffer.__arg = (ARG); \
++ } \
++ } else { \
++ _avail = 0; \
++ }
++
++/* End critical region with cleanup. */
++#define __libc_cleanup_region_end(DOIT) \
++ if (_avail) { \
++ __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
++ } else if (DOIT) \
++ _buffer.__routine (_buffer.__arg); \
++ }
++
++
++/* Hide the definitions which are only supposed to be used inside libc in
++ a separate file. This file is not present in the installation! */
++#ifdef _LIBC
++# include "libc-lockP.h"
++#endif
++
++#endif /* libc-lock.h */
+--- /dev/null
++++ b/fbtl/sysdeps/pthread/libc-lockP.h
+@@ -0,0 +1,438 @@
++/* Private libc-internal interface for mutex locks. NPTL version.
++ Copyright (C) 1996-2013 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public License as
++ published by the Free Software Foundation; either version 2.1 of the
++ License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; see the file COPYING.LIB. If
++ not, see <http://www.gnu.org/licenses/>. */
++
++#ifndef _BITS_LIBC_LOCKP_H
++#define _BITS_LIBC_LOCKP_H 1
++
++#include <pthread.h>
++#define __need_NULL
++#include <stddef.h>
++
++
++/* Fortunately Linux now has a mean to do locking which is realtime
++ safe without the aid of the thread library. We also need no fancy
++ options like error checking mutexes etc. We only need simple
++ locks, maybe recursive. This can be easily and cheaply implemented
++ using futexes. We will use them everywhere except in ld.so since
++ ld.so might be used on old kernels with a different libc.so. */
++#include <lowlevellock.h>
++#include <tls.h>
++#include <pthread-functions.h>
++
++#if IS_IN (libpthread)
++/* This gets us the declarations of the __pthread_* internal names,
++ and hidden_proto for them. */
++# include <fbtl/pthreadP.h>
++#endif
++
++/* Mutex type. */
++#if !IS_IN (libc) && !IS_IN (libpthread)
++typedef pthread_mutex_t __libc_lock_t;
++#else
++typedef int __libc_lock_t;
++#endif
++typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
++typedef pthread_rwlock_t __libc_rwlock_t;
++
++/* Type for key to thread-specific data. */
++typedef pthread_key_t __libc_key_t;
++
++/* Define a lock variable NAME with storage class CLASS. The lock must be
++ initialized with __libc_lock_init before it can be used (or define it
++ with __libc_lock_define_initialized, below). Use `extern' for CLASS to
++ declare a lock defined in another module. In public structure
++ definitions you must use a pointer to the lock structure (i.e., NAME
++ begins with a `*'), because its storage size will not be known outside
++ of libc. */
++#define __libc_lock_define(CLASS,NAME) \
++ CLASS __libc_lock_t NAME;
++#define __libc_rwlock_define(CLASS,NAME) \
++ CLASS __libc_rwlock_t NAME;
++#define __rtld_lock_define_recursive(CLASS,NAME) \
++ CLASS __rtld_lock_recursive_t NAME;
++
++/* Define an initialized lock variable NAME with storage class CLASS.
++
++ For the C library we take a deeper look at the initializer. For
++ this implementation all fields are initialized to zero. Therefore
++ we don't initialize the variable which allows putting it into the
++ BSS section. (Except on PA-RISC and other odd architectures, where
++ initialized locks must be set to one due to the lack of normal
++ atomic operations.) */
++
++#define _LIBC_LOCK_INITIALIZER LLL_LOCK_INITIALIZER
++#if IS_IN (libc) || IS_IN (libpthread)
++# if LLL_LOCK_INITIALIZER == 0
++# define __libc_lock_define_initialized(CLASS,NAME) \
++ CLASS __libc_lock_t NAME;
++# else
++# define __libc_lock_define_initialized(CLASS,NAME) \
++ CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
++# endif
++#else
++# define __libc_lock_define_initialized(CLASS,NAME) \
++ CLASS __libc_lock_t NAME;
++#endif
++
++#define __libc_rwlock_define_initialized(CLASS,NAME) \
++ CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
++
++#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
++ CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
++#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
++ {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
++
++#define __rtld_lock_initialize(NAME) \
++ (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
++
++/* If we check for a weakly referenced symbol and then perform a
++ normal jump to it te code generated for some platforms in case of
++ PIC is unnecessarily slow. What would happen is that the function
++ is first referenced as data and then it is called indirectly
++ through the PLT. We can make this a direct jump. */
++#ifdef __PIC__
++# define __libc_maybe_call(FUNC, ARGS, ELSE) \
++ (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
++ _fn != NULL ? (*_fn) ARGS : ELSE; }))
++#else
++# define __libc_maybe_call(FUNC, ARGS, ELSE) \
++ (FUNC != NULL ? FUNC ARGS : ELSE)
++#endif
++
++/* Call thread functions through the function pointer table. */
++#if defined SHARED && IS_IN (libc)
++# define PTFAVAIL(NAME) __libc_pthread_functions_init
++# define __libc_ptf_call(FUNC, ARGS, ELSE) \
++ (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
++# define __libc_ptf_call_always(FUNC, ARGS) \
++ PTHFCT_CALL (ptr_##FUNC, ARGS)
++#elif IS_IN (libpthread)
++# define PTFAVAIL(NAME) 1
++# define __libc_ptf_call(FUNC, ARGS, ELSE) \
++ FUNC ARGS
++# define __libc_ptf_call_always(FUNC, ARGS) \
++ FUNC ARGS
++#else
++# define PTFAVAIL(NAME) (NAME != NULL)
++# define __libc_ptf_call(FUNC, ARGS, ELSE) \
++ __libc_maybe_call (FUNC, ARGS, ELSE)
++# define __libc_ptf_call_always(FUNC, ARGS) \
++ FUNC ARGS
++#endif
++
++
++/* Initialize the named lock variable, leaving it in a consistent, unlocked
++ state. */
++#if IS_IN (libc) || IS_IN (libpthread)
++# define __libc_lock_init(NAME) \
++ ((void) ((NAME) = LLL_LOCK_INITIALIZER))
++#else
++# define __libc_lock_init(NAME) \
++ __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
++#endif
++#if defined SHARED && IS_IN (libc)
++/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER) is inefficient. */
++# define __libc_rwlock_init(NAME) \
++ ((void) __builtin_memset (&(NAME), '\0', sizeof (NAME)))
++#else
++# define __libc_rwlock_init(NAME) \
++ __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
++#endif
++
++/* Finalize the named lock variable, which must be locked. It cannot be
++ used again until __libc_lock_init is called again on it. This must be
++ called on a lock variable before the containing storage is reused. */
++#if IS_IN (libc) || IS_IN (libpthread)
++# define __libc_lock_fini(NAME) ((void) 0)
++#else
++# define __libc_lock_fini(NAME) \
++ __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
++#endif
++#if defined SHARED && IS_IN (libc)
++# define __libc_rwlock_fini(NAME) ((void) 0)
++#else
++# define __libc_rwlock_fini(NAME) \
++ __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
++#endif
++
++/* Lock the named lock variable. */
++#if IS_IN (libc) || IS_IN (libpthread)
++# ifndef __libc_lock_lock
++# define __libc_lock_lock(NAME) \
++ ({ lll_lock (NAME, LLL_PRIVATE); 0; })
++# endif
++#else
++# undef __libc_lock_lock
++# define __libc_lock_lock(NAME) \
++ __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
++#endif
++#define __libc_rwlock_rdlock(NAME) \
++ __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
++#define __libc_rwlock_wrlock(NAME) \
++ __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)
++
++/* Try to lock the named lock variable. */
++#if IS_IN (libc) || IS_IN (libpthread)
++# ifndef __libc_lock_trylock
++# define __libc_lock_trylock(NAME) \
++ lll_trylock (NAME)
++# endif
++#else
++# undef __libc_lock_trylock
++# define __libc_lock_trylock(NAME) \
++ __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
++#endif
++#define __libc_rwlock_tryrdlock(NAME) \
++ __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
++#define __libc_rwlock_trywrlock(NAME) \
++ __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)
++
++#define __rtld_lock_trylock_recursive(NAME) \
++ __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
++
++/* Unlock the named lock variable. */
++#if IS_IN (libc) || IS_IN (libpthread)
++# define __libc_lock_unlock(NAME) \
++ lll_unlock (NAME, LLL_PRIVATE)
++#else
++# define __libc_lock_unlock(NAME) \
++ __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
++#endif
++#define __libc_rwlock_unlock(NAME) \
++ __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)
++
++#ifdef SHARED
++# define __rtld_lock_default_lock_recursive(lock) \
++ ++((pthread_mutex_t *)(lock))->__data.__count;
+
-+#ifdef NEED_DL_SYSINFO
-+ assert (THREAD_SELF_SYSINFO == THREAD_SYSINFO (pd));
-+#endif
++# define __rtld_lock_default_unlock_recursive(lock) \
++ --((pthread_mutex_t *)(lock))->__data.__count;
+
-+ /* Determine whether the newly created threads has to be started
-+ stopped since we have to set the scheduling parameters or set the
-+ affinity. */
-+ bool stopped = false;
-+ if (attr != NULL && (attr->cpuset != NULL
-+ || (attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0))
-+ stopped = true;
-+ pd->stopped_start = stopped;
-+ pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);
++# define __rtld_lock_lock_recursive(NAME) \
++ GL(dl_rtld_lock_recursive) (&(NAME).mutex)
+
-+ /* Actually create the thread. */
-+ int res = do_clone (pd, attr, clone_flags, start_thread,
-+ STACK_VARIABLES_ARGS, stopped);
++# define __rtld_lock_unlock_recursive(NAME) \
++ GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
++#else
++# define __rtld_lock_lock_recursive(NAME) \
++ __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
+
-+ if (res == 0 && stopped)
-+ /* And finally restart the new thread. */
-+ lll_unlock (pd->lock, LLL_PRIVATE);
++# define __rtld_lock_unlock_recursive(NAME) \
++ __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
++#endif
+
-+ return res;
-+}
---- /dev/null
-+++ b/fbtl/sysdeps/pthread/flockfile.c
-@@ -0,0 +1,32 @@
-+/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
-+ This file is part of the GNU C Library.
-+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
++/* Define once control variable. */
++#if PTHREAD_ONCE_INIT == 0
++/* Special case for static variables where we can avoid the initialization
++ if it is zero. */
++# define __libc_once_define(CLASS, NAME) \
++ CLASS pthread_once_t NAME
++#else
++# define __libc_once_define(CLASS, NAME) \
++ CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
++#endif
+
-+ The GNU C Library is free software; you can redistribute it and/or
-+ modify it under the terms of the GNU Lesser General Public
-+ License as published by the Free Software Foundation; either
-+ version 2.1 of the License, or (at your option) any later version.
++/* Call handler iff the first call. */
++#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
++ do { \
++ if (PTFAVAIL (__pthread_once)) \
++ __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL), \
++ INIT_FUNCTION)); \
++ else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) { \
++ INIT_FUNCTION (); \
++ (ONCE_CONTROL) |= 2; \
++ } \
++ } while (0)
+
-+ The GNU C Library is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ Lesser General Public License for more details.
++/* Get once control variable. */
++#define __libc_once_get(ONCE_CONTROL) ((ONCE_CONTROL) != PTHREAD_ONCE_INIT)
+
-+ You should have received a copy of the GNU Lesser General Public
-+ License along with the GNU C Library; if not, see
-+ <http://www.gnu.org/licenses/>. */
++/* Note that for I/O cleanup handling we are using the old-style
++ cancel handling. It does not have to be integrated with C++ since
++ no C++ code is called in the middle. The old-style handling is
++ faster and the support is not going away. */
++extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
++ void (*routine) (void *), void *arg);
++extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
++ int execute);
++extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
++ void (*routine) (void *), void *arg);
++extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
++ int execute);
+
-+#include <pthread.h>
-+#include <stdio.h>
-+#include <libio.h>
-+#include <bits/stdio-lock.h>
++/* Sometimes we have to exit the block in the middle. */
++#define __libc_cleanup_end(DOIT) \
++ if (_avail) { \
++ __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
++ } else if (DOIT) \
++ _buffer.__routine (_buffer.__arg)
+
+
-+void
-+__flockfile (stream)
-+ FILE *stream;
++/* Normal cleanup handling, based on C cleanup attribute. */
++__extern_inline void
++__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
+{
-+ _IO_lock_lock (*stream->_lock);
++ if (f->__do_it)
++ f->__cancel_routine (f->__cancel_arg);
+}
-+strong_alias (__flockfile, _IO_flockfile)
-+weak_alias (__flockfile, flockfile)
---- /dev/null
-+++ b/fbtl/sysdeps/pthread/ftrylockfile.c
-@@ -0,0 +1,32 @@
-+/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
-+ This file is part of the GNU C Library.
-+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
-+ The GNU C Library is free software; you can redistribute it and/or
-+ modify it under the terms of the GNU Lesser General Public
-+ License as published by the Free Software Foundation; either
-+ version 2.1 of the License, or (at your option) any later version.
++#define __libc_cleanup_push(fct, arg) \
++ do { \
++ struct __pthread_cleanup_frame __clframe \
++ __attribute__ ((__cleanup__ (__libc_cleanup_routine))) \
++ = { .__cancel_routine = (fct), .__cancel_arg = (arg), \
++ .__do_it = 1 };
+
-+ The GNU C Library is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ Lesser General Public License for more details.
++#define __libc_cleanup_pop(execute) \
++ __clframe.__do_it = (execute); \
++ } while (0)
+
-+ You should have received a copy of the GNU Lesser General Public
-+ License along with the GNU C Library; if not, see
-+ <http://www.gnu.org/licenses/>. */
+
-+#include <errno.h>
-+#include <pthread.h>
-+#include <stdio.h>
-+#include <bits/stdio-lock.h>
++/* Create thread-specific key. */
++#define __libc_key_create(KEY, DESTRUCTOR) \
++ __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)
+
++/* Get thread-specific data. */
++#define __libc_getspecific(KEY) \
++ __libc_ptf_call (__pthread_getspecific, (KEY), NULL)
+
-+int
-+__ftrylockfile (stream)
-+ FILE *stream;
-+{
-+ return _IO_lock_trylock (*stream->_lock);
-+}
-+strong_alias (__ftrylockfile, _IO_ftrylockfile)
-+weak_alias (__ftrylockfile, ftrylockfile)
---- /dev/null
-+++ b/fbtl/sysdeps/pthread/funlockfile.c
-@@ -0,0 +1,32 @@
-+/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
-+ This file is part of the GNU C Library.
-+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
++/* Set thread-specific data. */
++#define __libc_setspecific(KEY, VALUE) \
++ __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
+
-+ The GNU C Library is free software; you can redistribute it and/or
-+ modify it under the terms of the GNU Lesser General Public
-+ License as published by the Free Software Foundation; either
-+ version 2.1 of the License, or (at your option) any later version.
+
-+ The GNU C Library is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ Lesser General Public License for more details.
++/* Register handlers to execute before and after `fork'. Note that the
++ last parameter is NULL. The handlers registered by the libc are
++ never removed so this is OK. */
++#define __libc_atfork(PREPARE, PARENT, CHILD) \
++ __register_atfork (PREPARE, PARENT, CHILD, NULL)
++extern int __register_atfork (void (*__prepare) (void),
++ void (*__parent) (void),
++ void (*__child) (void),
++ void *__dso_handle);
+
-+ You should have received a copy of the GNU Lesser General Public
-+ License along with the GNU C Library; if not, see
-+ <http://www.gnu.org/licenses/>. */
++/* Functions that are used by this file and are internal to the GNU C
++ library. */
+
-+#include <pthread.h>
-+#include <stdio.h>
-+#include <libio.h>
-+#include <bits/stdio-lock.h>
++extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
++ const pthread_mutexattr_t *__mutex_attr);
+
++extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);
+
-+void
-+__funlockfile (stream)
-+ FILE *stream;
-+{
-+ _IO_lock_unlock (*stream->_lock);
-+}
-+strong_alias (__funlockfile, _IO_funlockfile)
-+weak_alias (__funlockfile, funlockfile)
---- /dev/null
-+++ b/fbtl/sysdeps/pthread/gai_misc.h
-@@ -0,0 +1,121 @@
-+/* Copyright (C) 2006-2013 Free Software Foundation, Inc.
-+ This file is part of the GNU C Library.
++extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);
+
-+ The GNU C Library is free software; you can redistribute it and/or
-+ modify it under the terms of the GNU Lesser General Public
-+ License as published by the Free Software Foundation; either
-+ version 2.1 of the License, or (at your option) any later version.
++extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);
+
-+ The GNU C Library is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ Lesser General Public License for more details.
++extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);
+
-+ You should have received a copy of the GNU Lesser General Public
-+ License along with the GNU C Library; if not, see
-+ <http://www.gnu.org/licenses/>. */
++extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);
+
-+/* We define a special synchronization primitive for AIO. POSIX
-+ conditional variables would be ideal but the pthread_cond_*wait
-+ operations do not return on EINTR. This is a requirement for
-+ correct aio_suspend and lio_listio implementations. */
++extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);
+
-+#include <assert.h>
-+#include <signal.h>
-+#include <pthreadP.h>
-+#include <lowlevellock.h>
++extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
++ int __kind);
+
-+#define DONT_NEED_GAI_MISC_COND 1
++extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
++ const pthread_rwlockattr_t *__attr);
+
-+#define GAI_MISC_NOTIFY(waitlist) \
-+ do { \
-+ if (*waitlist->counterp > 0 && --*waitlist->counterp == 0) \
-+ lll_futex_wake ((unsigned int *) waitlist->counterp, 1, LLL_PRIVATE); \
-+ } while (0)
++extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);
+
-+#define GAI_MISC_WAIT(result, futex, timeout, cancel) \
-+ do { \
-+ volatile int *futexaddr = &futex; \
-+ int oldval = futex; \
-+ \
-+ if (oldval != 0) \
-+ { \
-+ pthread_mutex_unlock (&__gai_requests_mutex); \
-+ \
-+ int oldtype; \
-+ if (cancel) \
-+ oldtype = LIBC_CANCEL_ASYNC (); \
-+ \
-+ int status; \
-+ do \
-+ { \
-+ status = lll_futex_timed_wait ((unsigned int *) futexaddr, oldval,\
-+ timeout, LLL_PRIVATE); \
-+ if (status != EWOULDBLOCK) \
-+ break; \
-+ \
-+ oldval = *futexaddr; \
-+ } \
-+ while (oldval != 0); \
-+ \
-+ if (cancel) \
-+ LIBC_CANCEL_RESET (oldtype); \
-+ \
-+ if (status == EINTR) \
-+ result = EINTR; \
-+ else if (status == ETIMEDOUT) \
-+ result = EAGAIN; \
-+ else \
-+ assert (status == 0 || status == EWOULDBLOCK); \
-+ \
-+ pthread_mutex_lock (&__gai_requests_mutex); \
-+ } \
-+ } while (0)
++extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);
+
++extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);
+
-+#define gai_start_notify_thread __gai_start_notify_thread
-+#define gai_create_helper_thread __gai_create_helper_thread
++extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);
+
-+extern inline void
-+__gai_start_notify_thread (void)
-+{
-+ sigset_t ss;
-+ sigemptyset (&ss);
-+ INTERNAL_SYSCALL_DECL (err);
-+ INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &ss, NULL, _NSIG / 8);
-+}
++extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);
+
-+extern inline int
-+__gai_create_helper_thread (pthread_t *threadp, void *(*tf) (void *),
-+ void *arg)
-+{
-+ pthread_attr_t attr;
++extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
+
-+ /* Make sure the thread is created detached. */
-+ pthread_attr_init (&attr);
-+ pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
++extern int __pthread_key_create (pthread_key_t *__key,
++ void (*__destr_function) (void *));
+
-+ /* The helper thread needs only very little resources. */
-+ (void) pthread_attr_setstacksize (&attr,
-+ __pthread_get_minstack (&attr)
-+ + 4 * PTHREAD_STACK_MIN);
++extern int __pthread_setspecific (pthread_key_t __key,
++ const void *__pointer);
+
-+ /* Block all signals in the helper thread. To do this thoroughly we
-+ temporarily have to block all signals here. */
-+ sigset_t ss;
-+ sigset_t oss;
-+ sigfillset (&ss);
-+ INTERNAL_SYSCALL_DECL (err);
-+ INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &ss, &oss, _NSIG / 8);
++extern void *__pthread_getspecific (pthread_key_t __key);
+
-+ int ret = pthread_create (threadp, &attr, tf, arg);
++extern int __pthread_once (pthread_once_t *__once_control,
++ void (*__init_routine) (void));
+
-+ /* Restore the signal mask. */
-+ INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &oss, NULL,
-+ _NSIG / 8);
++extern int __pthread_atfork (void (*__prepare) (void),
++ void (*__parent) (void),
++ void (*__child) (void));
+
-+ (void) pthread_attr_destroy (&attr);
-+ return ret;
-+}
++extern int __pthread_setcancelstate (int state, int *oldstate);
+
-+#include_next <gai_misc.h>
++
++/* Make the pthread functions weak so that we can elide them from
++ single-threaded processes. */
++#ifndef __NO_WEAK_PTHREAD_ALIASES
++# ifdef weak_extern
++weak_extern (__pthread_mutex_init)
++weak_extern (__pthread_mutex_destroy)
++weak_extern (__pthread_mutex_lock)
++weak_extern (__pthread_mutex_trylock)
++weak_extern (__pthread_mutex_unlock)
++weak_extern (__pthread_mutexattr_init)
++weak_extern (__pthread_mutexattr_destroy)
++weak_extern (__pthread_mutexattr_settype)
++weak_extern (__pthread_rwlock_init)
++weak_extern (__pthread_rwlock_destroy)
++weak_extern (__pthread_rwlock_rdlock)
++weak_extern (__pthread_rwlock_tryrdlock)
++weak_extern (__pthread_rwlock_wrlock)
++weak_extern (__pthread_rwlock_trywrlock)
++weak_extern (__pthread_rwlock_unlock)
++weak_extern (__pthread_key_create)
++weak_extern (__pthread_setspecific)
++weak_extern (__pthread_getspecific)
++weak_extern (__pthread_once)
++weak_extern (__pthread_initialize)
++weak_extern (__pthread_atfork)
++weak_extern (__pthread_setcancelstate)
++weak_extern (_pthread_cleanup_push_defer)
++weak_extern (_pthread_cleanup_pop_restore)
++# else
++# pragma weak __pthread_mutex_init
++# pragma weak __pthread_mutex_destroy
++# pragma weak __pthread_mutex_lock
++# pragma weak __pthread_mutex_trylock
++# pragma weak __pthread_mutex_unlock
++# pragma weak __pthread_mutexattr_init
++# pragma weak __pthread_mutexattr_destroy
++# pragma weak __pthread_mutexattr_settype
++# pragma weak __pthread_rwlock_destroy
++# pragma weak __pthread_rwlock_rdlock
++# pragma weak __pthread_rwlock_tryrdlock
++# pragma weak __pthread_rwlock_wrlock
++# pragma weak __pthread_rwlock_trywrlock
++# pragma weak __pthread_rwlock_unlock
++# pragma weak __pthread_key_create
++# pragma weak __pthread_setspecific
++# pragma weak __pthread_getspecific
++# pragma weak __pthread_once
++# pragma weak __pthread_initialize
++# pragma weak __pthread_atfork
++# pragma weak __pthread_setcancelstate
++# pragma weak _pthread_cleanup_push_defer
++# pragma weak _pthread_cleanup_pop_restore
++# endif
++#endif
++
++#endif /* libc-lockP.h */
--- /dev/null
+++ b/fbtl/sysdeps/pthread/librt-cancellation.c
@@ -0,0 +1,24 @@
@@ -32692,7 +32584,7 @@
+#endif /* list.h */
--- /dev/null
+++ b/fbtl/sysdeps/pthread/malloc-machine.h
-@@ -0,0 +1,73 @@
+@@ -0,0 +1,62 @@
+/* Basic platform-independent macro definitions for mutexes,
+ thread-specific data and parameters for malloc.
+ Copyright (C) 2003-2013 Free Software Foundation, Inc.
@@ -32718,7 +32610,7 @@
+#undef thread_atfork_static
+
+#include <atomic.h>
-+#include <bits/libc-lock.h>
++#include <libc-lock.h>
+
+__libc_lock_define (typedef, mutex_t)
+
@@ -32726,7 +32618,6 @@
+#define mutex_lock(m) __libc_lock_lock (*(m))
+#define mutex_trylock(m) __libc_lock_trylock (*(m))
+#define mutex_unlock(m) __libc_lock_unlock (*(m))
-+#define MUTEX_INITIALIZER LLL_LOCK_INITIALIZER
+
+/* This is defined by newer gcc version unique for each module. */
+extern void *__dso_handle __attribute__ ((__weak__));
@@ -32753,16 +32644,6 @@
+ __linkin_atfork (&atfork_mem)
+#endif
+
-+/* thread specific data for glibc */
-+
-+#include <bits/libc-tsd.h>
-+
-+typedef int tsd_key_t[1]; /* no key data structure, libc magic does it */
-+__libc_tsd_define (static, void *, MALLOC) /* declaration/common definition */
-+#define tsd_key_create(key, destr) ((void) (key))
-+#define tsd_setspecific(key, data) __libc_tsd_set (void *, MALLOC, (data))
-+#define tsd_getspecific(key, vptr) ((vptr) = __libc_tsd_get (void *, MALLOC))
-+
+#include <sysdeps/generic/malloc-machine.h>
+
+#endif /* !defined(_MALLOC_MACHINE_H) */
@@ -33076,7 +32957,7 @@
+ int (*ptr_pthread_mutex_lock) (pthread_mutex_t *);
+ int (*ptr_pthread_mutex_unlock) (pthread_mutex_t *);
+ pthread_t (*ptr_pthread_self) (void);
-+ int (*ptr_pthread_setcancelstate) (int, int *);
++ int (*ptr___pthread_setcancelstate) (int, int *);
+ int (*ptr_pthread_setcanceltype) (int, int *);
+ void (*ptr___pthread_cleanup_upto) (__jmp_buf, char *);
+ int (*ptr___pthread_once) (pthread_once_t *, void (*) (void));
@@ -34501,6 +34382,119 @@
+#include <fbtl/pthreadP.h>
+#include <sysdeps/unix/bsd/sigprocmask.c>
--- /dev/null
++++ b/fbtl/sysdeps/pthread/stdio-lock.h
+@@ -0,0 +1,110 @@
++/* Thread package specific definitions of stream lock type. NPTL version.
++ Copyright (C) 2000-2013 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <http://www.gnu.org/licenses/>. */
++
++#ifndef _STDIO_LOCK_H
++#define _STDIO_LOCK_H 1
++
++#include <libc-lock.h>
++#include <lowlevellock.h>
++
++
++/* The locking here is very inexpensive, even for inlining. */
++#define _IO_lock_inexpensive 1
++
++typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
++
++#define _IO_lock_initializer { LLL_LOCK_INITIALIZER, 0, NULL }
++
++#define _IO_lock_init(_name) \
++ ((void) ((_name) = (_IO_lock_t) _IO_lock_initializer))
++
++#define _IO_lock_fini(_name) \
++ ((void) 0)
++
++#define _IO_lock_lock(_name) \
++ do { \
++ void *__self = THREAD_SELF; \
++ if ((_name).owner != __self) \
++ { \
++ lll_lock ((_name).lock, LLL_PRIVATE); \
++ (_name).owner = __self; \
++ } \
++ ++(_name).cnt; \
++ } while (0)
++
++#define _IO_lock_trylock(_name) \
++ ({ \
++ int __result = 0; \
++ void *__self = THREAD_SELF; \
++ if ((_name).owner != __self) \
++ { \
++ if (lll_trylock ((_name).lock) == 0) \
++ { \
++ (_name).owner = __self; \
++ (_name).cnt = 1; \
++ } \
++ else \
++ __result = EBUSY; \
++ } \
++ else \
++ ++(_name).cnt; \
++ __result; \
++ })
++
++#define _IO_lock_unlock(_name) \
++ do { \
++ if (--(_name).cnt == 0) \
++ { \
++ (_name).owner = NULL; \
++ lll_unlock ((_name).lock, LLL_PRIVATE); \
++ } \
++ } while (0)
++
++
++
++#define _IO_cleanup_region_start(_fct, _fp) \
++ __libc_cleanup_region_start (((_fp)->_flags & _IO_USER_LOCK) == 0, _fct, _fp)
++#define _IO_cleanup_region_start_noarg(_fct) \
++ __libc_cleanup_region_start (1, _fct, NULL)
++#define _IO_cleanup_region_end(_doit) \
++ __libc_cleanup_region_end (_doit)
++
++#if defined _LIBC && IS_IN (libc)
++
++# ifdef __EXCEPTIONS
++# define _IO_acquire_lock(_fp) \
++ do { \
++ _IO_FILE *_IO_acquire_lock_file \
++ __attribute__((cleanup (_IO_acquire_lock_fct))) \
++ = (_fp); \
++ _IO_flockfile (_IO_acquire_lock_file);
++# define _IO_acquire_lock_clear_flags2(_fp) \
++ do { \
++ _IO_FILE *_IO_acquire_lock_file \
++ __attribute__((cleanup (_IO_acquire_lock_clear_flags2_fct))) \
++ = (_fp); \
++ _IO_flockfile (_IO_acquire_lock_file);
++# else
++# define _IO_acquire_lock(_fp) _IO_acquire_lock_needs_exceptions_enabled
++# define _IO_acquire_lock_clear_flags2(_fp) _IO_acquire_lock (_fp)
++# endif
++# define _IO_release_lock(_fp) ; } while (0)
++
++#endif
++
++#endif /* stdio-lock.h */
+--- /dev/null
+++ b/fbtl/sysdeps/pthread/tcb-offsets.h
@@ -0,0 +1 @@
+/* This is overridden by generated tcb-offsets.h on arches which need it. */
@@ -36110,7 +36104,7 @@
+ ({ register char *frame __asm__("rsp"); frame; })
--- /dev/null
+++ b/fbtl/sysdeps/x86_64/tcb-offsets.sym
-@@ -0,0 +1,28 @@
+@@ -0,0 +1,27 @@
+#include <sysdep.h>
+#include <tls.h>
+
@@ -36128,7 +36122,6 @@
+#ifndef __ASSUME_PRIVATE_FUTEX
+PRIVATE_FUTEX offsetof (tcbhead_t, private_futex)
+#endif
-+RTLD_SAVESPACE_SSE offsetof (tcbhead_t, rtld_savespace_sse)
+
+-- Not strictly offsets, but these values are also used in the TCB.
+TCB_CANCELSTATE_BITMASK CANCELSTATE_BITMASK
@@ -36141,7 +36134,7 @@
+TCB_PTHREAD_CANCELED PTHREAD_CANCELED
--- /dev/null
+++ b/fbtl/sysdeps/x86_64/tls.h
-@@ -0,0 +1,423 @@
+@@ -0,0 +1,389 @@
+/* Definition for thread-local data handling. nptl/x86_64 version.
+ Copyright (C) 2002-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
@@ -36209,16 +36202,17 @@
+# ifndef __ASSUME_PRIVATE_FUTEX
+ int private_futex;
+# else
-+ int __unused1;
++ int __glibc_reserved1;
+# endif
-+ int rtld_must_xmm_save;
++ int __glibc_unused1;
+ /* Reservation of some values for the TM ABI. */
+ void *__private_tm[4];
+ /* GCC split stack support. */
+ void *__private_ss;
-+ long int __unused2;
-+ /* Have space for the post-AVX register size. */
-+ __128bits rtld_savespace_sse[8][4] __attribute__ ((aligned (32)));
++ long int __glibc_reserved2;
++ /* Must be kept even if it is no longer used by glibc since programs,
++ like AddressSanitizer, depend on the size of tcbhead_t. */
++ __128bits __glibc_unused2[8][4] __attribute__ ((aligned (32)));
+
+ void *__padding[8];
+} tcbhead_t;
@@ -36527,45 +36521,34 @@
+# define THREAD_GSCOPE_WAIT() \
+ GL(dl_wait_lookup_done) ()
+
-+
-+# ifdef SHARED
-+/* Defined in dl-trampoline.S. */
-+extern void _dl_x86_64_save_sse (void);
-+extern void _dl_x86_64_restore_sse (void);
-+
-+# define RTLD_CHECK_FOREIGN_CALL \
-+ (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save) != 0)
-+
-+/* NB: Don't use the xchg operation because that would imply a lock
-+ prefix which is expensive and unnecessary. The cache line is also
-+ not contested at all. */
-+# define RTLD_ENABLE_FOREIGN_CALL \
-+ int old_rtld_must_xmm_save = THREAD_GETMEM (THREAD_SELF, \
-+ header.rtld_must_xmm_save); \
-+ THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, 1)
-+
-+# define RTLD_PREPARE_FOREIGN_CALL \
-+ do if (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save)) \
-+ { \
-+ _dl_x86_64_save_sse (); \
-+ THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, 0); \
-+ } \
-+ while (0)
-+
-+# define RTLD_FINALIZE_FOREIGN_CALL \
-+ do { \
-+ if (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save) == 0) \
-+ _dl_x86_64_restore_sse (); \
-+ THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, \
-+ old_rtld_must_xmm_save); \
-+ } while (0)
-+# endif
-+
-+
+#endif /* __ASSEMBLER__ */
+
+#endif /* tls.h */
--- /dev/null
++++ b/fbtl/sysdeps/x86_64/tls.h.rej
+@@ -0,0 +1,21 @@
++--- fbtl/sysdeps/x86_64/tls.h
+++++ fbtl/sysdeps/x86_64/tls.h
++@@ -67,14 +67,15 @@ typedef struct
++ # else
++ int __glibc_reserved1;
++ # endif
++- int rtld_must_xmm_save;
+++ int __glibc_unused1;
++ /* Reservation of some values for the TM ABI. */
++ void *__private_tm[4];
++ /* GCC split stack support. */
++ void *__private_ss;
++ long int __glibc_reserved2;
++- /* Have space for the post-AVX register size. */
++- __128bits rtld_savespace_sse[8][4] __attribute__ ((aligned (32)));
+++ /* Must be kept even if it is no longer used by glibc since programs,
+++ like AddressSanitizer, depend on the size of tcbhead_t. */
+++ __128bits __glibc_unused2[8][4] __attribute__ ((aligned (32)));
++
++ void *__padding[8];
++ } tcbhead_t;
+--- /dev/null
+++ b/fbtl/tpp.c
@@ -0,0 +1,171 @@
+/* Thread Priority Protect helpers.
@@ -54497,9 +54480,9 @@
+ if (rwl_writer.__data.__flags
+ != PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)
+ return 6;
-+ /* <bits/libc-lock.h> __libc_rwlock_init definition for libc.so
++ /* <libc-lock.h> __libc_rwlock_init definition for libc.so
+ relies on PTHREAD_RWLOCK_INITIALIZER being all zeros. If
-+ that ever changes, <bits/libc-lock.h> needs updating. */
++ that ever changes, <libc-lock.h> needs updating. */
+ size_t i;
+ for (i = 0; i < sizeof (rwl_normal); i++)
+ if (((char *) &rwl_normal)[i] != '\0')
diff --git a/debian/patches/kfreebsd/local-sysdeps.diff b/debian/patches/kfreebsd/local-sysdeps.diff
index 2197ba2..21604d6 100644
--- a/debian/patches/kfreebsd/local-sysdeps.diff
+++ b/debian/patches/kfreebsd/local-sysdeps.diff
@@ -8188,7 +8188,7 @@
+strong_alias (__clock_settime, clock_settime)
--- /dev/null
+++ b/sysdeps/unix/bsd/bsd4.4/kfreebsd/fbtl/fatal-prepare.h
-@@ -0,0 +1,37 @@
+@@ -0,0 +1,24 @@
+/* Copyright (C) 2003-2013 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
@@ -8210,22 +8210,9 @@
+
+/* We have to completely disable cancellation. assert() must not be a
+ cancellation point but the implementation uses write() etc. */
-+#ifdef SHARED
-+# include <pthread-functions.h>
-+# define FATAL_PREPARE \
-+ { \
-+ if (__libc_pthread_functions_init) \
-+ PTHFCT_CALL (ptr_pthread_setcancelstate, (PTHREAD_CANCEL_DISABLE, \
-+ NULL)); \
-+ }
-+#else
-+# pragma weak pthread_setcancelstate
-+# define FATAL_PREPARE \
-+ { \
-+ if (pthread_setcancelstate != NULL) \
-+ pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, NULL); \
-+ }
-+#endif
++#define FATAL_PREPARE \
++ __libc_ptf_call (__pthread_setcancelstate, \
++ (PTHREAD_CANCEL_DISABLE, NULL), 0)
--- /dev/null
+++ b/sysdeps/unix/bsd/bsd4.4/kfreebsd/fbtl/fork.c
@@ -0,0 +1,231 @@
@@ -8257,7 +8244,7 @@
+#include "fork.h"
+#include <hp-timing.h>
+#include <ldsodefs.h>
-+#include <bits/stdio-lock.h>
++#include <stdio-lock.h>
+#include <atomic.h>
+#include <pthreadP.h>
+
@@ -9081,7 +9068,7 @@
+#include <tls.h>
+#include <string.h>
+#include <pthreadP.h>
-+#include <bits/libc-lock.h>
++#include <libc-lock.h>
+#include <sysdep.h>
+#include <ldsodefs.h>
+
@@ -26710,7 +26697,7 @@
+#include <stdio.h>
+#include <string.h>
+#include <sys/statfs.h>
-+#include <bits/libc-lock.h>
++#include <libc-lock.h>
+
+/* Mount point of the shared memory filesystem. */
+static struct
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-glibc/glibc.git
Reply to: