[Date Prev][Date Next] [Thread Prev][Thread Next] [Date Index] [Thread Index]

r927 - in linux-kernel-headers/branches/lkh-branch-2.6.12/debian: . patches



Author: gotom
Date: 2005-06-12 16:01:23 +0000 (Sun, 12 Jun 2005)
New Revision: 927

Modified:
   linux-kernel-headers/branches/lkh-branch-2.6.12/debian/changelog
   linux-kernel-headers/branches/lkh-branch-2.6.12/debian/patches/byteorder-inline.patch
Log:
    - debian/patches/byteorder-inline.patch: Correctly enclose
      __cpu_to[lb]e64p and __[lb]e64_to_cpup within the
      __GNUC__ && !__STRICT_ANSI__ guards.



Modified: linux-kernel-headers/branches/lkh-branch-2.6.12/debian/changelog
===================================================================
--- linux-kernel-headers/branches/lkh-branch-2.6.12/debian/changelog	2005-06-12 15:59:43 UTC (rev 926)
+++ linux-kernel-headers/branches/lkh-branch-2.6.12/debian/changelog	2005-06-12 16:01:23 UTC (rev 927)
@@ -17,6 +17,9 @@
     - debian/patches/byteorder-const.patch: Move asm-ppc64 part to
       asm-ppc64-swab64.patch.
 
+    - debian/patches/byteorder-inline.patch: Enclose __STRICT_ANSI__ and
+      __GNUC__ for __cpu_to[lb]e64p and __[lb]e64_to_cpup correctly.
+
     - debian/patches/ioctl-typecheck.patch: Add to make impotent _IOC_TYPECHECK
       checking macro for userland, to fix incompatible ioctl number in switch
       case statement with g++ 3.4 and later.  Proposed patch by Jeremy

Modified: linux-kernel-headers/branches/lkh-branch-2.6.12/debian/patches/byteorder-inline.patch
===================================================================
--- linux-kernel-headers/branches/lkh-branch-2.6.12/debian/patches/byteorder-inline.patch	2005-06-12 15:59:43 UTC (rev 926)
+++ linux-kernel-headers/branches/lkh-branch-2.6.12/debian/patches/byteorder-inline.patch	2005-06-12 16:01:23 UTC (rev 927)
@@ -1,13 +1,15 @@
 2005-05-05 gotom, Add to use __inline__ instead of inline directly for -ansi.
-Don't define __cpu_to[lb]64p and __[lb]64_to_cpup under __STRICT_ANSI__.
+Don't define __cpu_to[lb]e64p and __[lb]e64_to_cpup under __STRICT_ANSI__.
+2005-06-12 gotom, Updated.
 
---- include.orig/linux/byteorder/little_endian.h	2005-04-21 09:03:16.000000000 +0900
-+++ include/linux/byteorder/little_endian.h	2005-05-05 20:11:10.183350481 +0900
-@@ -40,51 +40,53 @@
+--- include.orig/linux/byteorder/little_endian.h	2005-05-25 12:31:20.000000000 +0900
++++ include/linux/byteorder/little_endian.h	2005-06-12 12:54:00.604863276 +0900
+@@ -40,51 +40,55 @@
  #define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
  #define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
  
 -static inline __le64 __cpu_to_le64p(const __u64 *p)
++#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
 +static __inline__ __le64 __cpu_to_le64p(const __u64 *p)
  {
  	return (__force __le64)*p;
@@ -18,6 +20,7 @@
  	return (__force __u64)*p;
  }
 -static inline __le32 __cpu_to_le32p(const __u32 *p)
++#endif
 +static __inline__ __le32 __cpu_to_le32p(const __u32 *p)
  {
  	return (__force __le32)*p;
@@ -38,7 +41,7 @@
  	return (__force __u16)*p;
  }
 -static inline __be64 __cpu_to_be64p(const __u64 *p)
-+#if !defined(__STRICT_ANSI__)
++#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
 +static __inline__ __be64 __cpu_to_be64p(const __u64 *p)
  {
  	return (__force __be64)__swab64p(p);
@@ -69,13 +72,14 @@
  {
  	return __swab16p((__u16 *)p);
  }
---- include.orig/linux/byteorder/big_endian.h	2005-04-21 09:03:16.000000000 +0900
-+++ include/linux/byteorder/big_endian.h	2005-05-05 20:15:32.041497892 +0900
-@@ -40,51 +40,53 @@
+--- include.orig/linux/byteorder/big_endian.h	2005-05-25 12:31:20.000000000 +0900
++++ include/linux/byteorder/big_endian.h	2005-06-12 12:54:05.224244239 +0900
+@@ -40,51 +40,55 @@
  #define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
  #define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
  
 -static inline __le64 __cpu_to_le64p(const __u64 *p)
++#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
 +static __inline__ __le64 __cpu_to_le64p(const __u64 *p)
  {
  	return (__force __le64)__swab64p(p);
@@ -86,6 +90,7 @@
  	return __swab64p((__u64 *)p);
  }
 -static inline __le32 __cpu_to_le32p(const __u32 *p)
++#endif
 +static __inline__ __le32 __cpu_to_le32p(const __u32 *p)
  {
  	return (__force __le32)__swab32p(p);
@@ -106,7 +111,7 @@
  	return __swab16p((__u16 *)p);
  }
 -static inline __be64 __cpu_to_be64p(const __u64 *p)
-+#if !defined(__STRICT_ANSI__)
++#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
 +static __inline__ __be64 __cpu_to_be64p(const __u64 *p)
  {
  	return (__force __be64)*p;



Reply to: