--- Begin Message ---
- To: Debian Bug Tracking System <submit@bugs.debian.org>
- Subject: unblock: lzo2/2.08-1.2
- From: Jonathan Wiltshire <jmw@debian.org>
- Date: Sat, 27 Dec 2014 12:13:55 +0000
- Message-id: <20141227121355.18441.9031.reportbug@lupin.home.powdarrmonkey.net>
Package: release.debian.org
Severity: normal
Tags: d-i
User: release.debian.org@packages.debian.org
Usertags: unblock
Please unblock package lzo2
Simon McVittie NMU'd lzo2 to fix a crash in openvpn, RC bug #757037. The
attached debdiff covers -1.1 and -1.2, since the first fix was incomplete.
Unblocked, but needs a kibi-ack for the udeb please.
unblock-udeb lzo2/2.08-1.2
-- System Information:
Debian Release: 8.0
APT prefers testing
APT policy: (990, 'testing'), (500, 'testing-proposed-updates'), (500, 'unstable'), (500, 'stable'), (1, 'experimental')
Architecture: amd64 (x86_64)
Foreign Architectures: i386
Kernel: Linux 3.16.0-4-amd64 (SMP w/4 CPU cores)
Locale: LANG=en_GB.utf8, LC_CTYPE=en_GB.utf8 (charmap=UTF-8)
Shell: /bin/sh linked to /bin/dash
Init: systemd (via /run/systemd/system)
diff -Nru lzo2-2.08/debian/changelog lzo2-2.08/debian/changelog
--- lzo2-2.08/debian/changelog 2014-07-15 01:03:18.000000000 +0000
+++ lzo2-2.08/debian/changelog 2014-12-20 17:50:47.000000000 +0000
@@ -1,3 +1,26 @@
+lzo2 (2.08-1.2) unstable; urgency=low
+
+ * Non-maintainer upload.
+ * Adjust patch from previous upload so the "modern C" code path still
+ defines some typedefs: lzo_memops_TU1p is a pointer to unsigned byte
+ (used by the byteswapping implementation on non-powerpc big-endian
+ architectures), and lzo_memops_TU2p and lzo_memops_TU4p
+ are pointers to unsigned 2- and 4-byte quantities (needed by the
+ powerpc assembler implementation). Together, these fix FTBFS on
+ big-endian platforms. (Closes: #773580)
+
+ -- Simon McVittie <smcv@debian.org> Sat, 20 Dec 2014 17:50:38 +0000
+
+lzo2 (2.08-1.1) unstable; urgency=low
+
+ * Non-maintainer upload.
+ * Replace liblzo's reinvention of memcpy() with calls to memcpy().
+ gcc already knows how to inline memcpy calls with constant n,
+ and also gets the alignment constraints right, avoiding incorrect
+ unaligned accesses on armel (Closes: #757037)
+
+ -- Simon McVittie <smcv@debian.org> Tue, 16 Dec 2014 23:35:36 +0000
+
lzo2 (2.08-1) unstable; urgency=low
* New upstream release (closes: #752861) (CVE-2014-4607)
diff -Nru lzo2-2.08/debian/patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch lzo2-2.08/debian/patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch
--- lzo2-2.08/debian/patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch 1970-01-01 00:00:00.000000000 +0000
+++ lzo2-2.08/debian/patches/0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch 2014-12-20 17:50:47.000000000 +0000
@@ -0,0 +1,366 @@
+From: Simon McVittie <smcv@debian.org>
+Date: Sat, 20 Dec 2014 17:50:27 +0000
+Subject: Conditionally replace reinvention of memcpy() with calls to memcpy()
+
+gcc already knows how to inline memcpy calls with constant n,
+and also gets the alignment constraints right, avoiding incorrect
+unaligned accesses on armel.
+
+Unconditionally define LZO_MEMOPS_GET_NE64 since it's trivial
+to do in terms of LZO_MEMOPS_COPY8.
+
+I've made the "modern C" version conditional since lzo seems to aim
+to be portable to anything and everything, but it would probably
+be better off just requiring a compiler from this century and
+a set of correctly working memwhatever() implementations.
+
+Bug-Debian: https://bugs.debian.org/757037
+---
+ minilzo/minilzo.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++---------
+ src/lzo_conf.h | 2 --
+ src/lzo_func.h | 71 +++++++++++++++++++++++++++++++++++++++++++--------
+ 3 files changed, 125 insertions(+), 24 deletions(-)
+
+diff --git a/minilzo/minilzo.c b/minilzo/minilzo.c
+index ab2be5f..146b383 100644
+--- a/minilzo/minilzo.c
++++ b/minilzo/minilzo.c
+@@ -3354,6 +3354,49 @@ lzo_bitops_unused_funcs(void)
+ LZO_UNUSED_FUNC(lzo_bitops_unused_funcs);
+ }
+
++/* Modern compilers know that memcpy() and memset() with constant n can be
++ * inlined, and do so without violating alignment constraints on e.g. ARMv5,
++ * unlike the macros below. */
++#if LZO_CFG_MODERN_C+0
++
++/* ISO C says char pointers of any signedness can alias anything
++ * (C11 draft N1570, section 6.5 paragraph 7) so they are safe for this use */
++typedef unsigned char *lzo_memops_TU1p;
++
++/* Used by powerpc assembler implementations of byteswapping */
++#if (LZO_OPT_UNALIGNED16)
++typedef lzo_uint16_t __lzo_may_alias lzo_memops_TU2;
++typedef lzo_memops_TU2 *lzo_memops_TU2p;
++#endif
++
++/* Used by powerpc assembler implementations of byteswapping */
++#if (LZO_OPT_UNALIGNED32)
++typedef lzo_uint32_t __lzo_may_alias lzo_memops_TU4;
++typedef lzo_memops_TU4 *lzo_memops_TU4p;
++#endif
++
++#define LZO_MEMOPS_SET1(dd,cc) memset(dd, cc, 1)
++#define LZO_MEMOPS_SET2(dd,cc) memset(dd, cc, 2)
++#define LZO_MEMOPS_SET3(dd,cc) memset(dd, cc, 3)
++#define LZO_MEMOPS_SET4(dd,cc) memset(dd, cc, 4)
++/* lzo does not appear to use these macros between overlapping buffers
++ * in practice, so memmove() (which is not inlined by gcc) is unnecessary. */
++#define LZO_MEMOPS_MOVE1(dd,ss) memcpy(dd, ss, 1)
++#define LZO_MEMOPS_MOVE2(dd,ss) memcpy(dd, ss, 2)
++#define LZO_MEMOPS_MOVE3(dd,ss) memcpy(dd, ss, 3)
++#define LZO_MEMOPS_MOVE4(dd,ss) memcpy(dd, ss, 4)
++#define LZO_MEMOPS_MOVE8(dd,ss) memcpy(dd, ss, 8)
++#define LZO_MEMOPS_COPY1(dd,ss) memcpy(dd, ss, 1)
++#define LZO_MEMOPS_COPY2(dd,ss) memcpy(dd, ss, 2)
++#define LZO_MEMOPS_COPY4(dd,ss) memcpy(dd, ss, 4)
++#define LZO_MEMOPS_COPY8(dd,ss) memcpy(dd, ss, 8)
++#define LZO_MEMOPS_COPYN(dd,ss,nn) memcpy(dd, ss, nn)
++
++#else /* !LZO_CFG_MODERN_C */
++
++/* Standard C says a lot of this is undefined behaviour; maybe
++ * you can get away with it in older compilers. */
++
+ #if defined(__lzo_alignof) && !(LZO_CFG_NO_UNALIGNED)
+ #ifndef __lzo_memops_tcheck
+ #define __lzo_memops_tcheck(t,a,b) ((void)0, sizeof(t) == (a) && __lzo_alignof(t) == (b))
+@@ -3523,6 +3566,8 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU8p)0)==8)
+ if ((void)0, n__n > 0) do { *d__n++ = *s__n++; } while (--n__n > 0); \
+ LZO_BLOCK_END
+
++#endif /* !LZO_CFG_MODERN_C */
++
+ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
+ {
+ lzo_uint16_t v;
+@@ -3539,7 +3584,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
+ #endif
+ return v;
+ }
+-#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_LE16(ss) * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_LE16(ss) lzo_memops_get_le16(ss)
+@@ -3561,13 +3606,13 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_le32(const lzo_voidp ss)
+ #endif
+ return v;
+ }
+-#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_LE32(ss) * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_LE32(ss) lzo_memops_get_le32(ss)
+ #endif
+
+-#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_LE64(ss) * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
+ #endif
+
+@@ -3577,7 +3622,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_ne16(const lzo_voidp ss)
+ LZO_MEMOPS_COPY2(&v, ss);
+ return v;
+ }
+-#if (LZO_OPT_UNALIGNED16)
++#if (LZO_OPT_UNALIGNED16) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_NE16(ss) * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_NE16(ss) lzo_memops_get_ne16(ss)
+@@ -3589,14 +3634,23 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_ne32(const lzo_voidp ss)
+ LZO_MEMOPS_COPY4(&v, ss);
+ return v;
+ }
+-#if (LZO_OPT_UNALIGNED32)
++#if (LZO_OPT_UNALIGNED32) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_NE32(ss) * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_NE32(ss) lzo_memops_get_ne32(ss)
+ #endif
+
+-#if (LZO_OPT_UNALIGNED64)
++__lzo_static_forceinline lzo_uint64_t lzo_memops_get_ne64(const lzo_voidp ss)
++{
++ lzo_uint64_t v;
++ LZO_MEMOPS_COPY8(&v, ss);
++ return v;
++}
++
++#if (LZO_OPT_UNALIGNED64) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_NE64(ss) * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
++#else
++#define LZO_MEMOPS_GET_NE64(ss) lzo_memops_get_ne64(ss)
+ #endif
+
+ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
+@@ -3613,7 +3667,7 @@ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
+ d[1] = LZO_BYTE((vv >> 8) & 0xff);
+ #endif
+ }
+-#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_LE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_LE16(dd,vv) lzo_memops_put_le16(dd,vv)
+@@ -3635,7 +3689,7 @@ __lzo_static_forceinline void lzo_memops_put_le32(lzo_voidp dd, lzo_uint32_t vv)
+ d[3] = LZO_BYTE((vv >> 24) & 0xff);
+ #endif
+ }
+-#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_LE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_LE32(dd,vv) lzo_memops_put_le32(dd,vv)
+@@ -3645,7 +3699,7 @@ __lzo_static_forceinline void lzo_memops_put_ne16(lzo_voidp dd, lzo_uint16_t vv)
+ {
+ LZO_MEMOPS_COPY2(dd, &vv);
+ }
+-#if (LZO_OPT_UNALIGNED16)
++#if (LZO_OPT_UNALIGNED16) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_NE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_NE16(dd,vv) lzo_memops_put_ne16(dd,vv)
+@@ -3655,7 +3709,7 @@ __lzo_static_forceinline void lzo_memops_put_ne32(lzo_voidp dd, lzo_uint32_t vv)
+ {
+ LZO_MEMOPS_COPY4(dd, &vv);
+ }
+-#if (LZO_OPT_UNALIGNED32)
++#if (LZO_OPT_UNALIGNED32) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_NE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_NE32(dd,vv) lzo_memops_put_ne32(dd,vv)
+@@ -3746,11 +3800,9 @@ lzo_memops_unused_funcs(void)
+ #ifndef UA_GET_NE32
+ #define UA_GET_NE32 LZO_MEMOPS_GET_NE32
+ #endif
+-#ifdef LZO_MEMOPS_GET_NE64
+ #ifndef UA_GET_NE64
+ #define UA_GET_NE64 LZO_MEMOPS_GET_NE64
+ #endif
+-#endif
+ #ifndef UA_PUT_LE16
+ #define UA_PUT_LE16 LZO_MEMOPS_PUT_LE16
+ #endif
+diff --git a/src/lzo_conf.h b/src/lzo_conf.h
+index cc2e85d..3c77caa 100644
+--- a/src/lzo_conf.h
++++ b/src/lzo_conf.h
+@@ -314,11 +314,9 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(lzo_uint64_t) == 8)
+ #ifndef UA_GET_NE32
+ #define UA_GET_NE32 LZO_MEMOPS_GET_NE32
+ #endif
+-#ifdef LZO_MEMOPS_GET_NE64
+ #ifndef UA_GET_NE64
+ #define UA_GET_NE64 LZO_MEMOPS_GET_NE64
+ #endif
+-#endif
+ #ifndef UA_PUT_LE16
+ #define UA_PUT_LE16 LZO_MEMOPS_PUT_LE16
+ #endif
+diff --git a/src/lzo_func.h b/src/lzo_func.h
+index dfaa676..5e3b814 100644
+--- a/src/lzo_func.h
++++ b/src/lzo_func.h
+@@ -164,6 +164,46 @@ lzo_bitops_unused_funcs(void)
+ // memops
+ ************************************************************************/
+
++/* Modern compilers know that memcpy() and memset() with constant n can be
++ * inlined, and do so without violating alignment constraints on e.g. ARMv5,
++ * unlike the macros below. */
++#if LZO_CFG_MODERN_C+0
++
++/* ISO C says char pointers of any signedness can alias anything
++ * (C11 draft N1570, section 6.5 paragraph 7) so they are safe for this use */
++typedef unsigned char *lzo_memops_TU1p;
++
++/* Used by powerpc assembler implementations of byteswapping */
++#if (LZO_OPT_UNALIGNED16)
++typedef lzo_uint16_t __lzo_may_alias lzo_memops_TU2;
++typedef lzo_memops_TU2 *lzo_memops_TU2p;
++#endif
++
++/* Used by powerpc assembler implementations of byteswapping */
++#if (LZO_OPT_UNALIGNED32)
++typedef lzo_uint32_t __lzo_may_alias lzo_memops_TU4;
++typedef lzo_memops_TU4 *lzo_memops_TU4p;
++#endif
++
++#define LZO_MEMOPS_SET1(dd,cc) memset(dd, cc, 1)
++#define LZO_MEMOPS_SET2(dd,cc) memset(dd, cc, 2)
++#define LZO_MEMOPS_SET3(dd,cc) memset(dd, cc, 3)
++#define LZO_MEMOPS_SET4(dd,cc) memset(dd, cc, 4)
++/* lzo does not appear to use these macros between overlapping buffers
++ * in practice, so memmove() (which is not inlined by gcc) is unnecessary. */
++#define LZO_MEMOPS_MOVE1(dd,ss) memcpy(dd, ss, 1)
++#define LZO_MEMOPS_MOVE2(dd,ss) memcpy(dd, ss, 2)
++#define LZO_MEMOPS_MOVE3(dd,ss) memcpy(dd, ss, 3)
++#define LZO_MEMOPS_MOVE4(dd,ss) memcpy(dd, ss, 4)
++#define LZO_MEMOPS_MOVE8(dd,ss) memcpy(dd, ss, 8)
++#define LZO_MEMOPS_COPY1(dd,ss) memcpy(dd, ss, 1)
++#define LZO_MEMOPS_COPY2(dd,ss) memcpy(dd, ss, 2)
++#define LZO_MEMOPS_COPY4(dd,ss) memcpy(dd, ss, 4)
++#define LZO_MEMOPS_COPY8(dd,ss) memcpy(dd, ss, 8)
++#define LZO_MEMOPS_COPYN(dd,ss,nn) memcpy(dd, ss, nn)
++
++#else /* !LZO_CFG_MODERN_C */
++
+ #if defined(__lzo_alignof) && !(LZO_CFG_NO_UNALIGNED)
+ #ifndef __lzo_memops_tcheck
+ #define __lzo_memops_tcheck(t,a,b) ((void)0, sizeof(t) == (a) && __lzo_alignof(t) == (b))
+@@ -333,6 +373,8 @@ LZO_COMPILE_TIME_ASSERT_HEADER(sizeof(*(lzo_memops_TU8p)0)==8)
+ if ((void)0, n__n > 0) do { *d__n++ = *s__n++; } while (--n__n > 0); \
+ LZO_BLOCK_END
+
++#endif /* !LZO_CFG_MODERN_C */
++
+ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
+ {
+ lzo_uint16_t v;
+@@ -349,7 +391,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_le16(const lzo_voidp ss)
+ #endif
+ return v;
+ }
+-#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_LE16(ss) * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_LE16(ss) lzo_memops_get_le16(ss)
+@@ -371,13 +413,13 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_le32(const lzo_voidp ss)
+ #endif
+ return v;
+ }
+-#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_LE32(ss) * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_LE32(ss) lzo_memops_get_le32(ss)
+ #endif
+
+-#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED64) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_LE64(ss) * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
+ #endif
+
+@@ -387,7 +429,7 @@ __lzo_static_forceinline lzo_uint16_t lzo_memops_get_ne16(const lzo_voidp ss)
+ LZO_MEMOPS_COPY2(&v, ss);
+ return v;
+ }
+-#if (LZO_OPT_UNALIGNED16)
++#if (LZO_OPT_UNALIGNED16) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_NE16(ss) * (const lzo_memops_TU2p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_NE16(ss) lzo_memops_get_ne16(ss)
+@@ -399,14 +441,23 @@ __lzo_static_forceinline lzo_uint32_t lzo_memops_get_ne32(const lzo_voidp ss)
+ LZO_MEMOPS_COPY4(&v, ss);
+ return v;
+ }
+-#if (LZO_OPT_UNALIGNED32)
++#if (LZO_OPT_UNALIGNED32) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_NE32(ss) * (const lzo_memops_TU4p) (const lzo_memops_TU0p) (ss)
+ #else
+ #define LZO_MEMOPS_GET_NE32(ss) lzo_memops_get_ne32(ss)
+ #endif
+
+-#if (LZO_OPT_UNALIGNED64)
++__lzo_static_forceinline lzo_uint64_t lzo_memops_get_ne64(const lzo_voidp ss)
++{
++ lzo_uint64_t v;
++ LZO_MEMOPS_COPY8(&v, ss);
++ return v;
++}
++
++#if (LZO_OPT_UNALIGNED64) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_GET_NE64(ss) * (const lzo_memops_TU8p) (const lzo_memops_TU0p) (ss)
++#else
++#define LZO_MEMOPS_GET_NE64(ss) lzo_memops_get_ne64(ss)
+ #endif
+
+ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
+@@ -423,7 +474,7 @@ __lzo_static_forceinline void lzo_memops_put_le16(lzo_voidp dd, lzo_uint16_t vv)
+ d[1] = LZO_BYTE((vv >> 8) & 0xff);
+ #endif
+ }
+-#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED16) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_LE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_LE16(dd,vv) lzo_memops_put_le16(dd,vv)
+@@ -445,7 +496,7 @@ __lzo_static_forceinline void lzo_memops_put_le32(lzo_voidp dd, lzo_uint32_t vv)
+ d[3] = LZO_BYTE((vv >> 24) & 0xff);
+ #endif
+ }
+-#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN)
++#if (LZO_OPT_UNALIGNED32) && (LZO_ABI_LITTLE_ENDIAN) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_LE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_LE32(dd,vv) lzo_memops_put_le32(dd,vv)
+@@ -455,7 +506,7 @@ __lzo_static_forceinline void lzo_memops_put_ne16(lzo_voidp dd, lzo_uint16_t vv)
+ {
+ LZO_MEMOPS_COPY2(dd, &vv);
+ }
+-#if (LZO_OPT_UNALIGNED16)
++#if (LZO_OPT_UNALIGNED16) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_NE16(dd,vv) (* (lzo_memops_TU2p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_NE16(dd,vv) lzo_memops_put_ne16(dd,vv)
+@@ -465,7 +516,7 @@ __lzo_static_forceinline void lzo_memops_put_ne32(lzo_voidp dd, lzo_uint32_t vv)
+ {
+ LZO_MEMOPS_COPY4(dd, &vv);
+ }
+-#if (LZO_OPT_UNALIGNED32)
++#if (LZO_OPT_UNALIGNED32) && !(LZO_CFG_MODERN_C+0)
+ #define LZO_MEMOPS_PUT_NE32(dd,vv) (* (lzo_memops_TU4p) (lzo_memops_TU0p) (dd) = (vv))
+ #else
+ #define LZO_MEMOPS_PUT_NE32(dd,vv) lzo_memops_put_ne32(dd,vv)
diff -Nru lzo2-2.08/debian/patches/series lzo2-2.08/debian/patches/series
--- lzo2-2.08/debian/patches/series 1970-01-01 00:00:00.000000000 +0000
+++ lzo2-2.08/debian/patches/series 2014-12-20 17:50:47.000000000 +0000
@@ -0,0 +1 @@
+0001-Conditionally-replace-reinvention-of-memcpy-with-cal.patch
diff -Nru lzo2-2.08/debian/rules lzo2-2.08/debian/rules
--- lzo2-2.08/debian/rules 2013-08-26 19:24:58.000000000 +0000
+++ lzo2-2.08/debian/rules 2014-12-20 17:50:47.000000000 +0000
@@ -9,6 +9,7 @@
DEB_INSTALL_DOCS_ALL =
DEB_MAKE_CHECK_TARGET = check test
DEB_DH_MAKESHLIBS_ARGS = --add-udeb=liblzo2-2-udeb
+CPPFLAGS += -DLZO_CFG_MODERN_C=1
common-install-impl::
mkdir -p $(DEB_DESTDIR)/lib/$(DEB_HOST_MULTIARCH)
--- End Message ---