
Re: Wheezy update of libgc?



Hi,

On Sun, 20 Nov 2016, Markus Koschany wrote:
> the Debian LTS team would like to fix the security issues which are
> currently open in the Wheezy version of libgc:
> https://security-tracker.debian.org/tracker/CVE-2016-9427

I have prepared an updated package (it required lots of manual
backporting). I attach the debdiff, but it's also available in the wheezy
branch of the git repository:
https://anonscm.debian.org/cgit/collab-maint/libgc.git/commit/?h=wheezy&id=d3fef2d1fa732873e1e136b3bb3b7024af1e253c

The package builds and its test suite still passes. I have backported the
supplementary tests that upstream added to cover this issue, and they also
pass.
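
For an extra sanity check beyond the packaged test suite, here is a minimal
standalone sketch (not part of the attached debdiff) in the spirit of the
backported huge_test additions; the file name and the CHECK_FAILED helper are
purely illustrative, and it assumes libgc-dev's gc.h header and linking with
-lgc. With the CVE-2016-9427 patches applied, allocation requests close to the
word-type maximum must fail cleanly instead of wrapping around and returning a
smaller memory zone:

/* overflow_check.c -- illustrative only, not shipped in the package.    */
/* Assumed build invocation: gcc overflow_check.c -lgc -o overflow_check */
#include <stdio.h>
#include <stdlib.h>
#include <gc.h>

#define CHECK_FAILED(r, what) \
  do { \
    if ((r) != NULL) { \
      fprintf(stderr, "%s allocation unexpectedly succeeded\n", what); \
      exit(1); \
    } \
  } while (0)

int main(void)
{
    GC_word word_max = (GC_word)-1;

    GC_INIT();
    /* Sizes near the word-type maximum used to overflow during internal */
    /* rounding; the patched library must reject them and return NULL.   */
    CHECK_FAILED(GC_MALLOC(word_max - 1024), "WORD_MAX-1024");
    CHECK_FAILED(GC_MALLOC(word_max - 16),   "WORD_MAX-16");
    CHECK_FAILED(GC_MALLOC(word_max),        "WORD_MAX");
    printf("all oversized allocations were rejected\n");
    return 0;
}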

Christoph, is there anything else that we should test before releasing
the update?

Cheers,
-- 
Raphaël Hertzog ◈ Debian Developer

Support Debian LTS: http://www.freexian.com/services/debian-lts.html
Learn to master Debian: http://debian-handbook.info/get/
commit d3fef2d1fa732873e1e136b3bb3b7024af1e253c (HEAD -> wheezy, origin/wheezy)
Author: Raphaël Hertzog <hertzog@debian.org>
Date:   Thu Nov 24 18:16:45 2016 +0100

    Backport upstream patches for CVE-2016-9427

diff --git a/debian/changelog b/debian/changelog
index e714d53..29d5ca5 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,12 @@
+libgc (1:7.1-9.1+deb7u1) wheezy-security; urgency=medium
+
+  * Non-maintainer upload by the Debian LTS team.
+  * Backport upstream patches for CVE-2016-9427, in multiple places libgc
+    fails to detect integer overflows and returns pointers to memory zones
+    smaller than the requested size. Closes: #844771
+
+ -- Raphaël Hertzog <hertzog@debian.org>  Thu, 24 Nov 2016 18:11:46 +0100
+
 libgc (1:7.1-9.1) unstable; urgency=low
 
   * Non-maintainer upload.
diff --git a/debian/patches/CVE-2016-9427-1.patch b/debian/patches/CVE-2016-9427-1.patch
new file mode 100644
index 0000000..041c517
--- /dev/null
+++ b/debian/patches/CVE-2016-9427-1.patch
@@ -0,0 +1,161 @@
+From: Ivan Maidanski <ivmai@mail.ru>
+Date: Thu, 24 Nov 2016 16:06:04 +0100
+Subject: Fix GET_MEM argument rounding in GC_scratch_alloc and similar
+
+(Prevent abort in GC_unix_mmap_get_mem if the allocation size is not
+a multiple of a page size.)
+(Apply commit 62bfeb0 from 'release-7_4' branch.)
+
+* backgraph.c (new_back_edges, push_in_progress): Use
+ROUNDUP_PAGESIZE_IF_MMAP() to adjust GET_MEM() argument (when needed).
+* headers.c (GC_scratch_alloc): Likewise.
+* misc.c (GC_envfile_init): Likewise.
+* include/private/gc_priv.h (ROUNDUP_PAGESIZE_IF_MMAP): New macro.
+* include/private/gcconfig.h (MMAP_SUPPORTED): Move definition from
+os_dep.c (as needed for ROUNDUP_PAGESIZE_IF_MMAP() definition).
+* include/private/gcconfig.h (GET_MEM): Refine comment (regarding its
+argument).
+
+[hertzog@debian.org: This commit is a pre-requisite to apply the patches
+that actually fix CVE-2016-9427.]
+
+Origin: backport, https://github.com/ivmai/bdwgc/commit/175b8d2c17a37fddbd76302baf42d73688b8bccb
+Bug: https://github.com/ivmai/bdwgc/issues/135
+Bug-Debian: https://bugs.debian.org/844771
+---
+ backgraph.c                |  8 ++++++--
+ headers.c                  | 14 ++++----------
+ include/private/gc_priv.h  | 12 ++++++++++++
+ include/private/gcconfig.h |  9 +++++++--
+ os_dep.c                   |  5 -----
+ 5 files changed, 29 insertions(+), 19 deletions(-)
+
+diff --git a/backgraph.c b/backgraph.c
+index 92d09e0..d7915ff 100644
+--- a/backgraph.c
++++ b/backgraph.c
+@@ -84,8 +84,9 @@ static back_edges *avail_back_edges = 0;
+ static back_edges * new_back_edges(void)
+ {
+   if (0 == back_edge_space) {
+-    back_edge_space = (back_edges *)
+-	    		GET_MEM(MAX_BACK_EDGE_STRUCTS*sizeof(back_edges));
++    back_edge_space = (back_edges *)GET_MEM(
++			ROUNDUP_PAGESIZE_IF_MMAP(MAX_BACK_EDGE_STRUCTS
++						 * sizeof(back_edges)));
+     GC_add_to_our_memory((ptr_t)back_edge_space,
+     			 MAX_BACK_EDGE_STRUCTS*sizeof(back_edges));
+   }
+@@ -126,6 +127,9 @@ static void push_in_progress(ptr_t p)
+   if (n_in_progress >= in_progress_size) 
+     if (in_progress_size == 0) {
+       in_progress_size = INITIAL_IN_PROGRESS;
++      in_progress_size = ROUNDUP_PAGESIZE_IF_MMAP(INITIAL_IN_PROGRESS
++						    * sizeof(ptr_t))
++			    / sizeof(ptr_t);
+       in_progress_space = (ptr_t *)GET_MEM(in_progress_size * sizeof(ptr_t));
+       GC_add_to_our_memory((ptr_t)in_progress_space,
+       			   in_progress_size * sizeof(ptr_t));
+diff --git a/headers.c b/headers.c
+index 7aef710..de80b21 100644
+--- a/headers.c
++++ b/headers.c
+@@ -126,28 +126,22 @@ ptr_t GC_scratch_alloc(size_t bytes)
+          
+         if (bytes_to_get <= bytes) {
+           /* Undo the damage, and get memory directly */
+-	    bytes_to_get = bytes;
+-#	    ifdef USE_MMAP
+-		bytes_to_get += GC_page_size - 1;
+-		bytes_to_get &= ~(GC_page_size - 1);
+-#	    endif
++	    bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(bytes);
+    	    result = (ptr_t)GET_MEM(bytes_to_get);
+ 	    GC_add_to_our_memory(result, bytes_to_get);
+             scratch_free_ptr -= bytes;
+ 	    GC_scratch_last_end_ptr = result + bytes;
+             return(result);
+         }
++
++	bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(bytes_to_get); /* for safety */
+         result = (ptr_t)GET_MEM(bytes_to_get);
+         GC_add_to_our_memory(result, bytes_to_get);
+         if (result == 0) {
+ 	    if (GC_print_stats)
+                 GC_printf("Out of memory - trying to allocate less\n");
+             scratch_free_ptr -= bytes;
+-	    bytes_to_get = bytes;
+-#	    ifdef USE_MMAP
+-		bytes_to_get += GC_page_size - 1;
+-		bytes_to_get &= ~(GC_page_size - 1);
+-#	    endif
++	    bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(bytes);
+             result = (ptr_t)GET_MEM(bytes_to_get);
+             GC_add_to_our_memory(result, bytes_to_get);
+ 	    return result;
+diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
+index 1d96d87..4a6edee 100644
+--- a/include/private/gc_priv.h
++++ b/include/private/gc_priv.h
+@@ -1184,6 +1184,18 @@ extern word GC_n_heap_sects;	/* Number of separately added heap	*/
+ 
+ extern word GC_page_size;
+ 
++/* Round up allocation size to a multiple of a page size.       */
++/* GC_setpagesize() is assumed to be already invoked.           */
++#define ROUNDUP_PAGESIZE(bytes) \
++                (((bytes) + GC_page_size - 1) & ~(GC_page_size - 1))
++
++/* Same as above but used to make GET_MEM() argument safe.      */
++#ifdef MMAP_SUPPORTED
++# define ROUNDUP_PAGESIZE_IF_MMAP(bytes) ROUNDUP_PAGESIZE(bytes)
++#else
++# define ROUNDUP_PAGESIZE_IF_MMAP(bytes) (bytes)
++#endif
++
+ # if defined(MSWIN32) || defined(MSWINCE)
+   struct _SYSTEM_INFO;
+   extern struct _SYSTEM_INFO GC_sysinfo;
+diff --git a/include/private/gcconfig.h b/include/private/gcconfig.h
+index d76929a..e226c5f 100644
+--- a/include/private/gcconfig.h
++++ b/include/private/gcconfig.h
+@@ -2129,6 +2129,11 @@
+ #   undef MPROTECT_VDB
+ # endif
+ 
++#if defined(LINUX) || defined(FREEBSD) || defined(SOLARIS) || defined(IRIX5) \
++	|| defined(USE_MMAP) || defined(USE_MUNMAP)
++# define MMAP_SUPPORTED
++#endif
++
+ # ifdef USE_MUNMAP
+ #   undef MPROTECT_VDB  /* Can't deal with address space holes. */
+ # endif
+@@ -2330,8 +2335,8 @@
+ 	/* -DREDIRECT_MALLOC.						*/
+ 	/* GET_MEM() returns a HLKSIZE aligned chunk.			*/
+ 	/* 0 is taken to mean failure. 					*/
+-	/* In the case os USE_MMAP, the argument must also be a 	*/
+-	/* physical page size.						*/
++	/* In case of MMAP_SUPPORTED, the argument must also be a 	*/
++	/* multiple of a physical page size.				*/
+ 	/* GET_MEM is currently not assumed to retrieve 0 filled space, */
+ 	/* though we should perhaps take advantage of the case in which */
+ 	/* does.							*/
+diff --git a/os_dep.c b/os_dep.c
+index f403375..b95cc21 100644
+--- a/os_dep.c
++++ b/os_dep.c
+@@ -97,11 +97,6 @@
+ # include <malloc.h>   /* for locking */
+ #endif
+ 
+-#if defined(LINUX) || defined(FREEBSD) || defined(SOLARIS) || defined(IRIX5) \
+-	|| defined(USE_MMAP) || defined(USE_MUNMAP)
+-# define MMAP_SUPPORTED
+-#endif
+-
+ #if defined(MMAP_SUPPORTED) || defined(ADD_HEAP_GUARD_PAGES)
+ # if defined(USE_MUNMAP) && !defined(USE_MMAP)
+     --> USE_MUNMAP requires USE_MMAP
diff --git a/debian/patches/CVE-2016-9427-2.patch b/debian/patches/CVE-2016-9427-2.patch
new file mode 100644
index 0000000..85b0f9a
--- /dev/null
+++ b/debian/patches/CVE-2016-9427-2.patch
@@ -0,0 +1,66 @@
+From: Ivan Maidanski <ivmai@mail.ru>
+Date: Thu, 24 Nov 2016 16:18:50 +0100
+Subject: Fix calloc_explicitly_typed in case of lb*n overflow
+
+(Cherry-pick commit 41a9ed4 from 'release-7_4' branch.)
+
+* typd_mlc.c: Include limits.h (for SIZE_MAX).
+* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): New macro (same as in
+malloc.c).
+* typd_mlc.c (GC_calloc_explicitly_typed): Return NULL if lb * n
+overflows (same algorithm as in calloc defined in malloc.c); eliminate
+lb *= n code duplication.
+
+Origin: backport, https://github.com/ivmai/bdwgc/commit/a230ee8b21111b88749a97e6801048db1859a0fc
+Bug: https://github.com/ivmai/bdwgc/issues/135
+Bug-Debian: https://bugs.debian.org/844771
+---
+ typd_mlc.c | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+diff --git a/typd_mlc.c b/typd_mlc.c
+index ae529d3..dfd5bad 100644
+--- a/typd_mlc.c
++++ b/typd_mlc.c
+@@ -648,6 +648,15 @@ DCL_LOCK_STATE;
+    return((void *) op);
+ }
+ 
++#include <limits.h>
++#ifdef SIZE_MAX
++# define GC_SIZE_MAX SIZE_MAX
++#else
++# define GC_SIZE_MAX (~(size_t)0)
++#endif
++
++#define GC_SQRT_SIZE_MAX ((((size_t)1) << (WORDSZ / 2)) - 1)
++
+ void * GC_calloc_explicitly_typed(size_t n, size_t lb, GC_descr d)
+ {
+ ptr_t op;
+@@ -659,17 +668,20 @@ register int descr_type;
+ struct LeafDescriptor leaf;
+ DCL_LOCK_STATE;
+ 
+-    descr_type = GC_make_array_descriptor((word)n, (word)lb, d,
+-    					  &simple_descr, &complex_descr, &leaf);
++    descr_type = GC_make_array_descriptor((word)n, (word)lb, d, &simple_descr,
++					  &complex_descr, &leaf);
++    if ((lb | n) > GC_SQRT_SIZE_MAX /* fast initial check */
++        && lb > 0 && n > GC_SIZE_MAX / lb)
++      return NULL; /* n*lb overflow */
++    lb *= n;
+     switch(descr_type) {
+     	case NO_MEM: return(0);
+-    	case SIMPLE: return(GC_malloc_explicitly_typed(n*lb, simple_descr));
++    	case SIMPLE:
++	    return GC_malloc_explicitly_typed(lb, simple_descr);
+     	case LEAF:
+-    	    lb *= n;
+     	    lb += sizeof(struct LeafDescriptor) + TYPD_EXTRA_BYTES;
+     	    break;
+     	case COMPLEX:
+-    	    lb *= n;
+     	    lb += TYPD_EXTRA_BYTES;
+     	    break;
+     }
diff --git a/debian/patches/CVE-2016-9427-3.patch b/debian/patches/CVE-2016-9427-3.patch
new file mode 100644
index 0000000..675e1e9
--- /dev/null
+++ b/debian/patches/CVE-2016-9427-3.patch
@@ -0,0 +1,786 @@
+From: Ivan Maidanski <ivmai@mail.ru>
+Date: Thu, 24 Nov 2016 18:02:18 +0100
+Subject: Fix malloc routines to prevent size value wrap-around
+
+(Cherry-pick commit 0b68187 from 'release-7_4' branch.)
+
+See issue #135 on Github.
+
+* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
+OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
+* malloc.c (GC_alloc_large): Likewise.
+* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
+from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
+overflow when computing GC_heapsize+bytes > GC_max_heapsize.
+* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
+GC_debug_malloc_atomic_ignore_off_page,
+GC_debug_generic_malloc_inner,
+GC_debug_generic_malloc_inner_ignore_off_page,
+GC_debug_malloc_stubborn, GC_debug_malloc_atomic,
+GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable):
+Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb
+value.
+* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
+* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
+ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
+* include/private/gcconfig.h (GET_MEM): Likewise.
+* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
+* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
+* typd_mlc.c (GC_malloc_explicitly_typed,
+GC_malloc_explicitly_typed_ignore_off_page,
+GC_calloc_explicitly_typed): Likewise.
+* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
+to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
+* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
+defined).
+* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
+malloc.c file.
+* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
+include gcconfig.h).
+* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
+to size_t.
+* os_dep.c (GC_page_size): Likewise.
+* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
+ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
+* include/private/gcconfig.h (GET_MEM): Likewise.
+* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
+ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE,
+ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb".
+* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
+* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
+GC_unix_get_mem): Change argument type from word to int.
+* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
+GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
+* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only
+if no value wrap around is guaranteed.
+* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
+(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value
+wrap around).
+* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
+* misc.c (GC_init_size_map): Change "i" local variable type from int
+to size_t.
+* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
+* misc.c (GC_envfile_init): Cast len to size_t when passed to
+ROUNDUP_PAGESIZE_IF_MMAP.
+* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
+GETPAGESIZE() to size_t (when setting GC_page_size).
+* os_dep.c (GC_unix_mmap_get_mem):
+Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
+(the argument is of word type).
+* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
+~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
+cast to size_t.
+* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
+to SBRK_ARG_T.
+* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
+to size_t.
+* typd_mlc.c: Do not include limits.h.
+* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
+gc_priv.h now).
+
+[hertzog@debian.org:
+- Also adjusted ROUNDED_UP_WORDS like ROUNDED_UP_GRANULES
+]
+
+Origin: backport, https://github.com/ivmai/bdwgc/commit/2ea6d85adc5fe07d7e9c5d35f2e5886857338681
+Bug: https://github.com/ivmai/bdwgc/issues/135
+Bug-Debian: https://bugs.debian.org/844771
+---
+ allchblk.c                 |  4 ++--
+ alloc.c                    | 17 ++++++----------
+ dbg_mlc.c                  | 23 ++++++++++++----------
+ gcj_mlc.c                  |  3 ++-
+ headers.c                  |  3 +--
+ include/private/gc_priv.h  | 48 +++++++++++++++++++++++++++++++++-------------
+ include/private/gcconfig.h | 31 ++++++++++++++++--------------
+ malloc.c                   | 19 +++++-------------
+ mallocx.c                  | 13 ++++++-------
+ misc.c                     |  2 +-
+ os_dep.c                   | 39 +++++++++++++++++++------------------
+ typd_mlc.c                 | 22 +++++++--------------
+ 12 files changed, 115 insertions(+), 109 deletions(-)
+
+diff --git a/allchblk.c b/allchblk.c
+index 9347b67..753fa25 100644
+--- a/allchblk.c
++++ b/allchblk.c
+@@ -586,7 +586,7 @@ GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
+     		     /* split.						*/
+ 
+     GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
+-    blocks = OBJ_SZ_TO_BLOCKS(sz);
++    blocks = OBJ_SZ_TO_BLOCKS_CHECKED(sz);
+     if ((signed_word)(blocks * HBLKSIZE) < 0) {
+       return 0;
+     }
+@@ -648,7 +648,7 @@ GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, GC_bool may_split)
+     signed_word size_needed;    /* number of bytes in requested objects */
+     signed_word size_avail;	/* bytes available in this block	*/
+ 
+-    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
++    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS_CHECKED(sz);
+ 
+     /* search for a big enough block in free list */
+ 	hbp = GC_hblkfreelist[n];
+diff --git a/alloc.c b/alloc.c
+index cecdf82..eb3c4a3 100644
+--- a/alloc.c
++++ b/alloc.c
+@@ -898,21 +898,16 @@ GC_word GC_max_retries = 0;
+  */
+ GC_bool GC_expand_hp_inner(word n)
+ {
+-    word bytes;
++    size_t bytes;
+     struct hblk * space;
+     word expansion_slop;	/* Number of bytes by which we expect the */
+     				/* heap to expand soon.			  */
+ 
+     if (n < MINHINCR) n = MINHINCR;
+-    bytes = n * HBLKSIZE;
+-    /* Make sure bytes is a multiple of GC_page_size */
+-      {
+-	word mask = GC_page_size - 1;
+-	bytes += mask;
+-	bytes &= ~mask;
+-      }
+-    
+-    if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
++    bytes = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
++    if (GC_max_heapsize != 0
++        && (GC_max_heapsize < (word)bytes
++            || GC_heapsize > GC_max_heapsize - (word)bytes)) {
+         /* Exceeded self-imposed limit */
+         return(FALSE);
+     }
+@@ -936,7 +931,7 @@ GC_bool GC_expand_hp_inner(word n)
+     if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
+         || (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {
+         /* Assume the heap is growing up */
+-	word new_limit = (word)space + bytes + expansion_slop;
++        word new_limit = (word)space + (word)bytes + expansion_slop;
+ 	if (new_limit > (word)space) {
+           GC_greatest_plausible_heap_addr =
+             (void *)GC_max((word)GC_greatest_plausible_heap_addr,
+diff --git a/dbg_mlc.c b/dbg_mlc.c
+index 4bb0e13..80728de 100644
+--- a/dbg_mlc.c
++++ b/dbg_mlc.c
+@@ -458,7 +458,7 @@ void GC_debug_register_displacement(size_t offset)
+ 
+ void * GC_debug_malloc(size_t lb, GC_EXTRA_PARAMS)
+ {
+-    void * result = GC_malloc(lb + DEBUG_BYTES);
++    void * result = GC_malloc(SIZET_SAT_ADD(lb, DEBUG_BYTES));
+     
+     if (result == 0) {
+         GC_err_printf("GC_debug_malloc(%lu) returning NIL (",
+@@ -476,7 +476,7 @@ void * GC_debug_malloc(size_t lb, GC_EXTRA_PARAMS)
+ 
+ void * GC_debug_malloc_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
+ {
+-    void * result = GC_malloc_ignore_off_page(lb + DEBUG_BYTES);
++    void * result = GC_malloc_ignore_off_page(SIZET_SAT_ADD(lb, DEBUG_BYTES));
+     
+     if (result == 0) {
+         GC_err_printf("GC_debug_malloc_ignore_off_page(%lu) returning NIL (",
+@@ -494,7 +494,8 @@ void * GC_debug_malloc_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
+ 
+ void * GC_debug_malloc_atomic_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
+ {
+-    void * result = GC_malloc_atomic_ignore_off_page(lb + DEBUG_BYTES);
++    void * result = GC_malloc_atomic_ignore_off_page(
++                                SIZET_SAT_ADD(lb, DEBUG_BYTES));
+     
+     if (result == 0) {
+         GC_err_printf("GC_debug_malloc_atomic_ignore_off_page(%lu)"
+@@ -521,7 +522,8 @@ void * GC_debug_malloc_atomic_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
+  */
+   void * GC_debug_generic_malloc_inner(size_t lb, int k)
+   {
+-    void * result = GC_generic_malloc_inner(lb + DEBUG_BYTES, k);
++    void * result = GC_generic_malloc_inner(
++                                SIZET_SAT_ADD(lb, DEBUG_BYTES), k);
+     
+     if (result == 0) {
+         GC_err_printf("GC internal allocation (%lu bytes) returning NIL\n",
+@@ -535,7 +537,7 @@ void * GC_debug_malloc_atomic_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
+   void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb, int k)
+   {
+     void * result = GC_generic_malloc_inner_ignore_off_page(
+-					        lb + DEBUG_BYTES, k);
++                                SIZET_SAT_ADD(lb, DEBUG_BYTES), k);
+     
+     if (result == 0) {
+         GC_err_printf("GC internal allocation (%lu bytes) returning NIL\n",
+@@ -550,7 +552,7 @@ void * GC_debug_malloc_atomic_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
+ #ifdef STUBBORN_ALLOC
+ void * GC_debug_malloc_stubborn(size_t lb, GC_EXTRA_PARAMS)
+ {
+-    void * result = GC_malloc_stubborn(lb + DEBUG_BYTES);
++    void * result = GC_malloc_stubborn(SIZET_SAT_ADD(lb, DEBUG_BYTES));
+     
+     if (result == 0) {
+         GC_err_printf("GC_debug_malloc(%lu) returning NIL (",
+@@ -619,7 +621,7 @@ void GC_debug_end_stubborn_change(void *p)
+ 
+ void * GC_debug_malloc_atomic(size_t lb, GC_EXTRA_PARAMS)
+ {
+-    void * result = GC_malloc_atomic(lb + DEBUG_BYTES);
++    void * result = GC_malloc_atomic(SIZET_SAT_ADD(lb, DEBUG_BYTES));
+     
+     if (result == 0) {
+         GC_err_printf("GC_debug_malloc_atomic(%lu) returning NIL (",
+@@ -650,7 +652,8 @@ char *GC_debug_strdup(const char *str, GC_EXTRA_PARAMS)
+ 
+ void * GC_debug_malloc_uncollectable(size_t lb, GC_EXTRA_PARAMS)
+ {
+-    void * result = GC_malloc_uncollectable(lb + UNCOLLECTABLE_DEBUG_BYTES);
++    void * result = GC_malloc_uncollectable(
++                                SIZET_SAT_ADD(lb, UNCOLLECTABLE_DEBUG_BYTES));
+     
+     if (result == 0) {
+         GC_err_printf("GC_debug_malloc_uncollectable(%lu) returning NIL (",
+@@ -669,8 +672,8 @@ void * GC_debug_malloc_uncollectable(size_t lb, GC_EXTRA_PARAMS)
+ #ifdef ATOMIC_UNCOLLECTABLE
+ void * GC_debug_malloc_atomic_uncollectable(size_t lb, GC_EXTRA_PARAMS)
+ {
+-    void * result =
+-	GC_malloc_atomic_uncollectable(lb + UNCOLLECTABLE_DEBUG_BYTES);
++    void * result = GC_malloc_atomic_uncollectable(
++			    SIZET_SAT_ADD(lb, UNCOLLECTABLE_DEBUG_BYTES));
+     
+     if (result == 0) {
+         GC_err_printf(
+diff --git a/gcj_mlc.c b/gcj_mlc.c
+index 7e5beb1..d081e48 100644
+--- a/gcj_mlc.c
++++ b/gcj_mlc.c
+@@ -186,7 +186,8 @@ void * GC_debug_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr,
+     /* confuse the backtrace.					*/
+     LOCK();
+     maybe_finalize();
+-    result = GC_generic_malloc_inner(lb + DEBUG_BYTES, GC_gcj_debug_kind);
++    result = GC_generic_malloc_inner(SIZET_SAT_ADD(lb, DEBUG_BYTES),
++                                     GC_gcj_debug_kind);
+     if (result == 0) {
+ 	UNLOCK();
+         GC_err_printf("GC_debug_gcj_malloc(%ld, %p) returning NIL (",
+diff --git a/headers.c b/headers.c
+index de80b21..d036b46 100644
+--- a/headers.c
++++ b/headers.c
+@@ -115,8 +115,7 @@ ptr_t GC_scratch_alloc(size_t bytes)
+ {
+     register ptr_t result = scratch_free_ptr;
+ 
+-    bytes += GRANULE_BYTES-1;
+-    bytes &= ~(GRANULE_BYTES-1);
++    bytes = ROUNDUP_GRANULE_SIZE(bytes);
+     scratch_free_ptr += bytes;
+     if (scratch_free_ptr <= GC_scratch_end_ptr) {
+         return(result);
+diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
+index 4a6edee..e89f080 100644
+--- a/include/private/gc_priv.h
++++ b/include/private/gc_priv.h
+@@ -64,6 +64,20 @@ typedef char * ptr_t;	/* A generic pointer to which we can add	*/
+ 			/* byte displacements and which can be used	*/
+ 			/* for address comparisons.			*/
+ 
++#ifndef SIZE_MAX
++# include <limits.h>
++#endif
++#ifdef SIZE_MAX
++# define GC_SIZE_MAX SIZE_MAX
++#else
++# define GC_SIZE_MAX (~(size_t)0)
++#endif
++
++/* Saturated addition of size_t values.  Used to avoid value wrap       */
++/* around on overflow.  The arguments should have no side effects.      */
++#define SIZET_SAT_ADD(a, b) \
++                ((a) < GC_SIZE_MAX - (b) ? (a) + (b) : GC_SIZE_MAX)
++
+ # ifndef GCCONFIG_H
+ #   include "gcconfig.h"
+ # endif
+@@ -154,10 +168,10 @@ typedef char * ptr_t;	/* A generic pointer to which we can add	*/
+ #define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()
+ 
+ #if !defined(DONT_ADD_BYTE_AT_END)
+-# define EXTRA_BYTES GC_all_interior_pointers
++# define EXTRA_BYTES (size_t)GC_all_interior_pointers
+ # define MAX_EXTRA_BYTES 1
+ #else
+-# define EXTRA_BYTES 0
++# define EXTRA_BYTES (size_t)0
+ # define MAX_EXTRA_BYTES 0
+ #endif
+ 
+@@ -537,6 +551,7 @@ extern GC_warn_proc GC_current_warn_proc;
+ # define LOG_HBLKSIZE   ((size_t)CPP_LOG_HBLKSIZE)
+ # define HBLKSIZE ((size_t)CPP_HBLKSIZE)
+ 
++#define GC_SQRT_SIZE_MAX ((((size_t)1) << (WORDSZ / 2)) - 1)
+ 
+ /*  max size objects supported by freelist (larger objects are	*/
+ /*  allocated directly with allchblk(), by rounding to the next */
+@@ -565,11 +580,15 @@ extern GC_warn_proc GC_current_warn_proc;
+ 
+ # define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))
+ 
++/* Round up allocation size (in bytes) to a multiple of a granule.      */
++#define ROUNDUP_GRANULE_SIZE(lb) /* lb should have no side-effect */ \
++            (SIZET_SAT_ADD(lb, GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1))
+ /* Round up byte allocation requests to integral number of words, etc. */
+ # define ROUNDED_UP_WORDS(n) \
+-	BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1 + EXTRA_BYTES))
+-# define ROUNDED_UP_GRANULES(n) \
+-	BYTES_TO_GRANULES((n) + (GRANULE_BYTES - 1 + EXTRA_BYTES))
++	BYTES_TO_WORDS(SIZET_SAT_ADD(n, WORDS_TO_BYTES(1) - 1 + EXTRA_BYTES))
++# define ROUNDED_UP_GRANULES(lb) /* lb should have no side-effect */ \
++        BYTES_TO_GRANULES(SIZET_SAT_ADD(lb, GRANULE_BYTES - 1 + EXTRA_BYTES))
++
+ # if MAX_EXTRA_BYTES == 0
+ #  define SMALL_OBJ(bytes) EXPECT((bytes) <= (MAXOBJBYTES), 1)
+ # else
+@@ -579,7 +598,8 @@ extern GC_warn_proc GC_current_warn_proc;
+     	/* This really just tests bytes <= MAXOBJBYTES - EXTRA_BYTES.	*/
+     	/* But we try to avoid looking up EXTRA_BYTES.			*/
+ # endif
+-# define ADD_SLOP(bytes) ((bytes) + EXTRA_BYTES)
++# define ADD_SLOP(lb) /* lb should have no side-effect */ \
++                SIZET_SAT_ADD(lb, EXTRA_BYTES)
+ # ifndef MIN_WORDS
+ #  define MIN_WORDS 2	/* FIXME: obsolete */
+ # endif
+@@ -781,9 +801,11 @@ struct hblk {
+ 
+ # define HBLK_IS_FREE(hdr) (((hdr) -> hb_flags & FREE_BLK) != 0)
+ 
+-# define OBJ_SZ_TO_BLOCKS(sz) divHBLKSZ(sz + HBLKSIZE-1)
++# define OBJ_SZ_TO_BLOCKS(lb) divHBLKSZ((lb) + HBLKSIZE-1)
++# define OBJ_SZ_TO_BLOCKS_CHECKED(lb) /* lb should have no side-effect */ \
++                                divHBLKSZ(SIZET_SAT_ADD(lb, HBLKSIZE - 1))
+     /* Size of block (in units of HBLKSIZE) needed to hold objects of	*/
+-    /* given sz (in bytes).						*/
++    /* given lb (in bytes). The checked variant prevents wrap around.	*/
+ 
+ /* Object free list link */
+ # define obj_link(p) (*(void  **)(p))
+@@ -1182,18 +1204,18 @@ extern word GC_n_heap_sects;	/* Number of separately added heap	*/
+ 				/* sections.				*/
+ #endif
+ 
+-extern word GC_page_size;
++extern size_t GC_page_size;
+ 
+ /* Round up allocation size to a multiple of a page size.       */
+ /* GC_setpagesize() is assumed to be already invoked.           */
+-#define ROUNDUP_PAGESIZE(bytes) \
+-                (((bytes) + GC_page_size - 1) & ~(GC_page_size - 1))
++#define ROUNDUP_PAGESIZE(lb) /* lb should have no side-effect */ \
++            (SIZET_SAT_ADD(lb, GC_page_size - 1) & ~(GC_page_size - 1))
+ 
+ /* Same as above but used to make GET_MEM() argument safe.      */
+ #ifdef MMAP_SUPPORTED
+-# define ROUNDUP_PAGESIZE_IF_MMAP(bytes) ROUNDUP_PAGESIZE(bytes)
++# define ROUNDUP_PAGESIZE_IF_MMAP(lb) ROUNDUP_PAGESIZE(lb)
+ #else
+-# define ROUNDUP_PAGESIZE_IF_MMAP(bytes) (bytes)
++# define ROUNDUP_PAGESIZE_IF_MMAP(lb) (lb)
+ #endif
+ 
+ # if defined(MSWIN32) || defined(MSWINCE)
+diff --git a/include/private/gcconfig.h b/include/private/gcconfig.h
+index e226c5f..37a1a02 100644
+--- a/include/private/gcconfig.h
++++ b/include/private/gcconfig.h
+@@ -2333,7 +2333,8 @@
+ 	/* usually makes it possible to merge consecutively allocated	*/
+ 	/* chunks.  It also avoids unintented recursion with		*/
+ 	/* -DREDIRECT_MALLOC.						*/
+-	/* GET_MEM() returns a HLKSIZE aligned chunk.			*/
++        /* GET_MEM() argument should be of size_t type and have         */
++        /* no side-effect.  GET_MEM() returns HLKSIZE-aligned chunk;    */
+ 	/* 0 is taken to mean failure. 					*/
+ 	/* In case of MMAP_SUPPORTED, the argument must also be a 	*/
+ 	/* multiple of a physical page size.				*/
+@@ -2342,42 +2343,44 @@
+ 	/* does.							*/
+ 	struct hblk;	/* See gc_priv.h.	*/
+ # if defined(PCR)
+-    char * real_malloc();
+-#   define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + GC_page_size) \
++    char * real_malloc(size_t bytes);
++#   define GET_MEM(bytes) HBLKPTR(real_malloc(SIZET_SAT_ADD(bytes, \
++							    GC_page_size)) \
+ 					  + GC_page_size-1)
+ # elif defined(OS2)
+     void * os2_alloc(size_t bytes);
+-#   define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc((size_t)bytes \
+-					    + GC_page_size) \
+-					    + GC_page_size-1)
++#   define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc( \
++					    SIZET_SAT_ADD(bytes, \
++							  GC_page_size)) \
++				  + GC_page_size-1)
+ # elif defined(NEXT) || defined(DOS4GW) || defined(NONSTOP) || \
+ 		 (defined(AMIGA) && !defined(GC_AMIGA_FASTALLOC)) || \
+ 		 (defined(SOLARIS) && !defined(USE_MMAP))
+-#   define GET_MEM(bytes) HBLKPTR((size_t) calloc(1, (size_t)bytes + GC_page_size) \
++#   define GET_MEM(bytes) HBLKPTR((size_t) calloc(1, SIZET_SAT_ADD(bytes, GC_page_size)) \
+ 					             + GC_page_size-1)
+ # elif defined(MSWIN32)
+-    extern ptr_t GC_win32_get_mem();
++    extern ptr_t GC_win32_get_mem(size_t bytes);
+ #   define GET_MEM(bytes) (struct hblk *)GC_win32_get_mem(bytes)
+ # elif defined(MACOS)
+ #   if defined(USE_TEMPORARY_MEMORY)
+       extern Ptr GC_MacTemporaryNewPtr(size_t size, Boolean clearMemory);
+ #     define GET_MEM(bytes) HBLKPTR( \
+-			    GC_MacTemporaryNewPtr(bytes + GC_page_size, true) \
++			    GC_MacTemporaryNewPtr(SIZET_SAT_ADD(bytes, GC_page_size), true) \
+ 			    + GC_page_size-1)
+ #   else
+ #     define GET_MEM(bytes) HBLKPTR( \
+-				NewPtrClear(bytes + GC_page_size) + GC_page_size-1)
++				NewPtrClear(SIZET_SAT_ADD(bytes, GC_page_size)) + GC_page_size-1)
+ #   endif
+ # elif defined(MSWINCE)
+-    extern ptr_t GC_wince_get_mem();
++    extern ptr_t GC_wince_get_mem(size_t bytes);
+ #   define GET_MEM(bytes) (struct hblk *)GC_wince_get_mem(bytes)
+ # elif defined(AMIGA) && defined(GC_AMIGA_FASTALLOC)
+-    extern void *GC_amiga_get_mem(size_t size);
++    extern void *GC_amiga_get_mem(size_t bytes);
+ #   define GET_MEM(bytes) HBLKPTR((size_t) \
+-			  GC_amiga_get_mem((size_t)bytes + GC_page_size) \
++			  GC_amiga_get_mem(SIZET_SAT_ADD(bytes, GC_page_size)) \
+ 			  + GC_page_size-1)
+ # else
+-    extern ptr_t GC_unix_get_mem();
++    extern ptr_t GC_unix_get_mem(size_t bytes);
+ #   define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
+ # endif
+ 
+diff --git a/malloc.c b/malloc.c
+index 59e49c0..f241305 100644
+--- a/malloc.c
++++ b/malloc.c
+@@ -45,8 +45,9 @@ ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
+     ptr_t result;
+ 	
+     /* Round up to a multiple of a granule. */
+-      lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
+-    n_blocks = OBJ_SZ_TO_BLOCKS(lb);
++    lb = ROUNDUP_GRANULE_SIZE(lb);
++    n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(lb);
++
+     if (!GC_is_initialized) GC_init_inner();
+     /* Do our share of marking work */
+         if(GC_incremental && !GC_dont_gc)
+@@ -82,10 +83,11 @@ ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
+ ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
+ {
+     ptr_t result = GC_alloc_large(lb, k, flags);
+-    word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
+ 
+     if (0 == result) return 0;
+     if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
++        word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
++
+ 	/* Clear the whole block, in case of GC_realloc call. */
+ 	BZERO(result, n_blocks * HBLKSIZE);
+     }
+@@ -165,8 +167,6 @@ void * GC_generic_malloc(size_t lb, int k)
+ 	GC_bool init;
+ 	lw = ROUNDED_UP_WORDS(lb);
+ 	lb_rounded = WORDS_TO_BYTES(lw);
+-	if (lb_rounded < lb)
+-	  return((*GC_oom_fn)(lb));
+ 
+ 	n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
+ 	init = GC_obj_kinds[k].ok_init;
+@@ -347,15 +347,6 @@ void * malloc(size_t lb)
+   }
+ #endif
+ 
+-#include <limits.h>
+-#ifdef SIZE_MAX
+-# define GC_SIZE_MAX SIZE_MAX
+-#else
+-# define GC_SIZE_MAX (~(size_t)0)
+-#endif
+-
+-#define GC_SQRT_SIZE_MAX ((1U << (WORDSZ / 2)) - 1)
+-
+ void * calloc(size_t n, size_t lb)
+ {
+     if ((lb | n) > GC_SQRT_SIZE_MAX /* fast initial test */
+diff --git a/mallocx.c b/mallocx.c
+index 889182e..916a09f 100644
+--- a/mallocx.c
++++ b/mallocx.c
+@@ -179,9 +179,6 @@ void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
+         return(GC_generic_malloc((word)lb, k));
+     lw = ROUNDED_UP_WORDS(lb);
+     lb_rounded = WORDS_TO_BYTES(lw);
+-    if (lb_rounded < lb)
+-	  return((*GC_oom_fn)(lb));
+-
+     n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
+     init = GC_obj_kinds[k].ok_init;
+     if (GC_have_errors) GC_print_all_errors();
+@@ -427,9 +424,11 @@ DCL_LOCK_STATE;
+ void * GC_malloc_many(size_t lb)
+ {
+     void *result;
+-    GC_generic_malloc_many(((lb + EXTRA_BYTES + GRANULE_BYTES-1)
+-			   & ~(GRANULE_BYTES-1)),
+-	    		   NORMAL, &result);
++    /* Add EXTRA_BYTES and round up to a multiple of a granule. */
++    lb = SIZET_SAT_ADD(lb, EXTRA_BYTES + GRANULE_BYTES - 1)
++            & ~(GRANULE_BYTES - 1);
++
++    GC_generic_malloc_many(lb, NORMAL, &result);
+     return result;
+ }
+ 
+@@ -508,7 +507,7 @@ void * GC_memalign(size_t align, size_t lb)
+     }
+     /* We could also try to make sure that the real rounded-up object size */
+     /* is a multiple of align.  That would be correct up to HBLKSIZE.	   */
+-    new_lb = lb + align - 1;
++    new_lb = SIZET_SAT_ADD(lb, align - 1);
+     result = GC_malloc(new_lb);
+     offset = (word)result % align;
+     if (offset != 0) {
+diff --git a/misc.c b/misc.c
+index 5f3eef6..794ff49 100755
+--- a/misc.c
++++ b/misc.c
+@@ -142,7 +142,7 @@ void * GC_project2(void *arg1, void *arg2)
+ /* quantization alogrithm (but we precompute it).			*/ 
+ void GC_init_size_map(void)
+ {
+-    int i;
++    size_t i;
+ 
+     /* Map size 0 to something bigger.			*/
+     /* This avoids problems at lower levels.		*/
+diff --git a/os_dep.c b/os_dep.c
+index b95cc21..7a3d126 100644
+--- a/os_dep.c
++++ b/os_dep.c
+@@ -691,20 +691,20 @@ void GC_enable_signals(void)
+ #endif
+ 
+ /* Find the page size */
+-word GC_page_size;
++size_t GC_page_size;
+ 
+ # if defined(MSWIN32) || defined(MSWINCE)
+   void GC_setpagesize(void)
+   {
+     GetSystemInfo(&GC_sysinfo);
+-    GC_page_size = GC_sysinfo.dwPageSize;
++    GC_page_size = (size_t)GC_sysinfo.dwPageSize;
+   }
+ 
+ # else
+ #   if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
+ 	void GC_setpagesize(void)
+ 	{
+-	    GC_page_size = GETPAGESIZE();
++	    GC_page_size = (size_t)GETPAGESIZE();
+ 	}
+ #   else
+ 	/* It's acceptable to fake it. */
+@@ -1755,7 +1755,7 @@ void GC_register_data_segments(void)
+ #   define HEAP_START 0
+ #endif
+ 
+-ptr_t GC_unix_mmap_get_mem(word bytes)
++ptr_t GC_unix_mmap_get_mem(size_t bytes)
+ {
+     void *result;
+     static ptr_t last_addr = HEAP_START;
+@@ -1781,7 +1781,7 @@ ptr_t GC_unix_mmap_get_mem(word bytes)
+         /* Oops.  We got the end of the address space.  This isn't	*/
+ 	/* usable by arbitrary C code, since one-past-end pointers	*/
+ 	/* don't work, so we discard it and try again.			*/
+-	munmap(result, (size_t)(-GC_page_size) - (size_t)result);
++        munmap(result, ~GC_page_size - (size_t)result + 1);
+ 			/* Leave last page mapped, so we can't repeat. */
+ 	return GC_unix_mmap_get_mem(bytes);
+       }
+@@ -1795,14 +1795,14 @@ ptr_t GC_unix_mmap_get_mem(word bytes)
+ 
+ #if defined(USE_MMAP)
+ 
+-ptr_t GC_unix_get_mem(word bytes)
++ptr_t GC_unix_get_mem(size_t bytes)
+ {
+     return GC_unix_mmap_get_mem(bytes);
+ }
+ 
+ #else /* Not USE_MMAP */
+ 
+-ptr_t GC_unix_sbrk_get_mem(word bytes)
++ptr_t GC_unix_sbrk_get_mem(size_t bytes)
+ {
+   ptr_t result;
+ # ifdef IRIX5
+@@ -1819,7 +1819,7 @@ ptr_t GC_unix_sbrk_get_mem(word bytes)
+ 	goto out;
+     }
+     if (lsbs != 0) {
+-        if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) {
++        if((ptr_t)sbrk((SBRK_ARG_T)GC_page_size - lsbs) == (ptr_t)(-1)) {
+ 	    result = 0;
+ 	    goto out;
+ 	}
+@@ -1846,7 +1846,7 @@ ptr_t GC_unix_sbrk_get_mem(word bytes)
+ #if defined(MMAP_SUPPORTED)
+ 
+ /* By default, we try both sbrk and mmap, in that order. */
+-ptr_t GC_unix_get_mem(word bytes)
++ptr_t GC_unix_get_mem(size_t bytes)
+ {
+     static GC_bool sbrk_failed = FALSE;
+     ptr_t result = 0;
+@@ -1865,7 +1865,7 @@ ptr_t GC_unix_get_mem(word bytes)
+ 
+ #else /* !MMAP_SUPPORTED */
+ 
+-ptr_t GC_unix_get_mem(word bytes)
++ptr_t GC_unix_get_mem(size_t bytes)
+ {
+     return GC_unix_sbrk_get_mem(bytes);
+ }
+@@ -1912,7 +1912,7 @@ word GC_mem_top_down = 0;  /* Change to MEM_TOP_DOWN  for better 64-bit */
+ 			   /* testing.  Otherwise all addresses tend to */
+ 			   /* end up in first 4GB, hiding bugs.		*/
+ 
+-ptr_t GC_win32_get_mem(word bytes)
++ptr_t GC_win32_get_mem(size_t bytes)
+ {
+     ptr_t result;
+ 
+@@ -1920,8 +1920,8 @@ ptr_t GC_win32_get_mem(word bytes)
+     	/* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE.	*/
+     	/* There are also unconfirmed rumors of other		*/
+     	/* problems, so we dodge the issue.			*/
+-        result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
+-        result = (ptr_t)(((word)result + HBLKSIZE - 1) & ~(HBLKSIZE-1));
++        result = (ptr_t)(((word)GlobalAlloc(0, SIZET_SAT_ADD(bytes, HBLKSIZE))
++                            + HBLKSIZE - 1) & ~(HBLKSIZE - 1));
+     } else {
+ 	/* VirtualProtect only works on regions returned by a	*/
+ 	/* single VirtualAlloc call.  Thus we allocate one 	*/
+@@ -1936,7 +1936,7 @@ ptr_t GC_win32_get_mem(word bytes)
+         /* available.  Otherwise we waste resources or possibly */
+         /* cause VirtualAlloc to fail (observed in Windows 2000 */
+         /* SP2).                                                */
+-        result = (ptr_t) VirtualAlloc(NULL, bytes + 1,
++        result = (ptr_t) VirtualAlloc(NULL, SIZET_SAT_ADD(bytes, 1),
+ #                                     ifdef GWW_VDB
+                                         GetWriteWatch_alloc_flag |
+ #                                     endif
+@@ -1973,7 +1973,7 @@ void GC_win32_free_heap(void)
+ # ifdef MSWINCE
+ word GC_n_heap_bases = 0;
+ 
+-ptr_t GC_wince_get_mem(word bytes)
++ptr_t GC_wince_get_mem(size_t bytes)
+ {
+     ptr_t result;
+     word i;
+@@ -1993,8 +1993,9 @@ ptr_t GC_wince_get_mem(word bytes)
+ 
+     if (i == GC_n_heap_bases) {
+ 	/* Reserve more pages */
+-	word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
+-			 & ~(GC_sysinfo.dwAllocationGranularity-1);
++        size_t res_bytes =
++            SIZET_SAT_ADD(bytes, (size_t)GC_sysinfo.dwAllocationGranularity-1)
++            & ~((size_t)GC_sysinfo.dwAllocationGranularity-1);
+ 	/* If we ever support MPROTECT_VDB here, we will probably need to    */
+ 	/* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
+ 	/* never spans regions.  It seems to be OK for a VirtualFree	     */
+@@ -2763,7 +2764,7 @@ GC_bool GC_old_segv_handler_used_si;
+ 				-> ExceptionInformation[1]);
+ #	define sig SIGSEGV
+ #   endif
+-    unsigned i;
++    size_t i;
+     
+     if (SIG_OK && CODE_OK) {
+         register struct hblk * h =
+@@ -3851,7 +3852,7 @@ catch_exception_raise(mach_port_t exception_port, mach_port_t thread,
+   kern_return_t r;
+   char *addr;
+   struct hblk *h;
+-  unsigned int i;
++  size_t i;
+ # if defined(POWERPC)
+ #   if CPP_WORDSZ == 32
+       thread_state_flavor_t flavor = PPC_EXCEPTION_STATE;
+diff --git a/typd_mlc.c b/typd_mlc.c
+index dfd5bad..5a06f9f 100644
+--- a/typd_mlc.c
++++ b/typd_mlc.c
+@@ -590,8 +590,8 @@ void * GC_malloc_explicitly_typed(size_t lb, GC_descr d)
+     size_t lg;
+     DCL_LOCK_STATE;
+ 
+-    lb += TYPD_EXTRA_BYTES;
+-    if(SMALL_OBJ(lb)) {
++    lb = SIZET_SAT_ADD(lb, TYPD_EXTRA_BYTES);
++    if (SMALL_OBJ(lb)) {
+ 	lg = GC_size_map[lb];
+ 	opp = &(GC_eobjfreelist[lg]);
+ 	LOCK();
+@@ -623,8 +623,8 @@ ptr_t * opp;
+ size_t lg;
+ DCL_LOCK_STATE;
+ 
+-    lb += TYPD_EXTRA_BYTES;
+-    if( SMALL_OBJ(lb) ) {
++    lb = SIZET_SAT_ADD(lb, TYPD_EXTRA_BYTES);
++    if (SMALL_OBJ(lb)) {
+ 	lg = GC_size_map[lb];
+ 	opp = &(GC_eobjfreelist[lg]);
+ 	LOCK();
+@@ -648,15 +648,6 @@ DCL_LOCK_STATE;
+    return((void *) op);
+ }
+ 
+-#include <limits.h>
+-#ifdef SIZE_MAX
+-# define GC_SIZE_MAX SIZE_MAX
+-#else
+-# define GC_SIZE_MAX (~(size_t)0)
+-#endif
+-
+-#define GC_SQRT_SIZE_MAX ((((size_t)1) << (WORDSZ / 2)) - 1)
+-
+ void * GC_calloc_explicitly_typed(size_t n, size_t lb, GC_descr d)
+ {
+ ptr_t op;
+@@ -679,10 +670,11 @@ DCL_LOCK_STATE;
+     	case SIMPLE:
+ 	    return GC_malloc_explicitly_typed(lb, simple_descr);
+     	case LEAF:
+-    	    lb += sizeof(struct LeafDescriptor) + TYPD_EXTRA_BYTES;
++            lb = SIZET_SAT_ADD(lb,
++                        sizeof(struct LeafDescriptor) + TYPD_EXTRA_BYTES);
+     	    break;
+     	case COMPLEX:
+-    	    lb += TYPD_EXTRA_BYTES;
++            lb = SIZET_SAT_ADD(lb, TYPD_EXTRA_BYTES);
+     	    break;
+     }
+     if( SMALL_OBJ(lb) ) {
diff --git a/debian/patches/CVE-2016-9427-4.patch b/debian/patches/CVE-2016-9427-4.patch
new file mode 100644
index 0000000..63079d8
--- /dev/null
+++ b/debian/patches/CVE-2016-9427-4.patch
@@ -0,0 +1,42 @@
+From: Ivan Maidanski <ivmai@mail.ru>
+Date: Thu, 24 Nov 2016 18:09:27 +0100
+Subject: Fix GC_collect_or_expand to prevent allocation size value wrap-around
+
+(Cherry-pick commit 1f3c938 from 'release-7_4' branch.)
+
+Relates to issue #135 on Github.
+
+* alloc.c (GC_WORD_MAX): New macro.
+* alloc.c (GC_collect_or_expand): Limit blocks_to_get by
+GC_WORD_MAX / HBLKSIZE value (to avoid multiplication overflow in
+GC_expand_hp_inner).
+
+Origin: backport, https://github.com/ivmai/bdwgc/commit/949a7533d47e0ce0976e2d7aa3daa3bf9f31cabd
+Bug: https://github.com/ivmai/bdwgc/issues/135
+Bug-Debian: https://bugs.debian.org/844771
+---
+ alloc.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/alloc.c b/alloc.c
+index eb3c4a3..0357b38 100644
+--- a/alloc.c
++++ b/alloc.c
+@@ -978,6 +978,8 @@ unsigned GC_fail_count = 0;
+ 			/* How many consecutive GC/expansion failures?	*/
+ 			/* Reset by GC_allochblk.			*/
+ 
++#define GC_WORD_MAX (~(word)0)
++
+ GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page)
+ {
+     if (!GC_incremental && !GC_dont_gc &&
+@@ -1004,6 +1006,8 @@ GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page)
+           } else {
+               blocks_to_get = MAXHINCR;
+           }
++	  if (blocks_to_get > divHBLKSZ(GC_WORD_MAX))
++	      blocks_to_get = divHBLKSZ(GC_WORD_MAX);
+       }
+       if (!GC_expand_hp_inner(blocks_to_get)
+         && !GC_expand_hp_inner(needed_blocks)) {
diff --git a/debian/patches/CVE-2016-9427-5.patch b/debian/patches/CVE-2016-9427-5.patch
new file mode 100644
index 0000000..950f05e
--- /dev/null
+++ b/debian/patches/CVE-2016-9427-5.patch
@@ -0,0 +1,79 @@
+From: =?utf-8?q?Rapha=C3=ABl_Hertzog?= <hertzog@debian.org>
+Date: Thu, 24 Nov 2016 18:33:49 +0100
+Subject: Add more cases to huge_test to cover sizes close to word-type maximum
+
+* tests/huge_test.c (GC_WORD_MAX): New macro.
+* tests/huge_test.c (GC_SWORD_MAX): Use GC_WORD_MAX.
+* tests/huge_test.c (main): Add GC_SWORD_MAX+1, GC_WORD_MAX,
+GC_WORD_MAX-4/8/16/1024 test cases.
+
+[hertzog@debian.org: backported the CHECK_ALLOC_FAILED macro and rewrote the
+other checks at the same time, it was easier than adding the tests by
+duplicating the same chunk of code]
+
+Origin: backport, https://github.com/ivmai/bdwgc/commit/e273661227b4684265c09e04f75db81f7c5e697e
+Bug: https://github.com/ivmai/bdwgc/issues/135
+Bug-Debian: https://bugs.debian.org/844771
+---
+ tests/huge_test.c | 41 +++++++++++++++++++++--------------------
+ 1 file changed, 21 insertions(+), 20 deletions(-)
+
+diff --git a/tests/huge_test.c b/tests/huge_test.c
+index 248b1d7..9d801c3 100644
+--- a/tests/huge_test.c
++++ b/tests/huge_test.c
+@@ -10,6 +10,17 @@
+  * expected manner.
+  */
+ 
++#define CHECK_ALLOC_FAILED(r, sz_str) \
++  do { \
++    if (NULL != (r)) { \
++        fprintf(stderr, \
++                "Size " sz_str " allocation unexpectedly succeeded\n"); \
++        exit(1); \
++    } \
++  } while (0)
++
++#define GC_WORD_MAX ((GC_word)-1)
++#define GC_SWORD_MAX ((GC_signed_word)(GC_WORD_MAX >> 1))
+ 
+ main()
+ {
+@@ -20,26 +31,16 @@ main()
+         /* That's OK.  We test this corner case mostly to make sure that  */
+         /* it fails predictably.					  */
+     GC_expand_hp(1024*1024*5);
+-    if (sizeof(long) == sizeof(void *)) {
+-        void *r = GC_MALLOC(LONG_MAX-1024);
+-	if (0 != r) {
+-	    fprintf(stderr,
+-	    	    "Size LONG_MAX-1024 allocation unexpectedly succeeded\n");
+-	    exit(1);
+-	}
+-        r = GC_MALLOC(LONG_MAX);
+-	if (0 != r) {
+-	    fprintf(stderr,
+-	            "Size LONG_MAX allocation unexpectedly succeeded\n");
+-	    exit(1);
+-	}
+-        r = GC_MALLOC((size_t)LONG_MAX + 1024);
+-	if (0 != r) {
+-	    fprintf(stderr,
+-	    	    "Size LONG_MAX+1024 allocation unexpectedly succeeded\n");
+-	    exit(1);
+-	}
+-    }
++    CHECK_ALLOC_FAILED(GC_MALLOC(GC_SWORD_MAX - 1024), "SWORD_MAX-1024");
++    CHECK_ALLOC_FAILED(GC_MALLOC(GC_SWORD_MAX), "SWORD_MAX");
++    CHECK_ALLOC_FAILED(GC_MALLOC((GC_word)GC_SWORD_MAX + 1), "SWORD_MAX+1");
++    CHECK_ALLOC_FAILED(GC_MALLOC((GC_word)GC_SWORD_MAX + 1024),
++                       "SWORD_MAX+1024");
++    CHECK_ALLOC_FAILED(GC_MALLOC(GC_WORD_MAX - 1024), "WORD_MAX-1024");
++    CHECK_ALLOC_FAILED(GC_MALLOC(GC_WORD_MAX - 16), "WORD_MAX-16");
++    CHECK_ALLOC_FAILED(GC_MALLOC(GC_WORD_MAX - 8), "WORD_MAX-8");
++    CHECK_ALLOC_FAILED(GC_MALLOC(GC_WORD_MAX - 4), "WORD_MAX-4");
++    CHECK_ALLOC_FAILED(GC_MALLOC(GC_WORD_MAX), "WORD_MAX");
+     return 0;
+ }
+ 
diff --git a/debian/patches/series b/debian/patches/series
index c22ba2e..b925361 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -2,10 +2,14 @@
 02-manpage.diff
 03-add-avr32-support.diff
 04-fix-sparc-ftbfs.diff
-#05-s390-includes.diff
 enable-threads.diff
 06-m68k-no-getcontext.diff
 CVE-2012-2673-calloc-1.diff
 CVE-2012-2673-calloc-2.diff
 CVE-2012-2673-calloc-3.diff
 CVE-2012-2673-malloc.diff
+CVE-2016-9427-1.patch
+CVE-2016-9427-2.patch
+CVE-2016-9427-3.patch
+CVE-2016-9427-4.patch
+CVE-2016-9427-5.patch
