
Bug#659363: Hibernate freezes on HP dc7900 with Linux 3.2 (regression)

Per Olofsson wrote:

> Here is the final patch which fixes the bug:

Thanks, Pelle.  This is commit e9cbc5a6270b from the linux-next branch
of Rafael's linux-pm tree[1] and should show up in linux-next when
Stephen next rebuilds it.  It applies without change to the 3.2.y
tree, so for convenience I'm attaching a patch against the packaging
repo that adds the fix.
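
If you want to give it a spin on a plain stable tree first, something
like this should work (untested sketch; assumes a clone of the 3.2.y
stable tree with network access to Rafael's tree, and the branch name
is arbitrary):

  $ git checkout -b hibernate-fix v3.2.15
  $ git fetch git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm linux-next
  $ git cherry-pick e9cbc5a6270b
  $ make oldconfig && make deb-pkg
  # then boot the resulting kernel and, as root, trigger hibernation:
  # echo disk > /sys/power/state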

Hope that helps,
Jonathan

[1] git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
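
Applying the attachment to a checkout of the packaging repo should be
something like the following (untested; the saved filename here is
just a placeholder for wherever you put the attachment):

  $ cd linux-2.6                            # trunk checkout of the packaging repo
  $ patch -p0 < hibernate-buffering.diff    # placeholder name for this attachment
  $ svn add debian/patches/bugfix/all/PM-Hibernate-fix-the-number-of-pages-used-for-hibern.patch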
Index: debian/changelog
===================================================================
--- debian/changelog	(revision 18944)
+++ debian/changelog	(working copy)
@@ -4,6 +4,10 @@
   * [ia64] Fix futex_atomic_cmpxchg_inatomic() (Closes: #659485)
   * rt2x00: Identify rt2800usb chipsets. (Closes: #658067)
 
+  [ Jonathan Nieder ]
+  * PM / Hibernate: fix the number of pages used for hibernate/thaw
+    buffering (Closes: #659363)
+
  -- Ben Hutchings <ben@decadent.org.uk>  Mon, 16 Apr 2012 02:27:29 +0100
 
 linux-2.6 (3.2.15-1) unstable; urgency=high
Index: debian/patches/bugfix/all/PM-Hibernate-fix-the-number-of-pages-used-for-hibern.patch
===================================================================
--- debian/patches/bugfix/all/PM-Hibernate-fix-the-number-of-pages-used-for-hibern.patch	(revision 0)
+++ debian/patches/bugfix/all/PM-Hibernate-fix-the-number-of-pages-used-for-hibern.patch	(working copy)
@@ -0,0 +1,221 @@
+From: Bojan Smojver <bojan@rexursive.com>
+Date: Sun, 22 Apr 2012 22:32:32 +0200
+Subject: PM / Hibernate: fix the number of pages used for hibernate/thaw buffering
+
+commit e9cbc5a6270be7aa9c42d9b15293ba9ac7161262 upstream.
+
+Hibernation/thaw fixes/improvements:
+
+1. Calculate the number of required free pages based on non-high memory
+pages only, because that is where the buffers will come from.
+
+2. Do not allocate memory for buffers from emergency pools, unless
+absolutely required. Do not warn about and do not retry non-essential
+failed allocations.
+
+3. Do not check the amount of free pages left on every single page
+write, but wait until one map is completely populated and then check.
+
+4. Set maximum number of pages for read buffering consistently, instead
+of inadvertently depending on the size of the sector type.
+
+5. Fix copyright line, which I missed when I submitted the hibernation
+threading patch.
+
+6. Dispense with bit shifting arithmetic to improve readability.
+
+Signed-off-by: Bojan Smojver <bojan@rexursive.com>
+Reviewed-by: Per Olofsson <pelle@debian.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
+---
+ kernel/power/swap.c |   84 +++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 58 insertions(+), 26 deletions(-)
+
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index 11a594c4ba25..64f8f973674b 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -6,7 +6,7 @@
+  *
+  * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
+  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+- * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
++ * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
+  *
+  * This file is released under the GPLv2.
+  *
+@@ -52,6 +52,23 @@
+ 
+ #define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
+ 
++/*
++ * Number of free pages that are not high.
++ */
++static inline unsigned long low_free_pages(void)
++{
++	return nr_free_pages() - nr_free_highpages();
++}
++
++/*
++ * Number of pages required to be kept free while writing the image. Always
++ * half of all available low pages before the writing starts.
++ */
++static inline unsigned long reqd_free_pages(void)
++{
++	return low_free_pages() / 2;
++}
++
+ struct swap_map_page {
+ 	sector_t entries[MAP_PAGE_ENTRIES];
+ 	sector_t next_swap;
+@@ -73,7 +90,7 @@ struct swap_map_handle {
+ 	sector_t cur_swap;
+ 	sector_t first_sector;
+ 	unsigned int k;
+-	unsigned long nr_free_pages, written;
++	unsigned long reqd_free_pages;
+ 	u32 crc32;
+ };
+ 
+@@ -266,14 +283,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
+ 		return -ENOSPC;
+ 
+ 	if (bio_chain) {
+-		src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
++		src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
++		                              __GFP_NORETRY);
+ 		if (src) {
+ 			copy_page(src, buf);
+ 		} else {
+ 			ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
+ 			if (ret)
+ 				return ret;
+-			src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
++			src = (void *)__get_free_page(__GFP_WAIT |
++			                              __GFP_NOWARN |
++			                              __GFP_NORETRY);
+ 			if (src) {
+ 				copy_page(src, buf);
+ 			} else {
+@@ -317,8 +337,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
+ 		goto err_rel;
+ 	}
+ 	handle->k = 0;
+-	handle->nr_free_pages = nr_free_pages() >> 1;
+-	handle->written = 0;
++	handle->reqd_free_pages = reqd_free_pages();
+ 	handle->first_sector = handle->cur_swap;
+ 	return 0;
+ err_rel:
+@@ -352,12 +371,17 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
+ 		clear_page(handle->cur);
+ 		handle->cur_swap = offset;
+ 		handle->k = 0;
+-	}
+-	if (bio_chain && ++handle->written > handle->nr_free_pages) {
+-		error = hib_wait_on_bio_chain(bio_chain);
+-		if (error)
+-			goto out;
+-		handle->written = 0;
++
++		if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
++			error = hib_wait_on_bio_chain(bio_chain);
++			if (error)
++				goto out;
++			/*
++			 * Recalculate the number of required free pages, to
++			 * make sure we never take more than half.
++			 */
++			handle->reqd_free_pages = reqd_free_pages();
++		}
+ 	}
+  out:
+ 	return error;
+@@ -404,8 +428,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
+ /* Maximum number of threads for compression/decompression. */
+ #define LZO_THREADS	3
+ 
+-/* Maximum number of pages for read buffering. */
+-#define LZO_READ_PAGES	(MAP_PAGE_ENTRIES * 8)
++/* Minimum/maximum number of pages for read buffering. */
++#define LZO_MIN_RD_PAGES	1024
++#define LZO_MAX_RD_PAGES	8192
+ 
+ 
+ /**
+@@ -616,12 +641,6 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ 	}
+ 
+ 	/*
+-	 * Adjust number of free pages after all allocations have been done.
+-	 * We don't want to run out of pages when writing.
+-	 */
+-	handle->nr_free_pages = nr_free_pages() >> 1;
+-
+-	/*
+ 	 * Start the CRC32 thread.
+ 	 */
+ 	init_waitqueue_head(&crc->go);
+@@ -642,6 +661,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ 		goto out_clean;
+ 	}
+ 
++	/*
++	 * Adjust the number of required free pages after all allocations have
++	 * been done. We don't want to run out of pages when writing.
++	 */
++	handle->reqd_free_pages = reqd_free_pages();
++
+ 	printk(KERN_INFO
+ 		"PM: Using %u thread(s) for compression.\n"
+ 		"PM: Compressing and saving image data (%u pages) ...     ",
+@@ -1051,7 +1076,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ 	unsigned i, thr, run_threads, nr_threads;
+ 	unsigned ring = 0, pg = 0, ring_size = 0,
+ 	         have = 0, want, need, asked = 0;
+-	unsigned long read_pages;
++	unsigned long read_pages = 0;
+ 	unsigned char **page = NULL;
+ 	struct dec_data *data = NULL;
+ 	struct crc_data *crc = NULL;
+@@ -1063,7 +1088,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ 	nr_threads = num_online_cpus() - 1;
+ 	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
+ 
+-	page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
++	page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
+ 	if (!page) {
+ 		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
+ 		ret = -ENOMEM;
+@@ -1128,15 +1153,22 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ 	}
+ 
+ 	/*
+-	 * Adjust number of pages for read buffering, in case we are short.
++	 * Set the number of pages for read buffering.
++	 * This is complete guesswork, because we'll only know the real
++	 * picture once prepare_image() is called, which is much later on
++	 * during the image load phase. We'll assume the worst case and
++	 * say that none of the image pages are from high memory.
+ 	 */
+-	read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
+-	read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
++	if (low_free_pages() > snapshot_get_image_size())
++		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
++	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
+ 
+ 	for (i = 0; i < read_pages; i++) {
+ 		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
+ 		                                  __GFP_WAIT | __GFP_HIGH :
+-		                                  __GFP_WAIT);
++		                                  __GFP_WAIT | __GFP_NOWARN |
++		                                  __GFP_NORETRY);
++
+ 		if (!page[i]) {
+ 			if (i < LZO_CMP_PAGES) {
+ 				ring_size = i;
+-- 
+1.7.10
+
Index: debian/patches/series/base
===================================================================
--- debian/patches/series/base	(revision 18944)
+++ debian/patches/series/base	(working copy)
@@ -174,3 +174,4 @@
 + bugfix/all/hugetlb-fix-race-condition-in-hugetlb_fault.patch
 + bugfix/ia64/IA64-Fix-futex_atomic_cmpxchg_inatomic.patch
 + features/all/rt2x00-Identify-rt2800usb-chipsets.patch
++ bugfix/all/PM-Hibernate-fix-the-number-of-pages-used-for-hibern.patch
