Use the new read_page_state() in vmscan.c to avoid carrying a large struct
page_state around inside the on-stack struct scan_control.  The reclaim code
only ever looks at the nr_mapped counter, so snapshot that single field with
read_page_state(nr_mapped) instead of filling in the whole structure via
get_page_state().
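
For background: get_page_state() must sum every member of the many-field
struct page_state across all CPUs into a caller-supplied copy, while
read_page_state(member) picks out one counter by offset and sums only that.
The sketch below is an illustrative userspace analogue, not the kernel
implementation: the struct is trimmed to three fields and the per-CPU data
is a plain array.

#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 4

/* Trimmed stand-in: the real struct page_state has dozens of counters. */
struct page_state {
	unsigned long nr_mapped;
	unsigned long nr_dirty;
	unsigned long nr_writeback;
};

static struct page_state per_cpu_state[NR_CPUS];

/* get_page_state() analogue: fills a full caller-supplied copy, so the
 * caller needs sizeof(struct page_state) bytes of stack for the result. */
static void get_page_state(struct page_state *ret)
{
	int cpu;

	ret->nr_mapped = ret->nr_dirty = ret->nr_writeback = 0;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ret->nr_mapped    += per_cpu_state[cpu].nr_mapped;
		ret->nr_dirty     += per_cpu_state[cpu].nr_dirty;
		ret->nr_writeback += per_cpu_state[cpu].nr_writeback;
	}
}

/* read_page_state() analogue: offsetof() selects one member, so only a
 * single unsigned long crosses the interface; no big struct is needed. */
static unsigned long __read_page_state(size_t offset)
{
	unsigned long ret = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		ret += *(unsigned long *)((char *)&per_cpu_state[cpu] + offset);
	return ret;
}

#define read_page_state(member) \
	__read_page_state(offsetof(struct page_state, member))

int main(void)
{
	struct page_state ps;	/* the whole struct lands on the stack */

	per_cpu_state[0].nr_mapped = 10;
	per_cpu_state[1].nr_mapped = 32;

	get_page_state(&ps);
	printf("get_page_state:  %lu\n", ps.nr_mapped);			/* 42 */
	printf("read_page_state: %lu\n", read_page_state(nr_mapped));	/* 42 */
	return 0;
}

This is the same trade the patch makes: scan_control now holds a single
unsigned long nr_mapped rather than an entire struct page_state.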

Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/mm/vmscan.c |    9 +++++----
 1 files changed, 5 insertions(+), 4 deletions(-)

diff -puN mm/vmscan.c~vmscan-use-read_page_state mm/vmscan.c
--- 25/mm/vmscan.c~vmscan-use-read_page_state	Tue Jun  8 16:17:26 2004
+++ 25-akpm/mm/vmscan.c	Tue Jun  8 16:17:26 2004
@@ -320,13 +320,14 @@ struct scan_control {
 	/* Incremented by the number of pages reclaimed */
 	unsigned long nr_reclaimed;
 
+	unsigned long nr_mapped;	/* From page_state */
+
 	/* Ask shrink_caches, or shrink_zone to scan at this priority */
 	unsigned int priority;
 
 	/* This context's GFP mask */
 	unsigned int gfp_mask;
 
-	struct page_state ps;
 	int may_writepage;
 };
 
@@ -686,7 +687,7 @@ refill_inactive_zone(struct zone *zone, 
 	 * mapped memory instead of just pagecache.  Work out how much memory
 	 * is mapped.
 	 */
-	mapped_ratio = (sc->ps.nr_mapped * 100) / total_memory;
+	mapped_ratio = (sc->nr_mapped * 100) / total_memory;
 
 	/*
 	 * Now decide how much we really want to unmap some pages.  The mapped
@@ -908,7 +909,7 @@ int try_to_free_pages(struct zone **zone
 		zones[i]->temp_priority = DEF_PRIORITY;
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-		get_page_state(&sc.ps);
+		sc.nr_mapped = read_page_state(nr_mapped);
 		sc.nr_scanned = 0;
 		sc.nr_reclaimed = 0;
 		sc.priority = priority;
@@ -985,7 +986,7 @@ static int balance_pgdat(pg_data_t *pgda
 
 	sc.gfp_mask = GFP_KERNEL;
 	sc.may_writepage = 0;
-	get_page_state(&sc.ps);
+	sc.nr_mapped = read_page_state(nr_mapped);
 
 	inc_page_state(pageoutrun);
 
_