diff -ur linux.orig/fs/proc/proc_misc.c linux/fs/proc/proc_misc.c
--- linux.orig/fs/proc/proc_misc.c	Mon May  7 11:31:00 2001
+++ linux/fs/proc/proc_misc.c	Tue May  8 16:31:14 2001
@@ -148,6 +148,8 @@
 #define B(x) ((x) << PAGE_SHIFT)
         si_meminfo(&i);
         si_swapinfo(&i);
+	len = 0;
+#if 0
         len = sprintf(page, "        total:    used:    free:  shared: buffers:  cached:\n"
                 "Mem:  %8lu %8lu %8lu %8lu %8lu %8u\n"
                 "Swap: %8lu %8lu %8lu\n",
@@ -155,6 +157,7 @@
                 B(i.sharedram), B(i.bufferram),
                 B(atomic_read(&page_cache_size)), B(i.totalswap),
                 B(i.totalswap-i.freeswap), B(i.freeswap));
+#endif
         /*
          * Tagged format, for easy grepping and expansion.
          * The above will go away eventually, once the tools
@@ -175,7 +178,10 @@
                 "LowTotal:     %8lu kB\n"
                 "LowFree:      %8lu kB\n"
                 "SwapTotal:    %8lu kB\n"
-                "SwapFree:     %8lu kB\n",
+                "SwapFree:     %8lu kB\n"
+                "writable:     %8lu\n"
+                "kernel:       %8lu\n"
+                "locked:       %8lu\n",
                 K(i.totalram),
                 K(i.freeram),
                 K(i.sharedram),
@@ -190,7 +196,10 @@
                 K(i.totalram-i.totalhigh),
                 K(i.freeram-i.freehigh),
                 K(i.totalswap),
-                K(i.freeswap));
+                K(i.freeswap),
+                (unsigned long)atomic_read(&writable_pages),
+                (unsigned long)atomic_read(&kernel_pages),
+                (unsigned long)atomic_read(&locked_pages));
 
 	return proc_calc_metrics(page, start, off, count, eof, len);
 #undef B
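
For completeness, the three new tagged fields can be read from user space just like the existing ones. The snippet below is a minimal, illustrative reader and is not part of the patch; it only assumes the field names emitted by the format string added above and reports the raw page counts.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char line[128];
	unsigned long writable = 0, kernel = 0, locked = 0;

	if (!f) {
		perror("/proc/meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* field names exactly as emitted above; counts are in pages */
		sscanf(line, "writable: %lu", &writable);
		sscanf(line, "kernel: %lu", &kernel);
		sscanf(line, "locked: %lu", &locked);
	}
	fclose(f);
	printf("writable=%lu kernel=%lu locked=%lu (pages)\n",
	       writable, kernel, locked);
	return 0;
}
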
diff -ur linux.orig/include/linux/mm.h linux/include/linux/mm.h
--- linux.orig/include/linux/mm.h	Mon May  7 13:24:36 2001
+++ linux/include/linux/mm.h	Tue May  8 16:18:54 2001
@@ -153,6 +153,7 @@
 	struct page *next_hash;		/* Next page sharing our hash bucket in
 					   the page cache hash table. */
 	atomic_t count;			/* Usage count, see below. */
+	atomic_t write_count;		/* Writable references, see PG_writable below. */
 	unsigned long flags;		/* atomic flags, some possibly
 					   updated asynchronously */
 	struct list_head lru;		/* Pageout list, eg active_list;
@@ -282,9 +283,41 @@
 #define PG_skip			10
 #define PG_inactive_clean	11
 #define PG_highmem		12
+#define PG_writable		20
+#define PG_kernel		21
-				/* bits 21-29 unused */
+				/* bits 22-29 unused */
 #define PG_arch_1		30
 #define PG_reserved		31
+
+extern atomic_t kernel_pages, writable_pages, locked_pages;
+
+static inline void get_page_writable(struct page *page)
+{
+	atomic_inc(&page->write_count);
+	if (!test_and_set_bit(PG_writable, &page->flags))
+		atomic_inc(&writable_pages);
+}
+
+static inline void put_page_writable(struct page *page)
+{
+	if (atomic_dec_and_test(&page->write_count)) {
+		if (!test_and_clear_bit(PG_writable, &page->flags))
+			BUG();
+		atomic_dec(&writable_pages);
+	}
+}
+
+static inline void mark_page_kernel(struct page *page)
+{
+	if (!test_and_set_bit(PG_kernel, &page->flags))
+		atomic_inc(&kernel_pages);
+}
+
+static inline void clear_page_kernel(struct page *page)
+{
+	if (!test_and_clear_bit(PG_kernel, &page->flags))
+		atomic_dec(&kernel_pages);
+}
 
 /* Make it prettier to test the above... */
 #define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
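
The intended invariant behind the helpers above is that writable_pages counts pages with at least one writable reference (the first get_page_writable() sets PG_writable and bumps the counter, the last put_page_writable() clears the bit and drops it again), while write_count tracks the references themselves. Below is a small user-space model of that accounting, with stdatomic stand-ins for atomic_t and the page flag; it is illustrative only, not kernel code.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct model_page {
	atomic_int write_count;		/* models page->write_count */
	atomic_bool writable;		/* models the PG_writable bit */
};

static atomic_int writable_pages;	/* models the global counter */

static void model_get_writable(struct model_page *page)
{
	atomic_fetch_add(&page->write_count, 1);
	if (!atomic_exchange(&page->writable, true))	/* first writable ref? */
		atomic_fetch_add(&writable_pages, 1);
}

static void model_put_writable(struct model_page *page)
{
	if (atomic_fetch_sub(&page->write_count, 1) == 1) {	/* last ref dropped */
		bool was_set = atomic_exchange(&page->writable, false);
		assert(was_set);	/* mirrors the BUG() check above */
		atomic_fetch_sub(&writable_pages, 1);
	}
}

int main(void)
{
	struct model_page p;

	atomic_init(&p.write_count, 0);
	atomic_init(&p.writable, false);

	model_get_writable(&p);
	model_get_writable(&p);		/* second reference: counter unchanged */
	assert(atomic_load(&writable_pages) == 1);

	model_put_writable(&p);
	model_put_writable(&p);		/* last reference: counter drops */
	assert(atomic_load(&writable_pages) == 0);
	return 0;
}

The counter therefore moves only on the 0<->1 transitions of write_count, which is why the put side clears the flag rather than setting it.
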
diff -ur linux.orig/mm/filemap.c linux/mm/filemap.c
--- linux.orig/mm/filemap.c	Mon May  7 11:31:01 2001
+++ linux/mm/filemap.c	Tue May  8 12:12:18 2001
@@ -616,8 +616,8 @@
 }
 
 /**
- *      filemap_fdatawait - walk the list of locked pages of the given address space
- *     	and wait for all of them.
+ *      filemap_fdatawait - walk the list of locked pages of the given
+ *	address space and wait for all of them.
  * 
  *      @mapping: address space structure to wait for
  *
@@ -2788,6 +2788,7 @@
 			PAGE_BUG(page);
 		}
 
+		get_page_writable(page);
 		status = mapping->a_ops->prepare_write(file, page, offset, offset+bytes);
 		if (status)
 			goto sync_failure;
@@ -2808,6 +2809,7 @@
 		}
 unlock:
 		/* Mark it unlocked again and drop the page.. */
+		put_page_writable(page);
 		UnlockPage(page);
 		if (deactivate)
 			deactivate_page(page);
diff -ur linux.orig/mm/memory.c linux/mm/memory.c
--- linux.orig/mm/memory.c	Mon May  7 11:31:01 2001
+++ linux/mm/memory.c	Tue May  8 12:44:10 2001
@@ -871,6 +871,7 @@
 {
 	flush_page_to_ram(new_page);
 	flush_cache_page(vma, address);
+	get_page_writable(new_page);
 	establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
 }
 
@@ -951,6 +952,7 @@
 		/* FallThrough */
 	case 1:
 		flush_cache_page(vma, address);
+		get_page_writable(old_page);
 		establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
 		return 1;	/* Minor fault */
 	}
@@ -1179,8 +1181,10 @@
 	pte = mk_pte(page, vma->vm_page_prot);
 
 	swap_free(entry);
-	if (write_access && exclusive_swap_page(page))
+	if (write_access && exclusive_swap_page(page)) {
+		get_page_writable(page);
 		pte = pte_mkwrite(pte_mkdirty(pte));
+	}
 	UnlockPage(page);
 
 	set_pte(page_table, pte);
@@ -1220,6 +1224,7 @@
 		}
 		mm->rss++;
 		flush_page_to_ram(page);
+		get_page_writable(page);
 		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 	}
 
@@ -1281,6 +1286,7 @@
 		flush_icache_page(vma, new_page);
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		if (write_access) {
+			get_page_writable(new_page);
 			entry = pte_mkwrite(pte_mkdirty(entry));
 		} else if (page_count(new_page) > 1 &&
 			   !(vma->vm_flags & VM_SHARED))
diff -ur linux.orig/mm/page_alloc.c linux/mm/page_alloc.c
--- linux.orig/mm/page_alloc.c	Mon May  7 11:31:01 2001
+++ linux/mm/page_alloc.c	Tue May  8 19:35:37 2001
@@ -24,6 +24,8 @@
 int nr_inactive_dirty_pages;
 pg_data_t *pgdat_list;
 
+atomic_t writable_pages, locked_pages, kernel_pages;
+
 static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
 static int zone_balance_ratio[MAX_NR_ZONES] = { 32, 128, 128, };
 static int zone_balance_min[MAX_NR_ZONES] = { 10 , 10, 10, };
@@ -238,6 +240,7 @@
 			if (BAD_RANGE(zone,page))
 				BUG();
 			DEBUG_ADD_PAGE
+
 			return page;	
 		}
 		curr_order++;
@@ -248,9 +251,10 @@
 	return NULL;
 }
 
-#define PAGES_MIN	0
-#define PAGES_LOW	1
-#define PAGES_HIGH	2
+#define PAGES_EMERG	0
+#define PAGES_MIN	1
+#define PAGES_LOW	2
+#define PAGES_HIGH	3
 
 /*
  * This function does the dirty work for __alloc_pages
@@ -277,6 +281,9 @@
 		 */
 		switch (limit) {
 			default:
+			case PAGES_EMERG:
+				water_mark = z->pages_min/4;
+				break;
 			case PAGES_MIN:
 				water_mark = z->pages_min;
 				break;
@@ -314,7 +321,7 @@
 	int direct_reclaim = 0;
 	unsigned int gfp_mask = zonelist->gfp_mask;
 	struct page * page;
-
+	unsigned index;
 
 	/*
 	 * Allocations put pressure on the VM subsystem.
@@ -354,7 +361,7 @@
 		if (z->free_pages >= z->pages_low) {
 			page = rmqueue(z, order);
 			if (page)
-				return page;
+				goto common_return_page;
 		} else if (z->free_pages < z->pages_min &&
 					waitqueue_active(&kreclaimd_wait)) {
 				wake_up_interruptible(&kreclaimd_wait);
@@ -371,7 +378,7 @@
 	 */
 	page = __alloc_pages_limit(zonelist, order, PAGES_HIGH, direct_reclaim);
 	if (page)
-		return page;
+		goto common_return_page;
 
 	/*
 	 * Then try to allocate a page from a zone with more
@@ -383,7 +390,7 @@
 	 */
 	page = __alloc_pages_limit(zonelist, order, PAGES_LOW, direct_reclaim);
 	if (page)
-		return page;
+		goto common_return_page;
 
 	/*
 	 * OK, none of the zones on our zonelist has lots
@@ -418,7 +425,7 @@
 	 */
 	page = __alloc_pages_limit(zonelist, order, PAGES_MIN, direct_reclaim);
 	if (page)
-		return page;
+		goto common_return_page;
 		
 	/*
 	 *	If we dont want to try too hard then we can give up
@@ -467,7 +474,7 @@
 					/* Try if the allocation succeeds. */
 					page = rmqueue(z, order);
 					if (page)
-						return page;
+						goto common_return_page;
 				}
 			}
 		}
@@ -511,31 +518,40 @@
 		if (direct_reclaim) {
 			page = reclaim_page(z);
  			if (page)
-				return page;
+				goto common_return_page;
 		}
 
 		/* XXX: is pages_min/4 a good amount to reserve for this? */
 		if (z->free_pages < z->pages_min / 4 &&
-		    !((current->flags & PF_MEMALLOC) && 
-		      (gfp_mask & __GFP_WAIT)))
+		    (!(current->flags & PF_MEMALLOC) ||
+		      in_interrupt()))
 			continue;
 		page = rmqueue(z, order);
 		if (page)
-			return page;
+			goto common_return_page;
 	}
 
 	// okay - we are in trouble, lets go to the DMA pool directly:
-
+#if 0
 	{
 		zone_t *z = pgdat_list->node_zones;
 
 		page = rmqueue(z, order);
 		if (page)
-			return page;
+			goto common_return_page;
 	}
+#endif
+
 	/* No luck.. */
-	printk(KERN_INFO "__alloc_pages: %lu-order allocation failed.\n", order);
+	printk("__alloc_pages: %lu-order allocation failed.\n", order);
 	return NULL;
+
+common_return_page:
+	for (index = 0; index < (1 << order); index++) {
+		atomic_set(&(page+index)->write_count, 0);	/* fresh page: no writable refs */
+		mark_page_kernel(page+index);
+	}
+	return page;
 }
 
 /*
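
With PAGES_EMERG added, __alloc_pages_limit() now distinguishes four per-zone allocation thresholds instead of three. As a rough sketch of the resulting watermark ladder (illustrative only; the PAGES_LOW and PAGES_HIGH cases fall outside the hunk above and are assumed to keep their stock mapping to pages_low and pages_high):

#define PAGES_EMERG	0
#define PAGES_MIN	1
#define PAGES_LOW	2
#define PAGES_HIGH	3

/* Reduced model of a zone: only the stock watermarks matter here. */
struct zone_marks {
	unsigned long pages_min, pages_low, pages_high;
};

static unsigned long water_mark_for(const struct zone_marks *z, int limit)
{
	switch (limit) {
	case PAGES_EMERG:		/* new: deep reserve, pages_min/4 */
		return z->pages_min / 4;
	case PAGES_MIN:
		return z->pages_min;
	case PAGES_LOW:			/* assumed unchanged */
		return z->pages_low;
	case PAGES_HIGH:
	default:
		return z->pages_high;
	}
}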