diff -urN dontuse-4/mm/filemap.c vm-tweaks-3/mm/filemap.c
--- dontuse-4/mm/filemap.c	Sun Sep 30 18:42:31 2001
+++ vm-tweaks-3/mm/filemap.c	Sun Sep 30 19:07:42 2001
@@ -1636,7 +1636,7 @@
 	 * and possibly copy it over to another page..
 	 */
 	old_page = page;
-	mark_page_accessed(page);
+	activate_page(page);
 	if (no_share) {
 		struct page *new_page = alloc_page(GFP_HIGHUSER);
 
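The filemap.c hunk above swaps mark_page_accessed() for activate_page() on the no-share fault path: the faulted page goes straight to the active list instead of earning its way up one reference at a time. Below is a rough userspace model of the two policies as I read this tree; struct upage, mark_accessed() and activate() are illustrative stand-ins, not the kernel's code.

/*
 * Rough model of gradual vs. immediate promotion. Everything here is
 * an approximation for illustration; build with: cc -o promote promote.c
 */
#include <stdbool.h>
#include <stdio.h>

struct upage {
	bool active;		/* models being on the active list */
	bool referenced;	/* models PG_referenced */
};

/* Gradual promotion, roughly what mark_page_accessed() does in this
 * tree: the first touch only sets the referenced bit, and a second
 * touch while the bit is still set activates the page. */
static void mark_accessed(struct upage *p)
{
	if (!p->active && p->referenced) {
		p->active = true;
		p->referenced = false;
	} else {
		p->referenced = true;
	}
}

/* Immediate promotion, what the patched fault path now uses; clearing
 * the bit matches the ClearPageReferenced() added in swap.c below. */
static void activate(struct upage *p)
{
	p->active = true;
	p->referenced = false;
}

int main(void)
{
	struct upage a = { false, false }, b = { false, false };

	mark_accessed(&a);	/* a is still inactive, only referenced */
	activate(&b);		/* b is active after a single fault */
	printf("a %s, b %s\n", a.active ? "active" : "inactive",
	       b.active ? "active" : "inactive");
	return 0;
}

The practical difference: a page faulted in through this path gets full active-list standing immediately, rather than needing a second reference to prove itself.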
diff -urN dontuse-4/mm/swap.c vm-tweaks-3/mm/swap.c
--- dontuse-4/mm/swap.c	Sun Sep 23 21:11:43 2001
+++ vm-tweaks-3/mm/swap.c	Sun Sep 30 19:07:42 2001
@@ -54,6 +54,7 @@
 		del_page_from_active_list(page);
 		add_page_to_inactive_list(page);
 	}
+	SetPageReferenced(page);
 }	
 
 void deactivate_page(struct page * page)
@@ -72,6 +73,7 @@
 		del_page_from_inactive_list(page);
 		add_page_to_active_list(page);
 	}
+	ClearPageReferenced(page);
 }
 
 void activate_page(struct page * page)
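The swap.c hunks tie the referenced bit to list movement: deactivate_page() now leaves the page marked referenced, so a freshly demoted page survives one pass of the swap-out scan, and activate_page() clears the bit so a promoted page starts with a clean reference history. Paired with the check added at the top of the vmscan.c swap-out path below (skip anything active, or inactive but still referenced), this amounts to a second-chance scheme. A rough single-page model follows; the names are mine, and the bit-clearing inside try_reclaim() is a stand-in for the aging the real inactive scan does elsewhere.

/*
 * Rough model of the second-chance behaviour these hunks set up.
 * All names are illustrative; build with: cc -o chance chance.c
 */
#include <stdbool.h>
#include <stdio.h>

struct upage {
	bool active;
	bool referenced;
};

static void deactivate(struct upage *p)
{
	p->active = false;
	p->referenced = true;	/* the SetPageReferenced() added above */
}

static void activate(struct upage *p)
{
	p->active = true;
	p->referenced = false;	/* the ClearPageReferenced() added above */
}

/* One reclaim probe, mirroring the added vmscan.c test: leave the page
 * alone if it is active, or inactive but still referenced. Clearing
 * the bit here models the aging the real scan performs elsewhere. */
static bool try_reclaim(struct upage *p)
{
	if (p->active || p->referenced) {
		p->referenced = false;
		return false;	/* spared this round */
	}
	return true;		/* eligible for eviction */
}

int main(void)
{
	struct upage p = { true, false };

	deactivate(&p);
	printf("first probe:  %s\n", try_reclaim(&p) ? "reclaim" : "spare");
	printf("second probe: %s\n", try_reclaim(&p) ? "reclaim" : "spare");
	return 0;
}

The point of the asymmetry is that demoting a page should make it a candidate for eviction, not evict it outright; the bit buys one round of grace in which a new reference can rescue the page.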
diff -urN dontuse-4/mm/vmscan.c vm-tweaks-3/mm/vmscan.c
--- dontuse-4/mm/vmscan.c	Sun Sep 30 18:42:31 2001
+++ vm-tweaks-3/mm/vmscan.c	Sun Sep 30 19:09:33 2001
@@ -55,6 +55,9 @@
 		return 0;
 	}
 
+	if (PageActive(page) || (PageInactive(page) && PageReferenced(page)))
+		return 0;
+
 	if (TryLockPage(page))
 		return 0;
 
@@ -329,7 +332,6 @@
 {
 	struct list_head * entry;
 
-	spin_lock(&pagemap_lru_lock);
 	while (max_scan && (entry = inactive_list.prev) != &inactive_list) {
 		struct page * page;
 		swp_entry_t swap;
@@ -358,8 +360,10 @@
 			continue;
 
 		/* Racy check to avoid trylocking when not worthwhile */
-		if (!page->buffers && page_count(page) != 1)
+		if (!page->buffers && page_count(page) != 1) {
+			activate_page_nolock(page);
 			continue;
+		}
 
 		/*
 		 * The page is locked. IO in progress?
@@ -508,7 +512,6 @@
 {
 	struct list_head * entry;
 
-	spin_lock(&pagemap_lru_lock);
 	entry = active_list.prev;
 	while (nr_pages-- && entry != &active_list) {
 		struct page * page;
@@ -523,8 +526,8 @@
 
 		del_page_from_active_list(page);
 		add_page_to_inactive_list(page);
+		SetPageReferenced(page);
 	}
-	spin_unlock(&pagemap_lru_lock);
 }
 
 static int FASTCALL(shrink_caches(int priority, zone_t * classzone, unsigned int gfp_mask, int nr_pages));
@@ -538,11 +541,12 @@
 	if (nr_pages <= 0)
 		return 0;
 
+	spin_lock(&pagemap_lru_lock);
 	nr_pages = chunk_size;
 	/* try to keep the active list 2/3 of the size of the cache */
 	ratio = (unsigned long) nr_pages * nr_active_pages / ((nr_inactive_pages + 1) * 2);
 	refill_inactive(ratio);
-  
+
 	max_scan = nr_inactive_pages / priority;
 	nr_pages = shrink_cache(nr_pages, max_scan, classzone, gfp_mask);
 	if (nr_pages <= 0)
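The remaining vmscan.c changes hoist the LRU lock one level: the spin_lock(&pagemap_lru_lock) at the top of shrink_cache() and the lock/unlock pair in refill_inactive() go away, and shrink_caches() takes the lock once before calling both. As I read the hunks, shrink_cache() still drops the lock on its own exit paths, so the aging pass and the reclaim scan now run back to back under a single acquisition; the new activate_page_nolock() call for busy pages fits the same picture, since that path already holds the lock. Below is a pthread stand-in for the hoisting pattern, simplified so the caller both takes and releases the lock; the counters and arithmetic are only there to echo the 2/3 ratio in the patched code.

/*
 * Userspace stand-in for the lock hoisting; names and counters are
 * illustrative. Build with: cc -o hoist hoist.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t lru_lock;
static int nr_active = 90, nr_inactive = 30;

/* No acquisition here any more: the caller must hold lru_lock. */
static void refill_inactive(int nr)
{
	while (nr-- > 0 && nr_active > 0) {
		nr_active--;
		nr_inactive++;	/* demote one page to the inactive list */
	}
}

/* Caller must hold lru_lock here as well. */
static int shrink_cache(int nr)
{
	while (nr > 0 && nr_inactive > 0) {
		nr_inactive--;	/* model reclaiming one inactive page */
		nr--;
	}
	return nr;
}

static int shrink_caches(int nr)
{
	int ratio;

	pthread_spin_lock(&lru_lock);	/* the hoisted acquisition */
	/* keep the active list at roughly 2/3 of the cache, as above */
	ratio = nr * nr_active / ((nr_inactive + 1) * 2);
	refill_inactive(ratio);
	nr = shrink_cache(nr);
	pthread_spin_unlock(&lru_lock);
	return nr;
}

int main(void)
{
	int left;

	pthread_spin_init(&lru_lock, PTHREAD_PROCESS_PRIVATE);
	left = shrink_caches(32);
	printf("still to free: %d (active=%d inactive=%d)\n",
	       left, nr_active, nr_inactive);
	pthread_spin_destroy(&lru_lock);
	return 0;
}

Holding the lock across both passes saves an unlock/lock pair per balancing round and, presumably the point, lets the pages just demoted by refill_inactive() reach the scan before another CPU can reorder the lists in between.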