diff -urNp --exclude CVS --exclude BitKeeper x-ref/fs/proc/base.c x/fs/proc/base.c
--- x-ref/fs/proc/base.c	2003-04-09 03:49:41.000000000 +0200
+++ x/fs/proc/base.c	2003-04-09 03:49:50.000000000 +0200
@@ -484,8 +484,30 @@ static ssize_t mapbase_write(struct file
 	
 	newbase = simple_strtoul(buffer, NULL, 0);
 
-	if (newbase > 0)
+	if (newbase > 0) {
+		struct mm_struct * mm;
+
 		task->map_base = newbase;
+
+		task_lock(task);
+		mm = task->mm;
+		if (mm)
+			atomic_inc(&mm->mm_users);
+		task_unlock(task);
+
+		if (mm) {
+			/*
+			 * free_area_cache is also updated by
+			 * arch_get_unmapped_area() with mmap_sem held
+			 * for writing, so take the write lock here too:
+			 * a read lock would not exclude other writers.
+			 */
+			down_write(&mm->mmap_sem);
+			mm->free_area_cache = newbase;
+			up_write(&mm->mmap_sem);
+
+			mmput(mm);
+		}
+	}
 	
 	return len;
 }
diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/linux/sched.h x/include/linux/sched.h
--- x-ref/include/linux/sched.h	2003-04-09 03:49:43.000000000 +0200
+++ x/include/linux/sched.h	2003-04-09 04:38:03.000000000 +0200
@@ -230,6 +230,7 @@ struct mm_struct {
 	struct vm_area_struct * mmap;		/* list of VMAs */
 	rb_root_t mm_rb;
 	struct vm_area_struct * mmap_cache;	/* last find_vma result */
+	unsigned long free_area_cache;		/* first hole: get_unmapped_area search hint */
 	pgd_t * pgd;
 	atomic_t mm_users;			/* How many users with user space? */
 	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
diff -urNp --exclude CVS --exclude BitKeeper x-ref/kernel/fork.c x/kernel/fork.c
--- x-ref/kernel/fork.c	2003-04-09 03:49:43.000000000 +0200
+++ x/kernel/fork.c	2003-04-09 04:38:03.000000000 +0200
@@ -295,6 +295,7 @@ static struct mm_struct * mm_init(struct
 	atomic_set(&mm->mm_count, 1);
 	init_rwsem(&mm->mmap_sem);
 	mm->page_table_lock = SPIN_LOCK_UNLOCKED;
+	mm->free_area_cache = 0;	/* lazily set to TASK_UNMAPPED_BASE by arch_get_unmapped_area() */
 	mm->pgd = pgd_alloc(mm);
 	mm->def_flags = 0;
 	if (mm->pgd)
diff -urNp --exclude CVS --exclude BitKeeper x-ref/mm/mmap.c x/mm/mmap.c
--- x-ref/mm/mmap.c	2003-04-09 03:49:43.000000000 +0200
+++ x/mm/mmap.c	2003-04-09 04:38:03.000000000 +0200
@@ -651,25 +651,47 @@ free_vma:
 #ifndef HAVE_ARCH_UNMAPPED_AREA
 static inline unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
+	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
+	unsigned long start_addr;
+	int found_hole = 0;
 
 	if (len > TASK_SIZE)
 		return -ENOMEM;
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(current->mm, addr);
+		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
+	if (unlikely(!mm->free_area_cache))
+		mm->free_area_cache = PAGE_ALIGN(TASK_UNMAPPED_BASE);
+	start_addr = addr = mm->free_area_cache;
 
-	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+full_search:
+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
 		/* At this point:  (!vma || addr < vma->vm_end). */
 		unsigned long __heap_stack_gap;
-		if (TASK_SIZE - len < addr)
+		if (TASK_SIZE - len < addr) {
+			/*
+			 * Starting at the cached address may have made
+			 * us skip usable holes below it: restart from
+			 * the bottom before giving up.
+			 */
+			if (start_addr != PAGE_ALIGN(TASK_UNMAPPED_BASE)) {
+				start_addr = addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
+				found_hole = 0;
+				goto full_search;
+			}
 			return -ENOMEM;
+		}
+		if (!found_hole && (!vma || addr < vma->vm_start)) {
+			/* remember the first hole as the next search origin */
+			mm->free_area_cache = addr;
+			found_hole = 1;
+		}
 		if (!vma)
 			return addr;
 		__heap_stack_gap = 0;
@@ -839,6 +861,12 @@ static struct vm_area_struct * unmap_fix
 	if (area->vm_flags & VM_LOCKED)
 		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
 
+	/*
+	 * Is this a new hole at the lowest possible address?
+	 */
+	if (addr >= PAGE_ALIGN(TASK_UNMAPPED_BASE) && addr < mm->free_area_cache)
+		mm->free_area_cache = addr;
+
 	/* Unmapping the whole area. */
 	if (addr == area->vm_start && end == area->vm_end) {
 		if (area->vm_ops && area->vm_ops->close)