diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/linux/mm.h x/include/linux/mm.h
--- x-ref/include/linux/mm.h	2003-05-11 05:52:51.000000000 +0200
+++ x/include/linux/mm.h	2003-05-11 05:54:20.000000000 +0200
@@ -572,11 +572,28 @@ static inline void __vma_unlink(struct m
 		mm->mmap_cache = prev;
 }
 
-static inline int can_vma_merge(struct vm_area_struct * vma, unsigned long vm_flags)
+#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
+
+#define can_vma_merge(vma, vm_flags) __can_vma_merge(vma, vm_flags, NULL, 0, 0)
+/*
+ * We don't check here for the merged mmap wrapping around the end of pagecache
+ * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmaps which
+ * wrap, nor mmaps which cover the final page at index 0xffffffff.
+ */
+static inline int __can_vma_merge(struct vm_area_struct * vma, unsigned long vm_flags,
+				  struct file * file, unsigned long vm_pgoff, unsigned long offset)
 {
-	if (!vma->vm_file && vma->vm_flags == vm_flags)
+	if (vma->vm_file == file && vma->vm_flags == vm_flags &&
+	    likely((!vma->vm_ops || !vma->vm_ops->close) && !vma->vm_private_data &&
+		   !(vm_flags & VM_SPECIAL))) {
+		if (file) {
+			if (vma->vm_pgoff == vm_pgoff + offset)
+				return 1;
+			else
+				return 0;
+		}
 		return 1;
-	else
+	} else
 		return 0;
 }
 
diff -urNp --exclude CVS --exclude BitKeeper x-ref/mm/mmap.c x/mm/mmap.c
--- x-ref/mm/mmap.c	2003-05-11 05:52:51.000000000 +0200
+++ x/mm/mmap.c	2003-05-11 05:53:12.000000000 +0200
@@ -349,40 +349,56 @@ static inline void vma_link(struct mm_st
 }
 
 static int vma_merge(struct mm_struct * mm, struct vm_area_struct * prev,
-		     rb_node_t * rb_parent, unsigned long addr, unsigned long end, unsigned long vm_flags)
+		     rb_node_t * rb_parent, unsigned long addr, unsigned long end,
+		     unsigned long vm_flags, struct file * file, unsigned long pgoff)
 {
 	spinlock_t * lock = &mm->page_table_lock;
 	if (!prev) {
 		prev = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
 		goto merge_next;
 	}
-	if (prev->vm_end == addr && can_vma_merge(prev, vm_flags)) {
+	if (prev->vm_end == addr && __can_vma_merge(prev, vm_flags, file,
+						    pgoff, -((prev->vm_end - prev->vm_start) >> PAGE_SHIFT))) {
 		struct vm_area_struct * next;
+		int need_unlock = 0;
 
+		if (unlikely(file && prev->vm_next && prev->vm_next->vm_file == file)) {
+			lock_vma_mappings(prev->vm_next);
+			need_unlock = 1;
+		}
 		spin_lock(lock);
 		prev->vm_end = end;
 		next = prev->vm_next;
-		if (next && prev->vm_end == next->vm_start && can_vma_merge(next, vm_flags)) {
+		if (next && prev->vm_end == next->vm_start && __can_vma_merge(next, vm_flags, file,
+									      pgoff, (end - addr) >> PAGE_SHIFT)) {
 			prev->vm_end = next->vm_end;
 			__vma_unlink(mm, next, prev);
+			__remove_shared_vm_struct(next);
 			spin_unlock(lock);
+			if (need_unlock)
+				unlock_vma_mappings(next);
+			if (file)
+				fput(file);
 
 			mm->map_count--;
 			kmem_cache_free(vm_area_cachep, next);
 			return 1;
 		}
 		spin_unlock(lock);
+		if (need_unlock)
+			unlock_vma_mappings(next);
 		return 1;
 	}
 
 	prev = prev->vm_next;
 	if (prev) {
  merge_next:
-		if (!can_vma_merge(prev, vm_flags))
+		if (!__can_vma_merge(prev, vm_flags, file, pgoff, (end - addr) >> PAGE_SHIFT))
 			return 0;
 		if (end == prev->vm_start) {
 			spin_lock(lock);
 			prev->vm_start = addr;
+			prev->vm_pgoff = pgoff;
 			spin_unlock(lock);
 			return 1;
 		}
@@ -503,7 +519,7 @@ munmap_back:
 
 	/* Can we just expand an old anonymous mapping? */
 	if (!file && !(vm_flags & VM_SHARED) && rb_parent)
-		if (vma_merge(mm, prev, rb_parent, addr, addr + len, vm_flags))
+		if (vma_merge(mm, prev, rb_parent, addr, addr + len, vm_flags, NULL, 0))
 			goto out;
 
 	/* Determine the object being mapped and call the appropriate
@@ -581,10 +597,18 @@ munmap_back:
 		}
 	}
 
-	vma_link(mm, vma, prev, rb_link, rb_parent);
-	if (correct_wcount)
-		atomic_inc(&file->f_dentry->d_inode->i_writecount);
-
+	if (!file || !rb_parent || !vma_merge(mm, prev, rb_parent, addr, addr + len, vma->vm_flags, file, pgoff)) {
+		vma_link(mm, vma, prev, rb_link, rb_parent);
+		if (correct_wcount)
+			atomic_inc(&file->f_dentry->d_inode->i_writecount);
+	} else {
+		if (file) {
+			if (correct_wcount)
+				atomic_inc(&file->f_dentry->d_inode->i_writecount);
+			fput(file);
+		}
+		kmem_cache_free(vm_area_cachep, vma);
+	}
 out:	
 	mm->total_vm += len >> PAGE_SHIFT;
 	if (vm_flags & VM_LOCKED) {
@@ -1087,7 +1111,7 @@ unsigned long do_brk(unsigned long addr,
 	flags = VM_DATA_DEFAULT_FLAGS | mm->def_flags;
 
 	/* Can we just expand an old anonymous mapping? */
-	if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags))
+	if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags, NULL, 0))
 		goto out;
 
 	/*