diff -upN reference/Documentation/vm/locking current/Documentation/vm/locking
--- reference/Documentation/vm/locking	2003-10-14 15:50:09.000000000 -0700
+++ current/Documentation/vm/locking	2004-04-29 10:39:31.000000000 -0700
@@ -66,7 +66,7 @@ in some cases it is not really needed. E
 expand_stack(), it is hard to come up with a destructive scenario without 
 having the vmlist protection in this case.
 
-The page_table_lock nests with the inode i_shared_sem and the kmem cache
+The page_table_lock nests with the inode i_shared_lock and the kmem cache
 c_spinlock spinlocks.  This is okay, since the kmem code asks for pages after
 dropping c_spinlock.  The page_table_lock also nests with pagecache_lock and
 pagemap_lru_lock spinlocks, and no code asks for memory with these locks
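
The nesting described above can be pictured with a minimal sketch (illustrative only, not part of the patch to apply; the function name is made up and the usual <linux/mm.h>/<linux/fs.h> declarations are assumed). It takes the locks in the same order vma_link() in mm/mmap.c does below: i_shared_lock first, page_table_lock nested inside it.

/*
 * Illustrative sketch: acquire in the documented nesting order.
 */
static void example_update_mapping(struct mm_struct *mm,
				   struct address_space *mapping)
{
	spin_lock(&mapping->i_shared_lock);	/* outer lock */
	spin_lock(&mm->page_table_lock);	/* nests inside i_shared_lock */

	/* ... update the i_mmap prio trees and the mm's vma lists ... */

	spin_unlock(&mm->page_table_lock);
	spin_unlock(&mapping->i_shared_lock);
}
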
diff -upN reference/fs/hugetlbfs/inode.c current/fs/hugetlbfs/inode.c
--- reference/fs/hugetlbfs/inode.c	2004-04-29 10:39:30.000000000 -0700
+++ current/fs/hugetlbfs/inode.c	2004-04-29 10:39:31.000000000 -0700
@@ -325,14 +325,14 @@ static int hugetlb_vmtruncate(struct ino
 	pgoff = offset >> HPAGE_SHIFT;
 
 	inode->i_size = offset;
-	down(&mapping->i_shared_sem);
+	spin_lock(&mapping->i_shared_lock);
 	/* Protect against page fault */
 	atomic_inc(&mapping->truncate_count);
 	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
 		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
 	if (unlikely(!prio_tree_empty(&mapping->i_mmap_shared)))
 		hugetlb_vmtruncate_list(&mapping->i_mmap_shared, pgoff);
-	up(&mapping->i_shared_sem);
+	spin_unlock(&mapping->i_shared_lock);
 	truncate_hugepages(mapping, offset);
 	return 0;
 }
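
The "Protect against page fault" comment above relies on the fault side re-reading mapping->truncate_count after it has taken the page_table_lock. A hedged sketch of that handshake follows; example_fault_check() is a hypothetical helper, simplified from what the real fault path does.

/*
 * Illustrative sketch of the truncate_count check, not kernel code.
 */
static int example_fault_check(struct address_space *mapping,
			       struct vm_area_struct *vma)
{
	int seq = atomic_read(&mapping->truncate_count);

	/* ... find or read in the page, without page_table_lock held ... */

	spin_lock(&vma->vm_mm->page_table_lock);
	if (seq != atomic_read(&mapping->truncate_count)) {
		/* a truncate ran meanwhile: back out and retry the fault */
		spin_unlock(&vma->vm_mm->page_table_lock);
		return -EAGAIN;		/* "retry" in this sketch */
	}
	/* ... safe to establish the pte for the page found above ... */
	spin_unlock(&vma->vm_mm->page_table_lock);
	return 0;
}
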
diff -upN reference/fs/inode.c current/fs/inode.c
--- reference/fs/inode.c	2004-04-29 10:39:30.000000000 -0700
+++ current/fs/inode.c	2004-04-29 10:39:31.000000000 -0700
@@ -185,7 +185,7 @@ void inode_init_once(struct inode *inode
 	sema_init(&inode->i_sem, 1);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
 	spin_lock_init(&inode->i_data.page_lock);
-	init_MUTEX(&inode->i_data.i_shared_sem);
+	spin_lock_init(&inode->i_data.i_shared_lock);
 	atomic_set(&inode->i_data.truncate_count, 0);
 	INIT_LIST_HEAD(&inode->i_data.private_list);
 	spin_lock_init(&inode->i_data.private_lock);
diff -upN reference/include/linux/fs.h current/include/linux/fs.h
--- reference/include/linux/fs.h	2004-04-29 10:39:30.000000000 -0700
+++ current/include/linux/fs.h	2004-04-29 10:39:31.000000000 -0700
@@ -333,7 +333,7 @@ struct address_space {
 	struct prio_tree_root	i_mmap;		/* tree of private mappings */
 	struct prio_tree_root	i_mmap_shared;	/* tree of shared mappings */
 	struct list_head	i_mmap_nonlinear;/*list of nonlinear mappings */
-	struct semaphore	i_shared_sem;	/* protect both above lists */
+	spinlock_t		i_shared_lock;	/* spinlock protecting both */
 	atomic_t		truncate_count;	/* Cover race condition with truncate */
 	unsigned long		flags;		/* error bits/gfp mask */
 	struct backing_dev_info *backing_dev_info; /* device readahead, etc */
diff -upN reference/include/linux/mm.h current/include/linux/mm.h
--- reference/include/linux/mm.h	2004-04-29 10:39:30.000000000 -0700
+++ current/include/linux/mm.h	2004-04-29 10:39:31.000000000 -0700
@@ -232,7 +232,7 @@ static inline void __vma_prio_tree_add(s
  * We cannot modify vm_start, vm_end, vm_pgoff fields of a vma that has been
  * already present in an i_mmap{_shared} tree without modifying the tree. The
  * following helper function should be used when such modifications are
- * necessary. We should hold the mapping's i_shared_sem.
+ * necessary. We should hold the mapping's i_shared_lock.
  *
  * This function can be (micro)optimized for some special cases (maybe later).
  */
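
As a hedged illustration of that rule, and of the pattern vma_merge() and do_mremap() follow later in this patch, the sketch below trims the front of a file-backed vma while holding both locks. The wrapper name is hypothetical.

/* Illustrative only: __vma_modify() under i_shared_lock + page_table_lock. */
static void example_vma_trim_front(struct prio_tree_root *root,
				   struct vm_area_struct *vma,
				   unsigned long new_start)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long new_pgoff = vma->vm_pgoff +
			((new_start - vma->vm_start) >> PAGE_SHIFT);

	spin_lock(&mapping->i_shared_lock);		/* protects the prio tree */
	spin_lock(&vma->vm_mm->page_table_lock);	/* protects the vma fields */
	__vma_modify(root, vma, new_start, vma->vm_end, new_pgoff);
	spin_unlock(&vma->vm_mm->page_table_lock);
	spin_unlock(&mapping->i_shared_lock);
}
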
diff -upN reference/kernel/fork.c current/kernel/fork.c
--- reference/kernel/fork.c	2004-04-29 10:39:30.000000000 -0700
+++ current/kernel/fork.c	2004-04-29 10:39:31.000000000 -0700
@@ -332,9 +332,9 @@ static inline int dup_mmap(struct mm_str
 				atomic_dec(&inode->i_writecount);
       
 			/* insert tmp into the share list, just after mpnt */
-			down(&file->f_mapping->i_shared_sem);
+			spin_lock(&file->f_mapping->i_shared_lock);
 			__vma_prio_tree_add(tmp, mpnt);
-			up(&file->f_mapping->i_shared_sem);
+			spin_unlock(&file->f_mapping->i_shared_lock);
 		}
 
 		/*
diff -upN reference/mm/filemap.c current/mm/filemap.c
--- reference/mm/filemap.c	2004-04-29 10:39:30.000000000 -0700
+++ current/mm/filemap.c	2004-04-29 10:39:31.000000000 -0700
@@ -55,17 +55,17 @@
 /*
  * Lock ordering:
  *
- *  ->i_shared_sem		(vmtruncate)
+ *  ->i_shared_lock		(vmtruncate)
  *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
  *      ->swap_list_lock
  *        ->swap_device_lock	(exclusive_swap_page, others)
  *          ->mapping->page_lock
  *
  *  ->i_sem
- *    ->i_shared_sem		(truncate->invalidate_mmap_range)
+ *    ->i_shared_lock		(truncate->invalidate_mmap_range)
  *
  *  ->mmap_sem
- *    ->i_shared_sem		(various places)
+ *    ->i_shared_lock		(various places)
  *
  *  ->mmap_sem
  *    ->lock_page		(access_process_vm)
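
A minimal sketch of honoring the ordering above (illustrative; the helper is hypothetical): mmap_sem is always taken before i_shared_lock, never after it.

/* Illustrative only: respect the documented ->mmap_sem -> ->i_shared_lock order. */
static void example_ordered_update(struct mm_struct *mm,
				   struct address_space *mapping)
{
	down_write(&mm->mmap_sem);		/* outer: ->mmap_sem */
	spin_lock(&mapping->i_shared_lock);	/* inner: ->i_shared_lock */

	/* ... adjust vmas and the i_mmap trees together ... */

	spin_unlock(&mapping->i_shared_lock);
	up_write(&mm->mmap_sem);
}

Taking them the other way around would deadlock against mmap() and munmap(), which already hold mmap_sem when they reach the i_shared_lock sites touched by this patch.
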
diff -upN reference/mm/fremap.c current/mm/fremap.c
--- reference/mm/fremap.c	2004-04-29 10:39:30.000000000 -0700
+++ current/mm/fremap.c	2004-04-29 10:39:31.000000000 -0700
@@ -203,13 +203,13 @@ asmlinkage long sys_remap_file_pages(uns
 		linear_pgoff += ((start - vma->vm_start) >> PAGE_SHIFT);
 		if (pgoff != linear_pgoff && !(vma->vm_flags & VM_NONLINEAR)) {
 			mapping = vma->vm_file->f_mapping;
-			down(&mapping->i_shared_sem);
+			spin_lock(&mapping->i_shared_lock);
 			vma->vm_flags |= VM_NONLINEAR;
 			__vma_prio_tree_remove(&mapping->i_mmap_shared, vma);
 			INIT_VMA_SHARED_LIST(vma);
 			list_add_tail(&vma->shared.vm_set.list,
 					&mapping->i_mmap_nonlinear);
-			up(&mapping->i_shared_sem);
+			spin_unlock(&mapping->i_shared_lock);
 		}
 
 		/* ->populate can take a long time, so downgrade the lock. */
diff -upN reference/mm/memory.c current/mm/memory.c
--- reference/mm/memory.c	2004-04-29 10:39:30.000000000 -0700
+++ current/mm/memory.c	2004-04-29 10:39:31.000000000 -0700
@@ -1133,14 +1133,14 @@ void invalidate_mmap_range(struct addres
 		if (holeend & ~(long long)ULONG_MAX)
 			hlen = ULONG_MAX - hba + 1;
 	}
-	down(&mapping->i_shared_sem);
+	spin_lock(&mapping->i_shared_lock);
 	/* Protect against page fault */
 	atomic_inc(&mapping->truncate_count);
 	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
 		invalidate_mmap_range_list(&mapping->i_mmap, hba, hlen);
 	if (unlikely(!prio_tree_empty(&mapping->i_mmap_shared)))
 		invalidate_mmap_range_list(&mapping->i_mmap_shared, hba, hlen);
-	up(&mapping->i_shared_sem);
+	spin_unlock(&mapping->i_shared_lock);
 }
 EXPORT_SYMBOL_GPL(invalidate_mmap_range);
 
diff -upN reference/mm/mmap.c current/mm/mmap.c
--- reference/mm/mmap.c	2004-04-29 10:39:30.000000000 -0700
+++ current/mm/mmap.c	2004-04-29 10:39:31.000000000 -0700
@@ -67,7 +67,7 @@ int mmap_use_hugepages = 0;
 int mmap_hugepages_map_sz = 256;
 
 /*
- * Requires inode->i_mapping->i_shared_sem
+ * Requires inode->i_mapping->i_shared_lock
  */
 static inline void
 __remove_shared_vm_struct(struct vm_area_struct *vma, struct inode *inode,
@@ -96,10 +96,10 @@ static void remove_shared_vm_struct(stru
 
 	if (file) {
 		struct address_space *mapping = file->f_mapping;
-		down(&mapping->i_shared_sem);
+		spin_lock(&mapping->i_shared_lock);
 		__remove_shared_vm_struct(vma, file->f_dentry->d_inode,
 				mapping);
-		up(&mapping->i_shared_sem);
+		spin_unlock(&mapping->i_shared_lock);
 	}
 }
 
@@ -304,12 +304,12 @@ static void vma_link(struct mm_struct *m
 		mapping = vma->vm_file->f_mapping;
 
 	if (mapping)
-		down(&mapping->i_shared_sem);
+		spin_lock(&mapping->i_shared_lock);
 	spin_lock(&mm->page_table_lock);
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
 	spin_unlock(&mm->page_table_lock);
 	if (mapping)
-		up(&mapping->i_shared_sem);
+		spin_unlock(&mapping->i_shared_lock);
 
 	mark_mm_hugetlb(mm, vma);
 	mm->map_count++;
@@ -319,7 +319,7 @@ static void vma_link(struct mm_struct *m
 /*
  * Insert vm structure into process list sorted by address and into the inode's
  * i_mmap ring. The caller should hold mm->page_table_lock and
- * ->f_mappping->i_shared_sem if vm_file is non-NULL.
+ * ->f_mapping->i_shared_lock if vm_file is non-NULL.
  */
 static void
 __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
@@ -410,7 +410,7 @@ static struct vm_area_struct *vma_merge(
 	spinlock_t *lock = &mm->page_table_lock;
 	struct inode *inode = file ? file->f_dentry->d_inode : NULL;
 	struct address_space *mapping = file ? file->f_mapping : NULL;
-	struct semaphore *i_shared_sem;
+	spinlock_t *i_shared_lock;
 	struct prio_tree_root *root = NULL;
 
 	/*
@@ -420,7 +420,7 @@ static struct vm_area_struct *vma_merge(
 	if (vm_flags & VM_SPECIAL)
 		return NULL;
 
-	i_shared_sem = file ? &file->f_mapping->i_shared_sem : NULL;
+	i_shared_lock = file ? &file->f_mapping->i_shared_lock : NULL;
 
 	if (mapping) {
 		if (vm_flags & VM_SHARED) {
@@ -446,7 +446,7 @@ static struct vm_area_struct *vma_merge(
 
 		if (unlikely(file && prev->vm_next &&
 				prev->vm_next->vm_file == file)) {
-			down(i_shared_sem);
+			spin_lock(i_shared_lock);
 			need_up = 1;
 		}
 		spin_lock(lock);
@@ -464,7 +464,7 @@ static struct vm_area_struct *vma_merge(
 			__remove_shared_vm_struct(next, inode, mapping);
 			spin_unlock(lock);
 			if (need_up)
-				up(i_shared_sem);
+				spin_unlock(i_shared_lock);
 			if (file)
 				fput(file);
 
@@ -476,7 +476,7 @@ static struct vm_area_struct *vma_merge(
 		__vma_modify(root, prev, prev->vm_start, end, prev->vm_pgoff);
 		spin_unlock(lock);
 		if (need_up)
-			up(i_shared_sem);
+			spin_unlock(i_shared_lock);
 		return prev;
 	}
 
@@ -491,13 +491,13 @@ static struct vm_area_struct *vma_merge(
 			return NULL;
 		if (end == prev->vm_start) {
 			if (file)
-				down(i_shared_sem);
+				spin_lock(i_shared_lock);
 			spin_lock(lock);
 			__vma_modify(root, prev, addr, prev->vm_end,
 				prev->vm_pgoff - ((end - addr) >> PAGE_SHIFT));
 			spin_unlock(lock);
 			if (file)
-				up(i_shared_sem);
+				spin_unlock(i_shared_lock);
 			return prev;
 		}
 	}
@@ -1362,7 +1362,7 @@ int split_vma(struct mm_struct * mm, str
 	}
 
 	if (mapping)
-		down(&mapping->i_shared_sem);
+		spin_lock(&mapping->i_shared_lock);
 	spin_lock(&mm->page_table_lock);
 
 	if (new_below)
@@ -1375,7 +1375,7 @@ int split_vma(struct mm_struct * mm, str
 
 	spin_unlock(&mm->page_table_lock);
 	if (mapping)
-		up(&mapping->i_shared_sem);
+		spin_unlock(&mapping->i_shared_lock);
 
 	return 0;
 }
@@ -1609,7 +1609,7 @@ void exit_mmap(struct mm_struct *mm)
 
 /* Insert vm structure into process list sorted by address
  * and into the inode's i_mmap ring.  If vm_file is non-NULL
- * then i_shared_sem is taken here.
+ * then i_shared_lock is taken here.
  */
 void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 {
diff -upN reference/mm/mremap.c current/mm/mremap.c
--- reference/mm/mremap.c	2004-04-29 10:39:30.000000000 -0700
+++ current/mm/mremap.c	2004-04-29 10:39:31.000000000 -0700
@@ -295,7 +295,7 @@ static unsigned long move_vma(struct vm_
 		 * and we propagate stale pages into the dst afterward.
 		 */
 		mapping = vma->vm_file->f_mapping;
-		down(&mapping->i_shared_sem);
+		spin_lock(&mapping->i_shared_lock);
 	}
 	moved_len = move_page_tables(vma, new_addr, old_addr, old_len);
 	if (moved_len < old_len) {
@@ -311,7 +311,7 @@ static unsigned long move_vma(struct vm_
 		new_addr = -ENOMEM;
 	}
 	if (mapping)
-		up(&mapping->i_shared_sem);
+		spin_unlock(&mapping->i_shared_lock);
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT) {
@@ -476,7 +476,7 @@ unsigned long do_mremap(unsigned long ad
 				}
 				else
 					root = &mapping->i_mmap;
-				down(&mapping->i_shared_sem);
+				spin_lock(&mapping->i_shared_lock);
 			}
 
 			spin_lock(&vma->vm_mm->page_table_lock);
@@ -485,7 +485,7 @@ unsigned long do_mremap(unsigned long ad
 			spin_unlock(&vma->vm_mm->page_table_lock);
 
 			if(mapping)
-				up(&mapping->i_shared_sem);
+				spin_unlock(&mapping->i_shared_lock);
 
 			current->mm->total_vm += pages;
 			if (vma->vm_flags & VM_LOCKED) {
diff -upN reference/mm/rmap.c current/mm/rmap.c
--- reference/mm/rmap.c	2004-04-29 10:39:30.000000000 -0700
+++ current/mm/rmap.c	2004-04-29 10:39:31.000000000 -0700
@@ -267,7 +267,7 @@ out:
  *
  * This function is only called from page_referenced for object-based pages.
  *
- * The semaphore address_space->i_shared_sem is tried.  If it can't be gotten,
+ * The spinlock address_space->i_shared_lock is tried.  If it can't be gotten,
  * assume a reference count of 0, so try_to_unmap will then have a go.
  */
 static inline int page_referenced_obj(struct page *page, int *mapcount)
@@ -279,7 +279,7 @@ static inline int page_referenced_obj(st
 	unsigned long address;
 	int referenced = 0;
 
-	if (down_trylock(&mapping->i_shared_sem))
+	if (!spin_trylock(&mapping->i_shared_lock))
 		return 0;
 
 	vma = __vma_prio_tree_first(&mapping->i_mmap,
@@ -322,7 +322,7 @@ static inline int page_referenced_obj(st
 	if (list_empty(&mapping->i_mmap_nonlinear))
 		WARN_ON(*mapcount > 0);
 out:
-	up(&mapping->i_shared_sem);
+	spin_unlock(&mapping->i_shared_lock);
 	return referenced;
 }
 
@@ -696,7 +696,7 @@ out:
  *
  * This function is only called from try_to_unmap for object-based pages.
  *
- * The semaphore address_space->i_shared_sem is tried.  If it can't be gotten,
+ * The spinlock address_space->i_shared_lock is tried.  If it can't be gotten,
  * return a temporary error.
  */
 static inline int try_to_unmap_obj(struct page *page, int *mapcount)
@@ -711,7 +711,7 @@ static inline int try_to_unmap_obj(struc
 	unsigned long max_nl_cursor = 0;
 	unsigned long max_nl_size = 0;
 
-	if (down_trylock(&mapping->i_shared_sem))
+	if (!spin_trylock(&mapping->i_shared_lock))
 		return ret;
 
 	vma = __vma_prio_tree_first(&mapping->i_mmap,
@@ -813,7 +813,7 @@ static inline int try_to_unmap_obj(struc
 relock:
 	rmap_lock(page);
 out:
-	up(&mapping->i_shared_sem);
+	spin_unlock(&mapping->i_shared_lock);
 	return ret;
 }
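
One subtlety worth spelling out: down_trylock() returns non-zero when the semaphore could not be taken, while spin_trylock() returns non-zero when the lock was taken, hence the added '!' in the two conversions above. A minimal sketch of the resulting back-off-if-contended pattern (the function name is hypothetical):

/* Illustrative only: give up quietly if i_shared_lock is contended. */
static int example_try_walk(struct address_space *mapping)
{
	if (!spin_trylock(&mapping->i_shared_lock))
		return 0;		/* contended: caller treats as "no info" */

	/* ... walk the i_mmap / i_mmap_shared prio trees ... */

	spin_unlock(&mapping->i_shared_lock);
	return 1;
}

Backing off matches the existing design: page_referenced_obj() then reports no references and try_to_unmap_obj() returns a temporary error, exactly as the comments above describe.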