diff -ur /md0/kernels/2.4/v2.4.8-ac3/include/asm-i386/kmap_types.h vm-2.4.8-ac3/include/asm-i386/kmap_types.h
--- /md0/kernels/2.4/v2.4.8-ac3/include/asm-i386/kmap_types.h	Thu May  3 11:22:18 2001
+++ vm-2.4.8-ac3/include/asm-i386/kmap_types.h	Mon Aug 13 15:21:00 2001
@@ -6,6 +6,8 @@
 	KM_BOUNCE_WRITE,
 	KM_SKB_DATA,
 	KM_SKB_DATA_SOFTIRQ,
+	KM_USER0,
+	KM_USER1,
 	KM_TYPE_NR
 };
 
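The two new enum slots give the generic highmem helpers dedicated per-CPU atomic mapping slots, so they can stop going through the sleeping kmap() pool; KM_TYPE_NR stays last, so the number of per-CPU slots is still derived from the enum. A minimal sketch of the usage pattern these slots permit, assuming a hypothetical caller (zero_pair() is illustrative, not kernel code):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Illustrative only: each km_type names a fixed per-CPU virtual
	 * slot, so two mappings can be live at once provided they use
	 * distinct slots and nothing in between sleeps.
	 */
	static void zero_pair(struct page *a, struct page *b)
	{
		void *va = kmap_atomic(a, KM_USER0);
		void *vb = kmap_atomic(b, KM_USER1);	/* second slot */

		memset(va, 0, PAGE_SIZE);
		memset(vb, 0, PAGE_SIZE);

		kunmap_atomic(vb, KM_USER1);
		kunmap_atomic(va, KM_USER0);
	}
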
diff -ur /md0/kernels/2.4/v2.4.8-ac3/include/linux/highmem.h vm-2.4.8-ac3/include/linux/highmem.h
--- /md0/kernels/2.4/v2.4.8-ac3/include/linux/highmem.h	Wed Aug  8 20:31:43 2001
+++ vm-2.4.8-ac3/include/linux/highmem.h	Mon Aug 13 15:25:38 2001
@@ -45,8 +45,9 @@
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	clear_user_page(kmap(page), vaddr);
-	kunmap(page);
+	void *addr = kmap_atomic(page, KM_USER0);
+	clear_user_page(addr, vaddr);
+	kunmap_atomic(addr, KM_USER0);
 }
 
 static inline void clear_highpage(struct page *page)
@@ -85,11 +86,11 @@
 {
 	char *vfrom, *vto;
 
-	vfrom = kmap(from);
-	vto = kmap(to);
+	vfrom = kmap_atomic(from, KM_USER0);
+	vto = kmap_atomic(to, KM_USER1);
 	copy_user_page(vto, vfrom, vaddr);
-	kunmap(from);
-	kunmap(to);
+	kunmap_atomic(vfrom, KM_USER0);
+	kunmap_atomic(vto, KM_USER1);
 }
 
 static inline void copy_highpage(struct page *to, struct page *from)
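Switching clear_user_highpage() and copy_user_highpage() from kmap()/kunmap() to the atomic variants keeps these short, bounded copies off the global kmap pool and its wait queue. The cost is a contract: nothing between map and unmap may sleep. A hedged sketch of that contract, with copy_from_high() as a hypothetical helper, not part of the patch:

	/* Hypothetical helper: between kmap_atomic() and kunmap_atomic()
	 * the mapping lives in a per-CPU slot, so blocking calls are
	 * forbidden in this window.
	 */
	static void copy_from_high(void *dst, struct page *page)
	{
		char *src = kmap_atomic(page, KM_USER0);
		memcpy(dst, src, PAGE_SIZE);	/* must not schedule here */
		kunmap_atomic(src, KM_USER0);
	}

Note that copy_user_highpage() needs both mappings live at once, which is why the previous hunk adds two slots rather than one.
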
diff -ur /md0/kernels/2.4/v2.4.8-ac3/mm/memory.c vm-2.4.8-ac3/mm/memory.c
--- /md0/kernels/2.4/v2.4.8-ac3/mm/memory.c	Mon Aug 13 15:12:54 2001
+++ vm-2.4.8-ac3/mm/memory.c	Mon Aug 13 15:21:00 2001
@@ -238,8 +238,10 @@
 
 cont_copy_pte_range:		set_pte(dst_pte, pte);
 cont_copy_pte_range_noset:	address += PAGE_SIZE;
-				if (address >= end)
-					goto out_unlock;
+				if (address >= end) {
+					spin_unlock(&src->page_table_lock);
+					goto out;
+				}
 				src_pte++;
 				dst_pte++;
 			} while ((unsigned long)src_pte & PTE_TABLE_MASK);
@@ -249,8 +251,6 @@
 			dst_pmd++;
 		} while ((unsigned long)src_pmd & PMD_TABLE_MASK);
 	}
-out_unlock:
-	spin_unlock(&src->page_table_lock);
 out:
 	spin_unlock(&dst->page_table_lock);
 	return 0;
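In copy_page_range() the shared out_unlock label is replaced by an unlock at the early-exit site itself, so the common out label pairs with exactly one lock, the destination's. Reduced to a compilable toy (all names and loop bounds illustrative, not the patch verbatim):

	/* Toy of the locking shape after the change: the inner lock is
	 * released wherever the loop is left, and the shared exit label
	 * touches only the outer lock.
	 */
	static int copy_range_toy(struct mm_struct *dst, struct mm_struct *src,
				  unsigned long address, unsigned long end)
	{
		spin_lock(&dst->page_table_lock);
		for (;;) {
			spin_lock(&src->page_table_lock);
			do {
				address += PAGE_SIZE;
				if (address >= end) {
					spin_unlock(&src->page_table_lock);
					goto out;
				}
			} while (address & ~PMD_MASK);
			spin_unlock(&src->page_table_lock);
		}
	out:
		spin_unlock(&dst->page_table_lock);
		return 0;
	}
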
@@ -452,7 +452,7 @@
 	if (err)
 		return err;
 
-	down_write(&mm->mmap_sem);
+	down_read(&mm->mmap_sem);
 
 	err = -EFAULT;
 	iobuf->locked = 0;
@@ -510,12 +510,12 @@
 		ptr += PAGE_SIZE;
 	}
 
-	up_write(&mm->mmap_sem);
+	up_read(&mm->mmap_sem);
 	dprintk ("map_user_kiobuf: end OK\n");
 	return 0;
 
  out_unlock:
-	up_write(&mm->mmap_sem);
+	up_read(&mm->mmap_sem);
 	unmap_kiobuf(iobuf);
 	dprintk ("map_user_kiobuf: end %d\n", err);
 	return err;
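map_user_kiobuf() only walks the VMA list and pins pages; it never creates or removes mappings, so a shared hold on mmap_sem is enough. Holding it for write needlessly serialized concurrent mappers against each other and against page faults in the same mm. The rule, as a hedged sketch (pin_pages_toy() and walk_and_pin() are hypothetical stand-ins for the kiobuf page walk):

	/* Hypothetical: readers of the address-space layout take
	 * mmap_sem shared; only operations that change the layout
	 * (mmap, munmap, mprotect, brk) need it exclusive.
	 */
	static int pin_pages_toy(struct mm_struct *mm, struct kiobuf *iobuf)
	{
		int err;

		down_read(&mm->mmap_sem);	/* lookup, not modification */
		err = walk_and_pin(mm, iobuf);	/* hypothetical page walk */
		up_read(&mm->mmap_sem);
		return err;
	}
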
@@ -859,7 +859,6 @@
 static inline void break_cow(struct vm_area_struct * vma, struct page *	old_page, struct page * new_page, unsigned long address, 
 		pte_t *page_table)
 {
-	copy_cow_page(old_page,new_page,address);
 	flush_page_to_ram(new_page);
 	flush_cache_page(vma, address);
 	establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
@@ -893,7 +892,7 @@
 	if (!VALID_PAGE(old_page))
 		goto bad_wp_page;
 
-	if (old_page == ZERO_PAGE(address))
+	if (old_page == ZERO_PAGE(address) || PageReserved(old_page))
 		goto copy;
 
 	/*
@@ -935,6 +934,13 @@
  	set_pte(page_table, pte);
 	spin_unlock(&mm->page_table_lock);
 	new_page = alloc_page(GFP_HIGHUSER);
+
+	/* Speculatively copy the page while not holding
+	 * the page table lock.  Make threads happy.  -ben
+	 */
+	if (new_page)
+		copy_cow_page(old_page, new_page, address);
+
 	spin_lock(&mm->page_table_lock);
 	if (!new_page)
 		return -1;
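This hunk pairs with the break_cow() one above: alloc_page(GFP_HIGHUSER) can sleep, so the lock was already being dropped around it, and the copy now happens in the same unlocked window. The copy is speculative because, once the lock is retaken, another thread may have resolved the fault already, in which case the freshly copied page is simply discarded. That is also why PageReserved pages now join ZERO_PAGE on the always-copy path: a reserved page is never privately owned, so a write fault on it must copy rather than reuse. The overall drop-work-revalidate shape, as a toy (pte_same() is the real helper; the other names are illustrative):

	/* Toy of the speculative-copy pattern, not the patch verbatim:
	 * do the expensive allocation and copy unlocked, retake the
	 * lock, and revalidate before committing.
	 */
	static int cow_toy(struct mm_struct *mm, pte_t *page_table, pte_t pte,
			   struct page *old_page, unsigned long address)
	{
		struct page *new_page;

		spin_unlock(&mm->page_table_lock);
		new_page = alloc_page(GFP_HIGHUSER);
		if (new_page)
			copy_cow_page(old_page, new_page, address);
		spin_lock(&mm->page_table_lock);
		if (!new_page)
			return -1;			/* OOM */
		if (!pte_same(*page_table, pte)) {
			__free_page(new_page);		/* raced: copy wasted */
			return 1;
		}
		/* ... otherwise install new_page via break_cow() ... */
		return 1;
	}
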
@@ -1190,6 +1196,8 @@
 		/* Allocate our own private page. */
 		spin_unlock(&mm->page_table_lock);
 		page = alloc_page(GFP_HIGHUSER);
+		if (page)
+			clear_user_highpage(page, addr);
 		spin_lock(&mm->page_table_lock);
 		if (!page)
 			return -1;
@@ -1198,7 +1206,6 @@
 			return 1;
 		}
 		mm->rss++;
-		clear_user_highpage(page, addr);
 		flush_page_to_ram(page);
 		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 	}
