diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/arch/i386/Kconfig 802-banana_split/arch/i386/Kconfig
--- 801-separate_pmd/arch/i386/Kconfig	Wed Aug 13 20:48:59 2003
+++ 802-banana_split/arch/i386/Kconfig	Wed Aug 13 20:51:03 2003
@@ -706,7 +706,6 @@ choice
 	
 config	05GB
 	bool "3.5 GB"
-	depends on !HIGHMEM64G
 	
 config	1GB
 	bool "3 GB"
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/arch/i386/mm/init.c 802-banana_split/arch/i386/mm/init.c
--- 801-separate_pmd/arch/i386/mm/init.c	Wed Aug 13 20:50:53 2003
+++ 802-banana_split/arch/i386/mm/init.c	Wed Aug 13 20:51:03 2003
@@ -121,6 +121,24 @@ static void __init page_table_range_init
 	}
 }
 
+
+/*
+ * Abstract out using large pages when mapping KVA, or the SMP identity
+ * mapping
+ */
+static void __init pmd_map_pfn_range(pmd_t *pmd_entry, unsigned long pfn, unsigned long max_pfn)
+{
+	int pte_ofs;
+	/* Map with big pages if possible, otherwise create normal page tables. */
+	if (cpu_has_pse) {
+		set_pmd(pmd_entry, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
+		pfn += PTRS_PER_PTE;
+	} else {
+		pte_t* pte = one_page_table_init(pmd_entry);
+		for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_pfn; pte++, pfn++, pte_ofs++)
+			set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
+	}
+}
 /*
  * This maps the physical memory to kernel virtual address space, a total 
  * of max_low_pfn pages, by creating page tables starting from address 
@@ -131,8 +149,7 @@ static void __init kernel_physical_mappi
 	unsigned long pfn;
 	pgd_t *pgd;
 	pmd_t *pmd;
-	pte_t *pte;
-	int pgd_idx, pmd_idx, pte_ofs;
+	int pgd_idx, pmd_idx;
 
 	pgd_idx = pgd_index(PAGE_OFFSET);
 	pgd = pgd_base + pgd_idx;
@@ -142,21 +159,48 @@ static void __init kernel_physical_mappi
 		pmd = one_md_table_init(pgd);
 		if (pfn >= max_low_pfn)
 			continue;
-		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
-			/* Map with big pages if possible, otherwise create normal page tables. */
-			if (cpu_has_pse) {
-				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
-				pfn += PTRS_PER_PTE;
-			} else {
-				pte = one_page_table_init(pmd);
-
-				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++)
-					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
-			}
+	
+		/* beware of starting KVA in the middle of a pmd. */
+		if (pgd_idx == pgd_index(PAGE_OFFSET)) {
+			pmd_idx = pmd_index(PAGE_OFFSET);
+			pmd = &pmd[pmd_idx];
+		} else
+			pmd_idx = 0;
+
+		for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
+			pmd_map_pfn_range(pmd, pfn, max_low_pfn);
+			pfn += PTRS_PER_PTE; 
 		}
 	}	
 }
 
+/*
+ * Add low memory identity-mappings - SMP needs it when
+ * starting up on an AP from real-mode. In the non-PAE
+ * case we already have these mappings through head.S.
+ * All user-space mappings are explicitly cleared after
+ * SMP startup in zap_low_mappings().
+ */
+static void __init low_physical_mapping_init(pgd_t *pgd_base)
+{
+#ifdef CONFIG_X86_PAE
+	unsigned long pfn = 0;
+	int pmd_ofs = 0;
+	pmd_t *pmd = one_md_table_init(pgd_base);
+
+	if(!cpu_has_pse) {
+		printk("PAE enabled, but no support for PSE (large pages)!\n");
+		printk("this is likely to waste some RAM.\n");
+	}
+	
+	for (; pmd_ofs < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_ofs++) {
+		pmd_map_pfn_range(pmd, pfn, max_low_pfn);
+		pfn += PTRS_PER_PTE;
+	}		
+#endif
+}
+
+
 static inline int page_kills_ppro(unsigned long pagenr)
 {
 	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
@@ -217,7 +261,7 @@ void __init permanent_kmaps_init(pgd_t *
 	pgd = swapper_pg_dir + pgd_index(vaddr);
 	pmd = pmd_offset(pgd, vaddr);
 	pte = pte_offset_kernel(pmd, vaddr);
-	pkmap_page_table = pte;	
+	pkmap_page_table = pte;
 }
 
 void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
@@ -282,6 +326,7 @@ static void __init pagetable_init (void)
 	}
 
 	kernel_physical_mapping_init(pgd_base);
+	low_physical_mapping_init(pgd_base);
 	remap_numa_kva();
 
 	/*
@@ -290,19 +335,7 @@ static void __init pagetable_init (void)
 	 */
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 	page_table_range_init(vaddr, 0, pgd_base);
-
 	permanent_kmaps_init(pgd_base);
-
-#ifdef CONFIG_X86_PAE
-	/*
-	 * Add low memory identity-mappings - SMP needs it when
-	 * starting up on an AP from real-mode. In the non-PAE
-	 * case we already have these mappings through head.S.
-	 * All user-space mappings are explicitly cleared after
-	 * SMP startup.
-	 */
-	pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
-#endif
 }
 
 void zap_low_mappings (void)
@@ -314,7 +347,7 @@ void zap_low_mappings (void)
 	 * Note that "pgd_clear()" doesn't do it for
 	 * us, because pgd_clear() is a no-op on i386.
 	 */
-	for (i = 0; i < USER_PTRS_PER_PGD; i++)
+	for (i = 0; i < FIRST_KERNEL_PGD_PTR; i++)
 #ifdef CONFIG_X86_PAE
 		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
 #else
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/arch/i386/mm/pgtable.c 802-banana_split/arch/i386/mm/pgtable.c
--- 801-separate_pmd/arch/i386/mm/pgtable.c	Wed Aug 13 20:50:53 2003
+++ 802-banana_split/arch/i386/mm/pgtable.c	Wed Aug 13 20:51:03 2003
@@ -159,10 +159,23 @@ void pmd_ctor(void *pmd, kmem_cache_t *c
 
 void kernel_pmd_ctor(void *__pmd, kmem_cache_t *kernel_pmd_cache, unsigned long flags)
 {
+	pmd_t *pmd = __pmd;
 	int i;
-	for (i=USER_PGD_PTRS; i<PTRS_PER_PGD; i++) {
-		pmd_t *kern_pmd = (pmd_t *)pgd_page(swapper_pg_dir[i]); 
-		memcpy(__pmd+PAGE_SIZE*(i-USER_PGD_PTRS), kern_pmd, PAGE_SIZE);
+
+	/*
+	 * Clear the whole pmd page first; the slots used by the
+	 * kernel are then overwritten from swapper_pg_dir below.
+	 */
+	clear_page(__pmd);
+
+	for (i=FIRST_KERNEL_PGD_PTR; i<PTRS_PER_PGD; i++, pmd+=PTRS_PER_PMD) {
+		pmd_t *kern_pmd = (pmd_t *)pgd_page(swapper_pg_dir[i]);
+		int start_index = USER_PTRS_PER_PMD(i);
+		pmd_t *dst_pmd = &pmd[start_index];
+		pmd_t *src_pmd = &kern_pmd[start_index];
+		int num_pmds = PTRS_PER_PMD-USER_PTRS_PER_PMD(i);
+		
+		memcpy(dst_pmd, src_pmd, num_pmds*sizeof(pmd_t));
 	}
 }
 
@@ -223,9 +236,9 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	for (i = 0; i < PTRS_PER_PGD; ++i) {
 		pmd_t *pmd = NULL;
 		
-		if (i == USER_PTRS_PER_PGD)
+		if (i == FIRST_KERNEL_PGD_PTR)
 			pmd = kmem_cache_alloc(kernel_pmd_cache, GFP_KERNEL);
-		else if (i < USER_PTRS_PER_PGD)
+		else if (i < FIRST_KERNEL_PGD_PTR)
 			pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
 		else
 			pmd += PTRS_PER_PMD;
@@ -257,11 +270,10 @@ void pgd_free(pgd_t *pgd)
 
 			set_pgd(&pgd[i], __pgd(0));
 			
-			if (i < USER_PGD_PTRS) {
+			if (i < FIRST_KERNEL_PGD_PTR)
 				kmem_cache_free(pmd_cache, pmd_to_free);
-			} else if (i == USER_PGD_PTRS) {
+			else if (i == FIRST_KERNEL_PGD_PTR)
 				kmem_cache_free(kernel_pmd_cache, pmd_to_free);
-			}
 		}
 	}
 	/* in the non-PAE case, clear_page_tables() clears user pgd entries */
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-alpha/pgtable.h 802-banana_split/include/asm-alpha/pgtable.h
--- 801-separate_pmd/include/asm-alpha/pgtable.h	Tue Apr  8 14:38:20 2003
+++ 802-banana_split/include/asm-alpha/pgtable.h	Wed Aug 13 20:51:03 2003
@@ -39,6 +39,7 @@
 #define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
 #define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-3))
 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 #define FIRST_USER_PGD_NR	0
 
 /* Number of pointers that fit on a page:  this will go away. */
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-arm/pgtable.h 802-banana_split/include/asm-arm/pgtable.h
--- 801-separate_pmd/include/asm-arm/pgtable.h	Mon Mar 17 21:43:48 2003
+++ 802-banana_split/include/asm-arm/pgtable.h	Wed Aug 13 20:51:03 2003
@@ -45,6 +45,7 @@ extern void __pgd_error(const char *file
 
 #define FIRST_USER_PGD_NR	1
 #define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 
 /*
  * The table below defines the page protection levels that we insert into our
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-cris/pgtable.h 802-banana_split/include/asm-cris/pgtable.h
--- 801-separate_pmd/include/asm-cris/pgtable.h	Tue Aug  5 19:59:16 2003
+++ 802-banana_split/include/asm-cris/pgtable.h	Wed Aug 13 20:51:03 2003
@@ -69,6 +69,7 @@ extern void paging_init(void);
  */
 
 #define USER_PTRS_PER_PGD       (TASK_SIZE/PGDIR_SIZE)
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 #define FIRST_USER_PGD_NR       0
 
 /* zero page used for uninitialized stuff */
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-i386/pgtable.h 802-banana_split/include/asm-i386/pgtable.h
--- 801-separate_pmd/include/asm-i386/pgtable.h	Wed Aug 13 20:50:53 2003
+++ 802-banana_split/include/asm-i386/pgtable.h	Wed Aug 13 20:51:03 2003
@@ -65,7 +65,22 @@ void paging_init(void);
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
-#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
+#define __USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
+#define FIRST_KERNEL_PGD_PTR	(__USER_PTRS_PER_PGD)
+#define PARTIAL_PGD	(TASK_SIZE > __USER_PTRS_PER_PGD*PGDIR_SIZE ? 1 : 0)
+#define PARTIAL_PMD	((TASK_SIZE % PGDIR_SIZE)/PMD_SIZE)
+#define USER_PTRS_PER_PGD	(PARTIAL_PGD + __USER_PTRS_PER_PGD)
+#ifndef __ASSEMBLY__
+static inline int USER_PTRS_PER_PMD(int pgd_index) {
+	if (pgd_index < __USER_PTRS_PER_PGD)
+		return PTRS_PER_PMD;
+	else if (PARTIAL_PMD && (pgd_index == __USER_PTRS_PER_PGD))
+		return (PTRS_PER_PMD-PARTIAL_PMD);
+	else
+		return 0;
+}
+#endif
+
 #define FIRST_USER_PGD_NR	0
 
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-ia64/pgtable.h 802-banana_split/include/asm-ia64/pgtable.h
--- 801-separate_pmd/include/asm-ia64/pgtable.h	Tue Jun 24 21:29:24 2003
+++ 802-banana_split/include/asm-ia64/pgtable.h	Wed Aug 13 20:51:03 2003
@@ -92,6 +92,7 @@
 #define PGDIR_MASK		(~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD		(__IA64_UL(1) << (PAGE_SHIFT-3))
 #define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 #define FIRST_USER_PGD_NR	0
 
 /*
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-m68k/pgtable.h 802-banana_split/include/asm-m68k/pgtable.h
--- 801-separate_pmd/include/asm-m68k/pgtable.h	Sat Jun 14 18:37:35 2003
+++ 802-banana_split/include/asm-m68k/pgtable.h	Wed Aug 13 20:51:03 2003
@@ -58,6 +58,7 @@
 #define PTRS_PER_PGD	128
 #endif
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 #define FIRST_USER_PGD_NR	0
 
 /* Virtual address region for use by kernel_map() */
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-parisc/pgtable.h 802-banana_split/include/asm-parisc/pgtable.h
--- 801-separate_pmd/include/asm-parisc/pgtable.h	Tue Aug  5 20:01:43 2003
+++ 802-banana_split/include/asm-parisc/pgtable.h	Wed Aug 13 20:51:19 2003
@@ -81,6 +81,7 @@
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD    (1UL << (PAGE_SHIFT - PT_NLEVELS))
 #define USER_PTRS_PER_PGD       PTRS_PER_PGD
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 
 /* Definitions for 2nd level */
 #define pgtable_cache_init()	do { } while (0)
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-ppc/pgtable.h 802-banana_split/include/asm-ppc/pgtable.h
--- 801-separate_pmd/include/asm-ppc/pgtable.h	Sat Jun 14 18:37:36 2003
+++ 802-banana_split/include/asm-ppc/pgtable.h	Wed Aug 13 20:51:19 2003
@@ -83,6 +83,7 @@ extern unsigned long ioremap_bot, iorema
 #define PTRS_PER_PMD	1
 #define PTRS_PER_PGD	1024
 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 #define FIRST_USER_PGD_NR	0
 
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-ppc64/pgtable.h 802-banana_split/include/asm-ppc64/pgtable.h
--- 801-separate_pmd/include/asm-ppc64/pgtable.h	Sat Jun 14 18:37:36 2003
+++ 802-banana_split/include/asm-ppc64/pgtable.h	Wed Aug 13 20:51:19 2003
@@ -36,6 +36,7 @@
 #define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
 
 #define USER_PTRS_PER_PGD	(1024)
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 #define FIRST_USER_PGD_NR	0
 
 #define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-sh/pgtable.h 802-banana_split/include/asm-sh/pgtable.h
--- 801-separate_pmd/include/asm-sh/pgtable.h	Wed Jul  2 21:59:15 2003
+++ 802-banana_split/include/asm-sh/pgtable.h	Wed Aug 13 20:51:19 2003
@@ -41,6 +41,7 @@ extern unsigned long empty_zero_page[102
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 #define FIRST_USER_PGD_NR	0
 
 #define PTE_PHYS_MASK	0x1ffff000
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-sparc/pgtable.h 802-banana_split/include/asm-sparc/pgtable.h
--- 801-separate_pmd/include/asm-sparc/pgtable.h	Sat May 10 18:35:03 2003
+++ 802-banana_split/include/asm-sparc/pgtable.h	Wed Aug 13 20:51:19 2003
@@ -125,6 +125,7 @@ BTFIXUPDEF_INT(page_kernel)
 #define PTRS_PER_PMD    	BTFIXUP_SIMM13(ptrs_per_pmd)
 #define PTRS_PER_PGD    	BTFIXUP_SIMM13(ptrs_per_pgd)
 #define USER_PTRS_PER_PGD	BTFIXUP_SIMM13(user_ptrs_per_pgd)
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 #define FIRST_USER_PGD_NR	0
 
 #define PAGE_NONE      __pgprot(BTFIXUP_INT(page_none))
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-sparc64/pgtable.h 802-banana_split/include/asm-sparc64/pgtable.h
--- 801-separate_pmd/include/asm-sparc64/pgtable.h	Wed Mar 26 22:54:37 2003
+++ 802-banana_split/include/asm-sparc64/pgtable.h	Wed Aug 13 20:51:19 2003
@@ -93,6 +93,7 @@
 /* Kernel has a separate 44bit address space. */
 #define USER_PTRS_PER_PGD	((const int)(test_thread_flag(TIF_32BIT)) ? \
 				 (1) : (PTRS_PER_PGD))
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 #define FIRST_USER_PGD_NR	0
 
 #define pte_ERROR(e)	__builtin_trap()
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-um/pgtable.h 802-banana_split/include/asm-um/pgtable.h
--- 801-separate_pmd/include/asm-um/pgtable.h	Fri May 30 19:02:21 2003
+++ 802-banana_split/include/asm-um/pgtable.h	Wed Aug 13 20:51:19 2003
@@ -40,6 +40,7 @@ extern unsigned long *empty_zero_page;
 #define PTRS_PER_PMD	1
 #define PTRS_PER_PGD	1024
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 #define FIRST_USER_PGD_NR       0
 
 #define pte_ERROR(e) \
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/include/asm-x86_64/pgtable.h 802-banana_split/include/asm-x86_64/pgtable.h
--- 801-separate_pmd/include/asm-x86_64/pgtable.h	Wed Jul  2 21:59:15 2003
+++ 802-banana_split/include/asm-x86_64/pgtable.h	Wed Aug 13 20:51:19 2003
@@ -112,6 +112,7 @@ static inline void set_pml4(pml4_t *dst,
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
+#define USER_PTRS_PER_PMD(x)	(PTRS_PER_PMD)
 #define FIRST_USER_PGD_NR	0
 
 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
diff -urpN -X /home/fletch/.diff.exclude 801-separate_pmd/mm/memory.c 802-banana_split/mm/memory.c
--- 801-separate_pmd/mm/memory.c	Wed Aug 13 20:29:24 2003
+++ 802-banana_split/mm/memory.c	Wed Aug 13 20:51:19 2003
@@ -100,9 +100,10 @@ static inline void free_one_pmd(struct m
 	pte_free_tlb(tlb, page);
 }
 
-static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir)
+static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * pgd, unsigned long pgdi)
 {
 	pmd_t * pmd, * md, * emd;
+	pgd_t *dir = pgd + pgdi;
 
 	if (pgd_none(*dir))
 		return;
@@ -126,7 +127,7 @@ static inline void free_one_pgd(struct m
 	 * found at 0x80000000 onwards.  The loop below compiles instead
 	 * to be terminated by unsigned address comparison using "jb".
 	 */
-	for (md = pmd, emd = pmd + PTRS_PER_PMD; md < emd; md++)
+	for (md = pmd, emd = pmd + USER_PTRS_PER_PMD(pgdi); md < emd; md++)
 		free_one_pmd(tlb,md);
 	pmd_free_tlb(tlb, pmd);
 }
@@ -140,11 +141,11 @@ static inline void free_one_pgd(struct m
 void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr)
 {
 	pgd_t * page_dir = tlb->mm->pgd;
-
-	page_dir += first;
+	unsigned long index = first;
+	
 	do {
-		free_one_pgd(tlb, page_dir);
-		page_dir++;
+		free_one_pgd(tlb, page_dir, index);
+		index++;
 	} while (--nr);
 }