
From: "Andi Kleen" <ak@suse.de>

Cleanups preparing for memory hotplug

From: Matt Tolentino

Some cleanup work in early page table init preparing for memory hotplug.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/arch/x86_64/kernel/setup.c |    2 
 25-akpm/arch/x86_64/mm/init.c      |   81 +++++++++++++++++++++----------------
 25-akpm/include/asm-x86_64/proto.h |    2 
 3 files changed, 48 insertions(+), 37 deletions(-)

diff -puN arch/x86_64/kernel/setup.c~x86_64-cleanups-preparing-for-memory-hotplug arch/x86_64/kernel/setup.c
--- 25/arch/x86_64/kernel/setup.c~x86_64-cleanups-preparing-for-memory-hotplug	2004-11-28 01:54:36.116106624 -0800
+++ 25-akpm/arch/x86_64/kernel/setup.c	2004-11-28 01:54:36.124105408 -0800
@@ -486,7 +486,7 @@ void __init setup_arch(char **cmdline_p)
 
 	check_efer();
 
-	init_memory_mapping(); 
+	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
 
 #ifdef CONFIG_ACPI_BOOT
 	/*
diff -puN arch/x86_64/mm/init.c~x86_64-cleanups-preparing-for-memory-hotplug arch/x86_64/mm/init.c
--- 25/arch/x86_64/mm/init.c~x86_64-cleanups-preparing-for-memory-hotplug	2004-11-28 01:54:36.117106472 -0800
+++ 25-akpm/arch/x86_64/mm/init.c	2004-11-28 01:54:36.125105256 -0800
@@ -210,11 +210,28 @@ static __init void unmap_low_page(int i)
 	ti->allocated = 0; 
 } 
 
+static void __init phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
+{
+	int i;
+
+	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
+		unsigned long entry;
+
+		if (address >= end) {
+			for (; i < PTRS_PER_PMD; i++, pmd++)
+				set_pmd(pmd, __pmd(0));
+			break;
+		}
+		entry = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | address;
+		entry &= __supported_pte_mask;
+		set_pmd(pmd, __pmd(entry));
+	}
+}
+
 static void __init phys_pgd_init(pgd_t *pgd, unsigned long address, unsigned long end)
 { 
-	long i, j; 
+	long i = pgd_index(address);
 
-	i = pgd_index(address);
 	pgd = pgd + i;
 	for (; i < PTRS_PER_PGD; pgd++, i++) {
 		int map; 
@@ -235,66 +252,60 @@ static void __init phys_pgd_init(pgd_t *
 
 		pmd = alloc_low_page(&map, &pmd_phys);
 		set_pgd(pgd, __pgd(pmd_phys | _KERNPG_TABLE));
-		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
-			unsigned long pe;
-
-			if (paddr >= end) { 
-				for (; j < PTRS_PER_PMD; j++, pmd++)
-					set_pmd(pmd,  __pmd(0)); 
-				break;
-		}
-			pe = _PAGE_NX|_PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
-			pe &= __supported_pte_mask;
-			set_pmd(pmd, __pmd(pe));
-		}
+		phys_pmd_init(pmd, paddr, end);
 		unmap_low_page(map);
 	}
 	__flush_tlb();
 } 
 
+static void __init find_early_table_space(unsigned long end)
+{
+	unsigned long pgds, pmds, tables;
+
+	pgds = (end + PGDIR_SIZE - 1) >> PGDIR_SHIFT;
+	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+	tables = round_up(pgds*8, PAGE_SIZE) + round_up(pmds * 8, PAGE_SIZE);
+
+	table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
+	if (table_start == -1UL)
+		panic("Cannot find space for the kernel page tables");
+
+	table_start >>= PAGE_SHIFT;
+	table_end = table_start;
+}
+
+
 /* Setup the direct mapping of the physical memory at PAGE_OFFSET.
    This runs before bootmem is initialized and gets pages directly from the 
    physical memory. To access them they are temporarily mapped. */
-void __init init_memory_mapping(void) 
+void __init init_memory_mapping(unsigned long start, unsigned long end)
 { 
-	unsigned long adr;	       
-	unsigned long end;
 	unsigned long next; 
-	unsigned long pgds, pmds, tables; 
 
 	Dprintk("init_memory_mapping\n");
 
-	end = end_pfn_map << PAGE_SHIFT;
-
 	/* 
 	 * Find space for the kernel direct mapping tables.
 	 * Later we should allocate these tables in the local node of the memory
 	 * mapped.  Unfortunately this is done currently before the nodes are 
 	 * discovered.
 	 */
+	find_early_table_space(end);
 
-	pgds = (end + PGDIR_SIZE - 1) >> PGDIR_SHIFT;
-	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; 
-	tables = round_up(pgds*8, PAGE_SIZE) + round_up(pmds * 8, PAGE_SIZE); 
-
-	table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables); 
-	if (table_start == -1UL) 
-		panic("Cannot find space for the kernel page tables"); 
-
-	table_start >>= PAGE_SHIFT; 
-	table_end = table_start;
-       
+	start += __PAGE_OFFSET;
 	end += __PAGE_OFFSET; /* turn virtual */  	
 
-	for (adr = PAGE_OFFSET; adr < end; adr = next) { 
+	for (; start < end; start = next) {
 		int map;
 		unsigned long pgd_phys; 
 		pgd_t *pgd = alloc_low_page(&map, &pgd_phys);
-		next = adr + PML4_SIZE;
+		pml4_t *pml4 = pml4_offset_k(start);
+
+		next = start + PML4_SIZE;
 		if (next > end) 
 			next = end; 
-		phys_pgd_init(pgd, adr-PAGE_OFFSET, next-PAGE_OFFSET); 
-		set_pml4(init_level4_pgt + pml4_index(adr), mk_kernel_pml4(pgd_phys));
+		phys_pgd_init(pgd, start-PAGE_OFFSET, next-PAGE_OFFSET);
+		set_pml4(pml4, mk_kernel_pml4(pgd_phys));
 		unmap_low_page(map);   
 	} 
 	asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
diff -puN include/asm-x86_64/proto.h~x86_64-cleanups-preparing-for-memory-hotplug include/asm-x86_64/proto.h
--- 25/include/asm-x86_64/proto.h~x86_64-cleanups-preparing-for-memory-hotplug	2004-11-28 01:54:36.119106168 -0800
+++ 25-akpm/include/asm-x86_64/proto.h	2004-11-28 01:54:36.126105104 -0800
@@ -15,7 +15,7 @@ extern void pda_init(int); 
 extern void early_idt_handler(void);
 
 extern void mcheck_init(struct cpuinfo_x86 *c);
-extern void init_memory_mapping(void);
+extern void init_memory_mapping(unsigned long start, unsigned long end);
 
 extern void system_call(void); 
 extern int kernel_syscall(void);
_
