
From: "Andi Kleen" <ak@suse.de>

change_page_attr logic fixes from Andrea

This avoids reference-counting leaks and adds BUG() checks for more invalid cases.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/arch/i386/mm/pageattr.c   |   28 ++++++++++++++++++----------
 25-akpm/arch/x86_64/mm/pageattr.c |   26 +++++++++++++++++---------
 2 files changed, 35 insertions(+), 19 deletions(-)

diff -puN arch/i386/mm/pageattr.c~x86_64-change_page_attr-logic-fixes-from-andrea arch/i386/mm/pageattr.c
--- 25/arch/i386/mm/pageattr.c~x86_64-change_page_attr-logic-fixes-from-andrea	2005-01-09 23:01:30.226787936 -0800
+++ 25-akpm/arch/i386/mm/pageattr.c	2005-01-09 23:01:30.232787024 -0800
@@ -120,27 +120,35 @@ __change_page_attr(struct page *page, pg
 	kpte_page = virt_to_page(kpte);
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
 		if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
-			pte_t old = *kpte;
-			pte_t standard = mk_pte(page, PAGE_KERNEL); 
 			set_pte_atomic(kpte, mk_pte(page, prot)); 
-			if (pte_same(old,standard))
-				get_page(kpte_page);
 		} else {
 			struct page *split = split_large_page(address, prot); 
 			if (!split)
 				return -ENOMEM;
-			get_page(kpte_page);
 			set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
+			kpte_page = split;
 		}	
+		get_page(kpte_page);
 	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
 		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
 		__put_page(kpte_page);
-	}
+	} else
+		BUG();
 
-	if (cpu_has_pse && (page_count(kpte_page) == 1)) {
-		list_add(&kpte_page->lru, &df_list);
-		revert_page(kpte_page, address);
-	} 
+	/*
+	 * If the pte was reserved, it means it was created at boot
+	 * time (not via split_large_page) and in turn we must not
+	 * replace it with a largepage.
+	 */
+	if (!PageReserved(kpte_page)) {
+		/* memleak and potential failed 2M page regeneration */
+		BUG_ON(!page_count(kpte_page));
+
+		if (cpu_has_pse && (page_count(kpte_page) == 1)) {
+			list_add(&kpte_page->lru, &df_list);
+			revert_page(kpte_page, address);
+		}
+	}
 	return 0;
 } 
 
diff -puN arch/x86_64/mm/pageattr.c~x86_64-change_page_attr-logic-fixes-from-andrea arch/x86_64/mm/pageattr.c
--- 25/arch/x86_64/mm/pageattr.c~x86_64-change_page_attr-logic-fixes-from-andrea	2005-01-09 23:01:30.228787632 -0800
+++ 25-akpm/arch/x86_64/mm/pageattr.c	2005-01-09 23:01:30.233786872 -0800
@@ -131,28 +131,36 @@ __change_page_attr(unsigned long address
 	kpte_flags = pte_val(*kpte); 
 	if (pgprot_val(prot) != pgprot_val(ref_prot)) { 
 		if ((kpte_flags & _PAGE_PSE) == 0) { 
-			pte_t old = *kpte;
-			pte_t standard = pfn_pte(pfn, ref_prot);
-
 			set_pte(kpte, pfn_pte(pfn, prot));
-			if (pte_same(old,standard))
-				get_page(kpte_page);
 		} else {
+ 			/*
+ 			 * split_large_page will take the reference for this change_page_attr
+ 			 * on the split page.
+ 			 */
 			struct page *split = split_large_page(address, prot, ref_prot); 
 			if (!split)
 				return -ENOMEM;
-			get_page(split);
 			set_pte(kpte,mk_pte(split, ref_prot));
+			kpte_page = split;
 		}	
+		get_page(kpte_page);
 	} else if ((kpte_flags & _PAGE_PSE) == 0) { 
 		set_pte(kpte, pfn_pte(pfn, ref_prot));
 		__put_page(kpte_page);
-	}
+	} else
+		BUG();
+
+	/* on x86-64 the direct mapping set at boot is not using 4k pages */
+ 	BUG_ON(PageReserved(kpte_page));
 
-	if (page_count(kpte_page) == 1) {
+	switch (page_count(kpte_page)) {
+ 	case 1:
 		save_page(address, kpte_page); 		     
 		revert_page(address, ref_prot);
-	} 
+		break;
+ 	case 0:
+ 		BUG(); /* memleak and failed 2M page regeneration */
+ 	}
 	return 0;
 } 
 
_
