
From: Oleg Nesterov <oleg@tv-sign.ru>

This patch assumes that there is no valid usage of
vmalloced_or_vmapped_page->lru.

If that holds, the page array in vm_struct (->pages plus
->nr_pages) can be eliminated in favour of a list threaded
through the pages' own ->lru links.  This saves some memory
(no separate array allocation, which today may even recurse
into __vmalloc() for large areas) and simplifies the code a bit.
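
As a minimal sketch of the cursor idiom vmap_pte_range() uses
below to consume that list (walk_page_list() is a made-up helper
for illustration, not part of the patch):

	#include <linux/list.h>
	#include <linux/mm.h>

	static void walk_page_list(struct list_head *page_list,
				   unsigned int nr_pages)
	{
		/* The cursor starts at the list head itself... */
		struct list_head *cursor = page_list;

		while (nr_pages--) {
			struct page *page;

			/* ...and is advanced *before* dereferencing,
			 * so the head is never mistaken for a page. */
			cursor = cursor->next;
			page = list_entry(cursor, struct page, lru);
			/* 'page' is the next page to map */
		}
	}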

In the vmap/vunmap case vm_struct->page_list is used only by
map_vm_area() (vunmap() never traverses it), so it is ok to do:

	addr1 = vmap(pages, count, flags, prot);
	addr2 = vmap(pages, count, flags, prot);
	...
	vunmap(addr1);
	vunmap(addr2);
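
The second vmap() simply re-links each page->lru into the new
area's page_list; the first area's list is left stale, which is
harmless because nothing looks at it after map_vm_area() has
populated the page tables (vunmap() does not walk the list; only
vfree() does, to free pages the area allocated itself).  A minimal
illustration of the re-linking, with made-up names:

	#include <linux/list.h>

	static LIST_HEAD(list_a);
	static LIST_HEAD(list_b);
	static struct list_head node;

	static void relink_example(void)
	{
		list_add_tail(&node, &list_a);	/* node linked into A */
		list_add_tail(&node, &list_b);	/* node re-linked into B:
						 * A still points at node,
						 * but node's links now belong
						 * to B, so A must never be
						 * traversed again */
	}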

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/include/linux/vmalloc.h |    8 +--
 25-akpm/mm/vmalloc.c            |   85 +++++++++++++++-------------------------
 2 files changed, 36 insertions(+), 57 deletions(-)

diff -puN include/linux/vmalloc.h~vmalloc-use-list-of-pages-instead-of-array-in-vm_struct include/linux/vmalloc.h
--- 25/include/linux/vmalloc.h~vmalloc-use-list-of-pages-instead-of-array-in-vm_struct	2005-03-10 05:03:50.000000000 -0800
+++ 25-akpm/include/linux/vmalloc.h	2005-03-10 05:03:50.000000000 -0800
@@ -2,6 +2,7 @@
 #define _LINUX_VMALLOC_H
 
 #include <linux/spinlock.h>
+#include <linux/list.h>
 #include <asm/page.h>		/* pgprot_t */
 
 /* bits in vm_struct->flags */
@@ -14,8 +15,7 @@ struct vm_struct {
 	void			*addr;
 	unsigned long		size;
 	unsigned long		flags;
-	struct page		**pages;
-	unsigned int		nr_pages;
+	struct list_head	page_list;
 	unsigned long		phys_addr;
 	struct vm_struct	*next;
 };
@@ -30,6 +30,7 @@ extern void *__vmalloc(unsigned long siz
 extern void *__vmalloc_area(struct vm_struct *area, int gfp_mask, pgprot_t prot);
 extern void vfree(void *addr);
 
+struct page;
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
 extern void vunmap(void *addr);
@@ -41,8 +42,7 @@ extern struct vm_struct *get_vm_area(uns
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 					unsigned long start, unsigned long end);
 extern struct vm_struct *remove_vm_area(void *addr);
-extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
-			struct page ***pages);
+extern int map_vm_area(struct vm_struct *area, pgprot_t prot);
 extern void unmap_vm_area(struct vm_struct *area);
 
 /*
diff -puN mm/vmalloc.c~vmalloc-use-list-of-pages-instead-of-array-in-vm_struct mm/vmalloc.c
--- 25/mm/vmalloc.c~vmalloc-use-list-of-pages-instead-of-array-in-vm_struct	2005-03-10 05:03:50.000000000 -0800
+++ 25-akpm/mm/vmalloc.c	2005-03-10 05:03:50.000000000 -0800
@@ -84,7 +84,7 @@ void unmap_vm_area(struct vm_struct *are
 }
 
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
-			unsigned long end, pgprot_t prot, struct page ***pages)
+			unsigned long end, pgprot_t prot, struct list_head **pages)
 {
 	pte_t *pte;
 
@@ -92,18 +92,20 @@ static int vmap_pte_range(pmd_t *pmd, un
 	if (!pte)
 		return -ENOMEM;
 	do {
-		struct page *page = **pages;
+		struct page *page;
+
 		WARN_ON(!pte_none(*pte));
-		if (!page)
-			return -ENOMEM;
+
+		*pages = (*pages)->next;
+		page = list_entry(*pages, struct page, lru);
 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
-		(*pages)++;
+
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	return 0;
 }
 
 static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
-			unsigned long end, pgprot_t prot, struct page ***pages)
+			unsigned long end, pgprot_t prot, struct list_head **pages)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -120,7 +122,7 @@ static inline int vmap_pmd_range(pud_t *
 }
 
 static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
-			unsigned long end, pgprot_t prot, struct page ***pages)
+			unsigned long end, pgprot_t prot, struct list_head **pages)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -136,12 +138,13 @@ static inline int vmap_pud_range(pgd_t *
 	return 0;
 }
 
-int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+int map_vm_area(struct vm_struct *area, pgprot_t prot)
 {
 	pgd_t *pgd;
 	unsigned long next;
 	unsigned long addr = (unsigned long) area->addr;
 	unsigned long end = addr + area->size - PAGE_SIZE;
+	struct list_head *pages = &area->page_list;
 	int err;
 
 	BUG_ON(addr >= end);
@@ -149,7 +152,7 @@ int map_vm_area(struct vm_struct *area, 
 	spin_lock(&init_mm.page_table_lock);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = vmap_pud_range(pgd, addr, next, prot, pages);
+		err = vmap_pud_range(pgd, addr, next, prot, &pages);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
@@ -218,8 +221,7 @@ found:
 	area->flags = flags;
 	area->addr = (void *)addr;
 	area->size = size;
-	area->pages = NULL;
-	area->nr_pages = 0;
+	INIT_LIST_HEAD(&area->page_list);
 	area->phys_addr = 0;
 	write_unlock(&vmlist_lock);
 
@@ -301,20 +303,11 @@ void __vunmap(void *addr, int deallocate
 		WARN_ON(1);
 		return;
 	}
-	
-	if (deallocate_pages) {
-		int i;
 
-		for (i = 0; i < area->nr_pages; i++) {
-			if (unlikely(!area->pages[i]))
-				BUG();
-			__free_page(area->pages[i]);
-		}
-
-		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
-			vfree(area->pages);
-		else
-			kfree(area->pages);
+	if (deallocate_pages) {
+		struct page *page, *tmp;
+		list_for_each_entry_safe(page, tmp, &area->page_list, lru)
+			__free_page(page);
 	}
 
 	kfree(area);
@@ -373,13 +366,17 @@ void *vmap(struct page **pages, unsigned
 {
 	struct vm_struct *area;
 
-	if (count > num_physpages)
-		return NULL;
-
 	area = get_vm_area((count << PAGE_SHIFT), flags);
 	if (!area)
 		return NULL;
-	if (map_vm_area(area, prot, &pages)) {
+
+	while (count--) {
+		struct page *page = *pages++;
+		BUG_ON(!page);
+		list_add_tail(&page->lru, &area->page_list);
+	}
+
+	if (map_vm_area(area, prot)) {
 		vunmap(area->addr);
 		return NULL;
 	}
@@ -391,39 +388,21 @@ EXPORT_SYMBOL(vmap);
 
 void *__vmalloc_area(struct vm_struct *area, int gfp_mask, pgprot_t prot)
 {
-	struct page **pages;
-	unsigned int nr_pages, array_size, i;
+	unsigned int nr_pages;
 
 	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
-	array_size = (nr_pages * sizeof(struct page *));
-
-	area->nr_pages = nr_pages;
-	/* Please note that the recursion is strictly bounded. */
-	if (array_size > PAGE_SIZE)
-		pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL);
-	else
-		pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
-	area->pages = pages;
-	if (!area->pages) {
-		remove_vm_area(area->addr);
-		kfree(area);
-		return NULL;
-	}
-	memset(area->pages, 0, array_size);
 
-	for (i = 0; i < area->nr_pages; i++) {
-		area->pages[i] = alloc_page(gfp_mask);
-		if (unlikely(!area->pages[i])) {
-			/* Successfully allocated i pages, free them in __vunmap() */
-			area->nr_pages = i;
+	while (nr_pages--) {
+		struct page *page = alloc_page(gfp_mask);
+		if (!page)
 			goto fail;
-		}
+		list_add_tail(&page->lru, &area->page_list);
 	}
 
-	if (map_vm_area(area, prot, &pages))
+	if (map_vm_area(area, prot))
 		goto fail;
-	return area->addr;
 
+	return area->addr;
 fail:
 	vfree(area->addr);
 	return NULL;
_
