
From: Christoph Lameter <clameter@engr.sgi.com>

This cleanup for the NUMA-aware slab allocator drops the private
is_node_online() wrapper and converts the open-coded loops over
MAX_NUMNODES to the generic for_each_online_node() iterator, so each
caller no longer needs an explicit node_online() / is_node_online()
test to skip offline nodes.
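For reference, a minimal sketch of what the iterator does, assuming the
usual node_online() test (the real for_each_online_node() is a macro
over the node online map in the kernel headers, not this open-coded
loop):

	int node;

	/* visit only nodes that are currently online */
	for (node = 0; node < MAX_NUMNODES; node++)
		if (node_online(node)) {
			/* ... per-node work ... */
		}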

It also switches the lock assertions in check_spinlock_acquired() and
check_spinlock_acquired_node() from BUG_ON(spin_trylock(...)) to
assert_spin_locked().  The spin_trylock() form is not side-effect
free: if the lock happens to be unlocked, spin_trylock() acquires it
before the BUG_ON() fires, whereas assert_spin_locked() only inspects
the lock state.
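Conceptually, assert_spin_locked() is a pure check (a sketch, close to
but not necessarily the exact kernel definition):

	/* sketch only: checks the lock, never acquires it */
	#define assert_spin_locked(lock)	BUG_ON(!spin_is_locked(lock))
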
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 mm/slab.c |   60 +++++++++++++++++++++++++-----------------------------------
 1 file changed, 25 insertions(+), 35 deletions(-)

diff -puN mm/slab.c~numa-aware-slab-allocator-v3-cleanup mm/slab.c
--- 25/mm/slab.c~numa-aware-slab-allocator-v3-cleanup	2005-05-15 20:41:11.000000000 -0700
+++ 25-akpm/mm/slab.c	2005-05-15 20:41:11.000000000 -0700
@@ -391,13 +391,6 @@ static inline int index_of(const size_t 
 #define list3_data_ptr(cachep, ptr) \
 		list3_data(cachep)
 
-#ifdef CONFIG_NUMA
-#define is_node_online(node) node_online(node)
-#else
-#define is_node_online(node) \
-	(node == 0)
-#endif /* CONFIG_NUMA */
-
 /*
  * kmem_cache_t
  *
@@ -1159,15 +1152,14 @@ void __init kmem_cache_init(void)
 	}
 	/* 5) Replace the bootstrap kmem_list3's */
 	{
-		int j;
+		int node;
 		/* Replace the static kmem_list3 structures for the boot cpu */
 		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
 				numa_node_id());
 
-		for (j = 0; j < MAX_NUMNODES; j++) {
-			if (is_node_online(j))
+		for_each_online_node(node) {
-				init_list(malloc_sizes[INDEX_L3].cs_cachep,
-						&initkmem_list3[SIZE_L3+j], j);
+			init_list(malloc_sizes[INDEX_L3].cs_cachep,
+					&initkmem_list3[SIZE_L3+node], node);
 		}
 		if (INDEX_AC != INDEX_L3) {
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
@@ -1492,15 +1484,13 @@ static void slab_destroy (kmem_cache_t *
    as size of kmem_list3. */
 static inline void set_up_list3s(kmem_cache_t *cachep)
 {
-	int i;
+	int node;
 
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		if (is_node_online(i)) {
-			cachep->nodelists[i] = &initkmem_list3[SIZE_L3+i];
-			cachep->nodelists[i]->next_reap = jiffies +
-				REAPTIMEOUT_LIST3 +
-				((unsigned long)cachep)%REAPTIMEOUT_LIST3;
-		}
+	for_each_online_node(node) {
+		cachep->nodelists[node] = &initkmem_list3[SIZE_L3+node];
+		cachep->nodelists[node]->next_reap = jiffies +
+			REAPTIMEOUT_LIST3 +
+			((unsigned long)cachep)%REAPTIMEOUT_LIST3;
 	}
 }
 
@@ -1883,7 +1873,7 @@ static void check_spinlock_acquired(kmem
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	BUG_ON(spin_trylock(&list3_data(cachep)->list_lock));
+	assert_spin_locked(&list3_data(cachep)->list_lock);
 #endif
 }
 
@@ -1891,7 +1881,7 @@ static inline void check_spinlock_acquir
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	BUG_ON(spin_trylock(&(cachep->nodelists[node])->list_lock));
+	assert_spin_locked(&(cachep->nodelists[node])->list_lock);
 #endif
 }
 
@@ -1939,16 +1929,16 @@ static void do_drain(void *arg)
 static void drain_cpu_caches(kmem_cache_t *cachep)
 {
 	struct kmem_list3 *l3;
-	int i;
+	int node;
 
 	smp_call_function_all_cpus(do_drain, cachep);
 	check_irq_on();
 	spin_lock_irq(&cachep->spinlock);
-	for (i = 0; i < MAX_NUMNODES; i++)  {
-		l3 = cachep->nodelists[i];
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
 		if (l3) {
 			spin_lock(&l3->list_lock);
-			drain_array_locked(cachep, l3->shared, 1, i);
+			drain_array_locked(cachep, l3->shared, 1, node);
 			spin_unlock(&l3->list_lock);
 #ifdef CONFIG_NUMA
 			if (l3->alien)
@@ -3146,14 +3136,14 @@ static void check_redzone(kmem_cache_t *
 {
 	struct list_head *q;
 	struct slab *slabp;
-	int i;
+	int node;
 	struct kmem_list3 *l3;
 
 	check_spinlock_acquired(cachep);
 
-	for( i=0; i<MAX_NUMNODES; i++) {
-		l3 = cachep->nodelists[i];
-		if (!l3 || !is_node_online(i))
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (!l3)
 			continue;
 
 		list_for_each(q,&l3->slabs_full) {
@@ -3563,16 +3553,16 @@ static int s_show(struct seq_file *m, vo
 	unsigned long	num_slabs, free_objects = 0, shared_avail = 0;
 	const char *name;
 	char *error = NULL;
-	int i;
+	int node;
 	struct kmem_list3 *l3;
 
 	check_irq_on();
 	spin_lock_irq(&cachep->spinlock);
 	active_objs = 0;
 	num_slabs = 0;
-	for( i=0; i<MAX_NUMNODES; i++) {
-		l3 = cachep->nodelists[i];
-		if (!l3 || !is_node_online(i))
+	for_each_online_node(node) {
+		l3 = cachep->nodelists[node];
+		if (!l3)
 			continue;
 
 		spin_lock(&l3->list_lock);
@@ -3683,9 +3673,9 @@ static void do_dump_slabp(kmem_cache_t *
 
 	check_irq_on();
 	spin_lock_irq(&cachep->spinlock);
-	for( node=0; node<MAX_NUMNODES; node++) {
+	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
-		if (!l3 || !is_node_online(node))
+		if (!l3)
 			continue;
 
 		list_for_each(q,&l3->slabs_full) {
_
