From: Christoph Lameter <christoph@lameter.com>

Here is some #ifdef reduction work, at the price of having some structures
contain fields that go unused on non-NUMA builds:
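
The pattern is the usual one for getting #ifdefs out of call sites: the
real implementations stay under CONFIG_NUMA, and the #else branch supplies
no-op stubs, so callers can test and call unconditionally.  Below is a
minimal standalone sketch of the idea; the names are illustrative
stand-ins, not the real mm/slab.c definitions (build with or without
-DCONFIG_NUMA):

#include <stdio.h>

struct list3 {
	/*
	 * Field exists unconditionally; on !NUMA it simply stays NULL,
	 * which is the space cost the changelog mentions.
	 */
	void **alien;
};

#ifdef CONFIG_NUMA
static void drain_alien_cache(struct list3 *l3)
{
	/* Stands in for the real work of flushing per-node alien caches. */
	printf("draining alien caches for %p\n", (void *)l3);
}
#else
/*
 * No-op stub: expands to an empty statement, so every call site can
 * drop its own #ifdef CONFIG_NUMA / #endif pair.
 */
#define drain_alien_cache(l3) do { } while (0)
#endif

int main(void)
{
	struct list3 l3 = { .alien = NULL };

	/*
	 * One unconditional call site instead of an #ifdef block; on
	 * !NUMA builds the test still compiles, but the call expands
	 * to nothing.
	 */
	if (l3.alien)
		drain_alien_cache(&l3);
	return 0;
}

Note that a do { } while (0) stub only works for statement-context
callers; alloc_alien_cache() can get away with one in this patch because
its only expression-context caller remains inside #ifdef CONFIG_NUMA in
alloc_kmemlist().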

Signed-off-by: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 mm/slab.c |   55 ++++++-------------------------------------------------
 1 file changed, 6 insertions(+), 49 deletions(-)

diff -puN mm/slab.c~numa-aware-slab-allocator-unifdeffery mm/slab.c
--- 25/mm/slab.c~numa-aware-slab-allocator-unifdeffery	2005-06-21 13:29:40.000000000 -0700
+++ 25-akpm/mm/slab.c	2005-06-21 13:29:40.000000000 -0700
@@ -221,9 +221,7 @@ struct slab {
 	void			*s_mem;		/* including colour offset */
 	unsigned int		inuse;		/* num of objs active in slab */
 	kmem_bufctl_t		free;
-#ifdef CONFIG_NUMA
 	unsigned short          nodeid;
-#endif
 };
 
 /*
@@ -266,9 +264,7 @@ struct array_cache {
 	unsigned int limit;
 	unsigned int batchcount;
 	unsigned int touched;
-#ifdef CONFIG_NUMA
 	spinlock_t lock;
-#endif
 	void *entry[0];		/*
 				 * Must have this definition in here for the proper
 				 * alignment of array_cache. Also simplifies accessing
@@ -299,9 +295,7 @@ struct kmem_list3 {
 	unsigned int 	free_limit;
 	spinlock_t      list_lock;
 	struct array_cache	*shared;
-#ifdef CONFIG_NUMA
 	struct array_cache	**alien;
-#endif
 };
 
 /*
@@ -342,8 +336,6 @@ static inline int index_of(const size_t 
 #define INDEX_AC index_of(sizeof(struct arraycache_init))
 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
 
-#ifdef CONFIG_NUMA
-
 #define LIST3_INIT(parent) \
 	do {	\
 		INIT_LIST_HEAD(&(parent)->slabs_full);	\
@@ -355,19 +347,6 @@ static inline int index_of(const size_t 
 		(parent)->free_objects = 0;	\
 		(parent)->free_touched = 0;	\
 	} while (0)
-#else
-
-#define LIST3_INIT(parent) \
-	do {	\
-		INIT_LIST_HEAD(&(parent)->slabs_full);	\
-		INIT_LIST_HEAD(&(parent)->slabs_partial);	\
-		INIT_LIST_HEAD(&(parent)->slabs_free);	\
-		(parent)->shared = NULL; \
-		(parent)->list_lock = SPIN_LOCK_UNLOCKED;	\
-		(parent)->free_objects = 0;	\
-		(parent)->free_touched = 0;	\
-	} while (0)
-#endif
 
 #define MAKE_LIST(cachep, listp, slab, nodeid)	\
 	do {	\
@@ -771,12 +750,11 @@ static struct array_cache *alloc_arrayca
 		nc->limit = entries;
 		nc->batchcount = batchcount;
 		nc->touched = 0;
-#ifdef CONFIG_NUMA
 		spin_lock_init(&nc->lock);
-#endif
 	}
 	return nc;
 }
+
 #ifdef CONFIG_NUMA
 static inline struct array_cache **alloc_alien_cache(int node, int limit)
 {
@@ -845,6 +823,10 @@ static void drain_alien_cache(kmem_cache
 		}
 	}
 }
+#else
+#define alloc_alien_cache(node, limit) do { } while (0)
+#define free_alien_cache(ac_ptr) do { } while (0)
+#define drain_alien_cache(cachep, l3) do { } while (0)
 #endif
 
 static int __devinit cpuup_callback(struct notifier_block *nfb,
@@ -954,13 +936,11 @@ static int __devinit cpuup_callback(stru
 				kfree(l3->shared);
 				l3->shared = NULL;
 			}
-#ifdef CONFIG_NUMA
 			if (l3->alien) {
 				drain_alien_cache(cachep, l3);
 				free_alien_cache(l3->alien);
 				l3->alien = NULL;
 			}
-#endif
 
 			/* free slabs belonging to this node */
 			if (__node_shrink(cachep, node)) {
@@ -1923,10 +1903,8 @@ static void drain_cpu_caches(kmem_cache_
 			spin_lock(&l3->list_lock);
 			drain_array_locked(cachep, l3->shared, 1, node);
 			spin_unlock(&l3->list_lock);
-#ifdef CONFIG_NUMA
 			if (l3->alien)
 				drain_alien_cache(cachep, l3);
-#endif
 		}
 	}
 	spin_unlock_irq(&cachep->spinlock);
@@ -2052,9 +2030,7 @@ int kmem_cache_destroy(kmem_cache_t * ca
 	for_each_online_node(i) {
 		if ((l3 = cachep->nodelists[i])) {
 			kfree(l3->shared);
-#ifdef CONFIG_NUMA
 			free_alien_cache(l3->alien);
-#endif
 			kfree(l3);
 		}
 	}
@@ -2230,9 +2206,7 @@ static int cache_grow(kmem_cache_t *cach
 	if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags)))
 		goto opps1;
 
-#ifdef CONFIG_NUMA
 	slabp->nodeid = nodeid;
-#endif
 	set_slab_attr(cachep, slabp, objp);
 
 	cache_init_objs(cachep, slabp, ctor_flags);
@@ -2642,9 +2616,7 @@ static void free_block(kmem_cache_t *cac
 		int nodeid = 0;
 
 		slabp = GET_PAGE_SLAB(virt_to_page(objp));
-#ifdef CONFIG_NUMA
 		nodeid = slabp->nodeid;
-#endif
 		l3 = cachep->nodelists[nodeid];
 		list_del(&slabp->list);
 		objnr = (objp - slabp->s_mem) / cachep->objsize;
@@ -3093,9 +3065,8 @@ static int alloc_kmemlist(kmem_cache_t *
 
 	for_each_online_node(node) {
 		struct array_cache *nc = NULL, *new;
-#ifdef CONFIG_NUMA
 		struct array_cache **new_alien = NULL;
-
+#ifdef CONFIG_NUMA
 		if (!(new_alien = alloc_alien_cache(node, cachep->limit)))
 			goto fail;
 #endif
@@ -3111,22 +3082,15 @@ static int alloc_kmemlist(kmem_cache_t *
 							nc->avail);
 
 			l3->shared = new;
-#ifdef CONFIG_NUMA
 			if (!cachep->nodelists[node]->alien) {
 				l3->alien = new_alien;
 				new_alien = NULL;
 			}
 			l3->free_limit = (1 + nr_cpus_node(node))*
 				cachep->batchcount + cachep->num;
-#else
-			l3->free_limit = (1 + num_online_cpus())*
-				cachep->batchcount + cachep->num;
-#endif
 			spin_unlock_irq(&l3->list_lock);
 			kfree(nc);
-#ifdef CONFIG_NUMA
 			free_alien_cache(new_alien);
-#endif
 			continue;
 		}
 		if (!(l3 = kmalloc_node(sizeof(struct kmem_list3),
@@ -3137,14 +3101,9 @@ static int alloc_kmemlist(kmem_cache_t *
 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
 			((unsigned long)cachep)%REAPTIMEOUT_LIST3;
 		l3->shared = new;
-#ifdef CONFIG_NUMA
 		l3->alien = new_alien;
 		l3->free_limit = (1 + nr_cpus_node(node))*
 			cachep->batchcount + cachep->num;
-#else
-		l3->free_limit = (1 + num_online_cpus())*
-			cachep->batchcount + cachep->num;
-#endif
 		cachep->nodelists[node] = l3;
 	}
 	return err;
@@ -3324,10 +3283,8 @@ static void cache_reap(void *unused)
 		check_irq_on();
 
 		l3 = searchp->nodelists[numa_node_id()];
-#ifdef CONFIG_NUMA
 		if (l3->alien)
 			drain_alien_cache(searchp, l3);
-#endif
 		spin_lock_irq(&l3->list_lock);
 
 		drain_array_locked(searchp, ac_data(searchp), 0,
_