Index: linux-2.6.10-rc2-bk13-Percpu/mm/slab.c
===================================================================
--- linux-2.6.10-rc2-bk13-Percpu.orig/mm/slab.c	2004-12-02 17:39:29.000000000 +1100
+++ linux-2.6.10-rc2-bk13-Percpu/mm/slab.c	2004-12-03 17:26:46.585593664 +1100
@@ -233,6 +233,7 @@
 	unsigned int limit;
 	unsigned int batchcount;
 	unsigned int touched;
+	void *entries[0];
 };
 
 /* bootstrap: The caches do not work without cpuarrays anymore,
@@ -241,7 +242,7 @@
 #define BOOT_CPUCACHE_ENTRIES	1
 struct arraycache_init {
 	struct array_cache cache;
-	void * entries[BOOT_CPUCACHE_ENTRIES];
+	void *entries[BOOT_CPUCACHE_ENTRIES];
 };
 
 /*
@@ -551,15 +552,10 @@
 
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
-static void free_block(kmem_cache_t* cachep, void** objpp, int len);
+static void free_block(kmem_cache_t* cachep, struct array_cache *ac, int len);
 static void enable_cpucache (kmem_cache_t *cachep);
 static void cache_reap (void *unused);
 
-static inline void ** ac_entry(struct array_cache *ac)
-{
-	return (void**)(ac+1);
-}
-
 static inline struct array_cache *ac_data(kmem_cache_t *cachep)
 {
 	return cachep->array[smp_processor_id()];
@@ -703,7 +699,7 @@
 			nc = cachep->array[cpu];
 			cachep->array[cpu] = NULL;
 			cachep->free_limit -= cachep->batchcount;
-			free_block(cachep, ac_entry(nc), nc->avail);
+			free_block(cachep, nc, nc->avail);
 			spin_unlock_irq(&cachep->spinlock);
 			kfree(nc);
 		}
@@ -1513,7 +1509,7 @@
 	check_irq_off();
 	ac = ac_data(cachep);
 	spin_lock(&cachep->spinlock);
-	free_block(cachep, &ac_entry(ac)[0], ac->avail);
+	free_block(cachep, ac, ac->avail);
 	spin_unlock(&cachep->spinlock);
 	ac->avail = 0;
 }
@@ -1981,8 +1977,9 @@
 				batchcount = shared_array->avail;
 			shared_array->avail -= batchcount;
 			ac->avail = batchcount;
-			memcpy(ac_entry(ac), &ac_entry(shared_array)[shared_array->avail],
-					sizeof(void*)*batchcount);
+			memcpy(ac->entries,
+				shared_array->entries + shared_array->avail,
+				sizeof(void*)*batchcount);
 			shared_array->touched = 1;
 			goto alloc_done;
 		}
@@ -2009,7 +2006,7 @@
 			STATS_SET_HIGH(cachep);
 
 			/* get obj pointer */
-			ac_entry(ac)[ac->avail++] = slabp->s_mem + slabp->free*cachep->objsize;
+			ac->entries[ac->avail++] = slabp->s_mem + slabp->free*cachep->objsize;
 
 			slabp->inuse++;
 			next = slab_bufctl(slabp)[slabp->free];
@@ -2046,7 +2043,7 @@
 			goto retry;
 	}
 	ac->touched = 1;
-	return ac_entry(ac)[--ac->avail];
+	return ac->entries[--ac->avail];
 }
 
 static inline void
@@ -2118,7 +2115,7 @@
 	if (likely(ac->avail)) {
 		STATS_INC_ALLOCHIT(cachep);
 		ac->touched = 1;
-		objp = ac_entry(ac)[--ac->avail];
+		objp = ac->entries[--ac->avail];
 	} else {
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
@@ -2133,7 +2130,8 @@
  * the l3 structure
 */
 
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
+static void free_block(kmem_cache_t *cachep, struct array_cache *ac,
+			int nr_objects)
 {
 	int i;
 
@@ -2143,18 +2141,17 @@
 	cachep->lists.free_objects += nr_objects;
 
 	for (i = 0; i < nr_objects; i++) {
-		void *objp = objpp[i];
 		struct slab *slabp;
 		unsigned int objnr;
 
-		slabp = GET_PAGE_SLAB(virt_to_page(objp));
+		slabp = GET_PAGE_SLAB(virt_to_page(ac->entries[i]));
 		list_del(&slabp->list);
-		objnr = (objp - slabp->s_mem) / cachep->objsize;
+		objnr = (ac->entries[i] - slabp->s_mem) / cachep->objsize;
 		check_slabp(cachep, slabp);
 #if DEBUG
 		if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
 			printk(KERN_ERR "slab: double free detected in cache '%s', objp %p.\n",
-						cachep->name, objp);
+						cachep->name, ac->entries[i]);
 			BUG();
 		}
 #endif
@@ -2171,7 +2168,7 @@
 				slab_destroy(cachep, slabp);
 			} else {
 				list_add(&slabp->list,
-				&list3_data_ptr(cachep, objp)->slabs_free);
+				&list3_data_ptr(cachep, ac->entries[i])->slabs_free);
 			}
 		} else {
 			/* Unconditionally move a slab to the end of the
@@ -2179,7 +2176,7 @@
 			 * other objects to be freed, too.
 			 */
 			list_add_tail(&slabp->list,
-				&list3_data_ptr(cachep, objp)->slabs_partial);
+				&list3_data_ptr(cachep, ac->entries[i])->slabs_partial);
 		}
 	}
 }
@@ -2200,15 +2197,15 @@
 		if (max) {
 			if (batchcount > max)
 				batchcount = max;
-			memcpy(&ac_entry(shared_array)[shared_array->avail],
-					&ac_entry(ac)[0],
-					sizeof(void*)*batchcount);
+			memcpy(shared_array->entries + shared_array->avail,
+				ac->entries,
+				sizeof(void*)*batchcount);
 			shared_array->avail += batchcount;
 			goto free_done;
 		}
 	}
 
-	free_block(cachep, &ac_entry(ac)[0], batchcount);
+	free_block(cachep, ac, batchcount);
 free_done:
 #if STATS
 	{
@@ -2230,8 +2227,8 @@
 #endif
 	spin_unlock(&cachep->spinlock);
 	ac->avail -= batchcount;
-	memmove(&ac_entry(ac)[0], &ac_entry(ac)[batchcount],
-			sizeof(void*)*ac->avail);
+	memmove(ac->entries, ac->entries + batchcount,
+			sizeof(void*)*ac->avail);
 }
 
 /*
@@ -2250,12 +2247,12 @@
 
 	if (likely(ac->avail < ac->limit)) {
 		STATS_INC_FREEHIT(cachep);
-		ac_entry(ac)[ac->avail++] = objp;
+		ac->entries[ac->avail++] = objp;
 		return;
 	} else {
 		STATS_INC_FREEMISS(cachep);
 		cache_flusharray(cachep, ac);
-		ac_entry(ac)[ac->avail++] = objp;
+		ac->entries[ac->avail++] = objp;
 	}
 }
 
@@ -2568,7 +2565,7 @@
 		if (!ccold)
 			continue;
 		spin_lock_irq(&cachep->spinlock);
-		free_block(cachep, ac_entry(ccold), ccold->avail);
+		free_block(cachep, ccold, ccold->avail);
 		spin_unlock_irq(&cachep->spinlock);
 		kfree(ccold);
 	}
@@ -2580,7 +2577,7 @@
 		old = cachep->lists.shared;
 		cachep->lists.shared = new_shared;
 		if (old)
-			free_block(cachep, ac_entry(old), old->avail);
+			free_block(cachep, old, old->avail);
 		spin_unlock_irq(&cachep->spinlock);
 		kfree(old);
 	}
@@ -2654,10 +2651,10 @@
 		if (tofree > ac->avail) {
 			tofree = (ac->avail+1)/2;
 		}
-		free_block(cachep, ac_entry(ac), tofree);
+		free_block(cachep, ac, tofree);
 		ac->avail -= tofree;
-		memmove(&ac_entry(ac)[0], &ac_entry(ac)[tofree],
-				sizeof(void*)*ac->avail);
+		memmove(ac->entries, ac->entries + tofree,
+				sizeof(void*)*ac->avail);
 	}
 }
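
Note (not part of the patch): the change above drops the old ac_entry() helper, which computed the
entry array's address as (void**)(ac+1), and instead declares an explicit zero-length array member
at the end of struct array_cache, so callers can index ac->entries[i] directly. Below is a minimal
userspace sketch of that idiom; the malloc-based allocator and its name are illustrative only. In
slab.c itself the buffer is kmalloc'd with room for the cache's limit worth of object pointers.

#include <stdlib.h>

struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entries[0];	/* storage follows the struct (GCC zero-length array) */
};

/* Hypothetical allocator: one allocation holds the header plus
 * 'limit' object pointers, addressable as ac->entries[i]. */
static struct array_cache *alloc_array_cache(unsigned int limit)
{
	struct array_cache *ac = malloc(sizeof(*ac) + limit * sizeof(void *));

	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = 0;
		ac->touched = 0;
	}
	return ac;
}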