Name: Remove ____cacheline_aligned from tlb_state
Status: Untested
Signed-off-by: Rusty Russell

Generally, per-cpu variables will cosy up nicely to their CPU-affine
neighbours: forcing cacheline alignment is unnecessary bloat.  If the
variable is often referenced from other CPUs, it should not use the
per-cpu stuff, which on enlightened archs will be NUMA-aware.

Index: linux-2.6.10-rc2-bk13-Percpu/arch/i386/kernel/smp.c
===================================================================
--- linux-2.6.10-rc2-bk13-Percpu.orig/arch/i386/kernel/smp.c	2004-11-30 12:45:10.000000000 +1100
+++ linux-2.6.10-rc2-bk13-Percpu/arch/i386/kernel/smp.c	2004-12-02 13:16:06.732143384 +1100
@@ -103,7 +103,7 @@
  * about nothing of note with C stepping upwards.
  */
 
-DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) = { &init_mm, 0, };
 
 /*
  * the following functions deal with sending IPIs between CPUs.
Index: linux-2.6.10-rc2-bk13-Percpu/arch/i386/mach-voyager/voyager_smp.c
===================================================================
--- linux-2.6.10-rc2-bk13-Percpu.orig/arch/i386/mach-voyager/voyager_smp.c	2004-11-30 12:45:10.000000000 +1100
+++ linux-2.6.10-rc2-bk13-Percpu/arch/i386/mach-voyager/voyager_smp.c	2004-12-02 13:15:37.494588168 +1100
@@ -35,7 +35,7 @@
 int reboot_smp = 0;
 
 /* TLB state -- visible externally, indexed physically */
-DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };
 
 /* CPU IRQ affinity -- set to all ones initially */
 static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL };
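
For reference, a minimal sketch of the ordinary per-cpu pattern the
description has in mind, using the 2.6-era API (the counter name and
helper here are hypothetical, purely for illustration):

	#include <linux/percpu.h>

	/* Hypothetical per-cpu counter: no alignment annotation is
	 * needed, since each CPU's copy already lives in that CPU's
	 * per-cpu area, next to its other per-cpu data. */
	static DEFINE_PER_CPU(unsigned long, hypo_count);

	static void bump_hypo_count(void)
	{
		/* get_cpu_var() disables preemption and yields this
		 * CPU's copy; put_cpu_var() re-enables preemption. */
		get_cpu_var(hypo_count)++;
		put_cpu_var(hypo_count);
	}

A variable that is mostly touched by *other* CPUs (via
per_cpu(var, cpu)) is the case the description warns about: there the
per-cpu area buys no locality, and an ordinary variable marked
__cacheline_aligned is the better fit.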