diff -urpN -X /home/fletch/.diff.exclude 640-per_node_idt/arch/i386/Kconfig 696-config_numasched/arch/i386/Kconfig
--- 640-per_node_idt/arch/i386/Kconfig	Fri May 30 19:32:01 2003
+++ 696-config_numasched/arch/i386/Kconfig	Fri May 30 22:02:10 2003
@@ -726,6 +726,11 @@ config NUMA
 	default n if X86_PC
 	default y if (X86_NUMAQ || X86_SUMMIT)
 
+config NUMA_SCHED
+	bool "NUMA scheduling support"
+	depends on NUMA
+	default y
+
 # Need comments to help the hapless user trying to turn on NUMA support
 comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
 	depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
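
The Kconfig fragment above only introduces the new symbol: on a kernel that
already selects CONFIG_NUMA, NUMA_SCHED defaults to y, and kconfig then emits
the preprocessor symbol that every #ifdef below keys off. A minimal sketch of
the generated header (standard kconfig behaviour, shown purely for
illustration):

	/* include/linux/autoconf.h (generated) */
	#define CONFIG_NUMA 1
	#define CONFIG_NUMA_SCHED 1	/* omitted entirely when the option is n */
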
diff -urpN -X /home/fletch/.diff.exclude 640-per_node_idt/include/linux/sched.h 696-config_numasched/include/linux/sched.h
--- 640-per_node_idt/include/linux/sched.h	Fri May 30 19:29:30 2003
+++ 696-config_numasched/include/linux/sched.h	Fri May 30 22:02:10 2003
@@ -509,7 +509,7 @@ extern void set_cpus_allowed(task_t *p, 
 # define set_cpus_allowed(p, new_mask) do { } while (0)
 #endif
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 extern void sched_balance_exec(void);
 extern void node_nr_running_init(void);
 #else
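
The hunk is cut off at the #else, but since the two declarations above exist
only under CONFIG_NUMA_SCHED, the other branch has to supply no-op fallbacks
in the style of the set_cpus_allowed() stub visible above. A plausible sketch
(assumption; not shown in the hunk):

	#else
	# define sched_balance_exec()		do { } while (0)
	# define node_nr_running_init()		do { } while (0)
	#endif
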
diff -urpN -X /home/fletch/.diff.exclude 640-per_node_idt/kernel/sched.c 696-config_numasched/kernel/sched.c
--- 640-per_node_idt/kernel/sched.c	Fri May 30 19:31:41 2003
+++ 696-config_numasched/kernel/sched.c	Fri May 30 22:02:10 2003
@@ -33,7 +33,7 @@
 #include <linux/timer.h>
 #include <linux/rcupdate.h>
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
 #else
 #define cpu_to_node_mask(cpu) (cpu_online_map)
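
This macro is what confines balancing to one node: with NUMA_SCHED,
cpu_to_node_mask() resolves to just the CPUs sharing the caller's node,
otherwise to the whole online map, so the generic balancing code can use it
unconditionally. Illustration with a hypothetical 2-node, 8-CPU topology
(CPUs 0-3 on node 0, CPUs 4-7 on node 1):

	cpu_to_node_mask(5) == 0xf0;		/* NUMA_SCHED: node 1 only */
	cpu_to_node_mask(5) == cpu_online_map;	/* otherwise, e.g. 0xff    */
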
@@ -182,7 +182,7 @@ struct runqueue {
 	struct mm_struct *prev_mm;
 	prio_array_t *active, *expired, arrays[2];
 	int prev_cpu_load[NR_CPUS];
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 	atomic_t *node_nr_running;
 	int prev_node_load[MAX_NUMNODES];
 #endif
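
Note that node_nr_running is a pointer, not a counter: every runqueue on a
node gets aimed at one shared atomic_t, while prev_node_load records per-node
load history for find_busiest_node(). A sketch of the backing array this
implies (naming follows node_nr_running_init() in the next hunk, which wires
each runqueue to its node's slot):

	/* one shared counter per node; each runqueue points at its slot */
	static atomic_t node_nr_running[MAX_NUMNODES];
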
@@ -211,7 +211,7 @@ static struct runqueue runqueues[NR_CPUS
 # define task_running(rq, p)		((rq)->curr == (p))
 #endif
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 
 /*
  * Keep track of running tasks.
@@ -245,13 +245,13 @@ __init void node_nr_running_init(void)
 		cpu_rq(i)->node_nr_running = &node_nr_running[cpu_to_node(i)];
 }
 
-#else /* !CONFIG_NUMA */
+#else /* !CONFIG_NUMA_SCHED */
 
 # define nr_running_init(rq)   do { } while (0)
 # define nr_running_inc(rq)    do { (rq)->nr_running++; } while (0)
 # define nr_running_dec(rq)    do { (rq)->nr_running--; } while (0)
 
-#endif /* CONFIG_NUMA */
+#endif /* CONFIG_NUMA_SCHED */
 
 
 struct schedstat {
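
The #else branch shows the plain-SMP fallbacks; under CONFIG_NUMA_SCHED the
elided inc/dec helpers presumably bump the shared node counter as well. A
sketch of that variant (assumption; only the fallback side is visible in the
hunk):

	static inline void nr_running_inc(runqueue_t *rq)
	{
		atomic_inc(rq->node_nr_running);	/* node-wide total */
		rq->nr_running++;			/* this queue only */
	}
	/* nr_running_dec() would mirror this with atomic_dec() and -- */
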
@@ -953,7 +953,7 @@ static inline void double_rq_unlock(runq
 		spin_unlock(&rq2->lock);
 }
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
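
The comment is truncated by the hunk, but it describes a standard trick: pin
the task to dest_cpu through its cpus_allowed mask so the ordinary migration
machinery moves it, then restore the saved mask. A plausible shape for the
function that follows (sketch; everything beyond the comment is an
assumption):

	static void sched_migrate_task(task_t *p, int dest_cpu)
	{
		unsigned long old_mask = p->cpus_allowed;

		if (!(old_mask & (1UL << dest_cpu)))
			return;				/* dest_cpu not allowed */
		set_cpus_allowed(p, 1UL << dest_cpu);	/* force the move       */
		set_cpus_allowed(p, old_mask);		/* restore the mask     */
	}
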
@@ -1081,7 +1081,7 @@ static int find_busiest_node(int this_no
 	return node;
 }
 
-#endif /* CONFIG_NUMA */
+#endif /* CONFIG_NUMA_SCHED */
 
 int idle_node_rebalance_ratio = 10;
 int busy_node_rebalance_ratio = 2;
@@ -1326,7 +1326,7 @@ out:
 #define IDLE_NODE_REBALANCE_TICK (IDLE_REBALANCE_TICK * idle_node_rebalance_ratio)
 #define BUSY_NODE_REBALANCE_TICK (BUSY_REBALANCE_TICK * busy_node_rebalance_ratio)
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 static void balance_node(runqueue_t *this_rq, int idle, int this_cpu)
 {
 	int node = find_busiest_node(cpu_to_node(this_cpu));
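
balance_node() is truncated after the find_busiest_node() call; a plausible
completion, given that the tick paths below hand it this_rq and idle (sketch;
the mask construction and the locking are assumptions):

	static void balance_node(runqueue_t *this_rq, int idle, int this_cpu)
	{
		int node = find_busiest_node(cpu_to_node(this_cpu));
		unsigned long cpumask;

		if (node >= 0) {
			/* include ourselves so load_balance() may pull here */
			cpumask = node_to_cpumask(node) | (1UL << this_cpu);
			spin_lock(&this_rq->lock);
			load_balance(this_rq, idle, cpumask);
			spin_unlock(&this_rq->lock);
		}
	}
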
@@ -1359,7 +1359,7 @@ static void rebalance_tick(runqueue_t *t
 	 * are not balanced.)
 	 */
 	if (idle) {
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 		if (!(j % IDLE_NODE_REBALANCE_TICK))
 			balance_node(this_rq, idle, this_cpu);
 #endif
@@ -1371,7 +1371,7 @@ static void rebalance_tick(runqueue_t *t
 		}
 		return;
 	}
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 	if (!(j % BUSY_NODE_REBALANCE_TICK))
 		balance_node(this_rq, idle, this_cpu);
 #endif
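
Net effect of the last two hunks: cross-node balancing piggybacks on the
existing rebalance tick at a multiple of the intra-node rate, 10x less often
on idle CPUs and 2x less often on busy ones (the ratios declared earlier).
Worked example with hypothetical base values (IDLE_REBALANCE_TICK == 1 jiffy,
BUSY_REBALANCE_TICK == HZ/4, HZ == 1000):

	IDLE_NODE_REBALANCE_TICK == 1 * 10   ==  10 jiffies (~10ms)
	BUSY_NODE_REBALANCE_TICK == 250 * 2  == 500 jiffies (~500ms)
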