patch-2.4.13 linux/arch/i386/kernel/smp.c
- Lines: 98
- Date: Tue Oct 23 14:17:10 2001
- Orig file: v2.4.12/linux/arch/i386/kernel/smp.c
- Orig date: Tue Oct 9 17:06:51 2001
diff -u --recursive --new-file v2.4.12/linux/arch/i386/kernel/smp.c linux/arch/i386/kernel/smp.c
@@ -507,10 +507,9 @@
atomic_t started;
atomic_t finished;
int wait;
-} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+};
static struct call_data_struct * call_data;
-static struct call_data_struct call_data_array[NR_CPUS];
/*
* this function sends a 'generic call function' IPI to all other CPUs
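For reference, the hunk above strips the cache-line alignment attribute and drops the per-CPU descriptor array, leaving the structure and its global pointer looking roughly like this. The func and info fields are not visible in the hunk and are filled in here from the surrounding 2.4 source, so treat this as a sketch:

	struct call_data_struct {
		void (*func) (void *info);	/* function to run on each CPU */
		void *info;			/* argument handed to func */
		atomic_t started;		/* CPUs that have copied func/info */
		atomic_t finished;		/* CPUs done running func (wait==1) */
		int wait;
	};

	/* Points at the initiator's on-stack descriptor while a call is in flight. */
	static struct call_data_struct * call_data;

Since callers now serialize on call_lock (see the next hunk), only one descriptor can be live at a time, so an NR_CPUS-sized, cache-aligned array buys nothing.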
@@ -532,45 +531,33 @@
* hardware interrupt handler, you may call it from a bottom half handler.
*/
{
- struct call_data_struct *data;
- int cpus = (cpu_online_map & ~(1 << smp_processor_id()));
+ struct call_data_struct data;
+ int cpus = smp_num_cpus-1;
if (!cpus)
return 0;
- data = &call_data_array[smp_processor_id()];
-
- data->func = func;
- data->info = info;
- data->wait = wait;
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
if (wait)
- atomic_set(&data->finished, 0);
- /* We have do to this one last to make sure that the IPI service
- * code desn't get confused if it gets an unexpected repeat
- * trigger of an old IPI while we're still setting up the new
- * one. */
- atomic_set(&data->started, 0);
-
- local_bh_disable();
- spin_lock(&call_lock);
- call_data = data;
+ atomic_set(&data.finished, 0);
+
+ spin_lock_bh(&call_lock);
+ call_data = &data;
+ wmb();
/* Send a message to all other CPUs and wait for them to respond */
send_IPI_allbutself(CALL_FUNCTION_VECTOR);
/* Wait for response */
- while (atomic_read(&data->started) != cpus)
+ while (atomic_read(&data.started) != cpus)
barrier();
- /* It is now safe to reuse the "call_data" global, but we need
- * to keep local bottom-halves disabled until after waiters have
- * been acknowledged to prevent reuse of the per-cpu call data
- * entry. */
- spin_unlock(&call_lock);
-
if (wait)
- while (atomic_read(&data->finished) != cpus)
+ while (atomic_read(&data.finished) != cpus)
barrier();
- local_bh_enable();
+ spin_unlock_bh(&call_lock);
return 0;
}
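Pieced together, the post-patch smp_call_function reads approximately as below. The function signature and the call_lock declaration are not part of this hunk and are reconstructed from the surrounding 2.4 source:

	static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;

	int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
				int wait)
	{
		struct call_data_struct data;	/* lives on the initiator's stack */
		int cpus = smp_num_cpus-1;

		if (!cpus)
			return 0;

		data.func = func;
		data.info = info;
		atomic_set(&data.started, 0);
		data.wait = wait;
		if (wait)
			atomic_set(&data.finished, 0);

		spin_lock_bh(&call_lock);
		call_data = &data;
		wmb();	/* publish the descriptor before the IPI can be observed */
		/* Send a message to all other CPUs and wait for them to respond */
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);

		/* Wait for response */
		while (atomic_read(&data.started) != cpus)
			barrier();

		if (wait)
			while (atomic_read(&data.finished) != cpus)
				barrier();
		spin_unlock_bh(&call_lock);

		return 0;
	}

The on-stack descriptor is safe because call_lock serializes initiators, and the function does not return (popping data off the stack) until every other CPU has at least copied func and info out of it, or, with wait set, has finished running func.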
@@ -620,17 +607,18 @@
ack_APIC_irq();
/*
- * Notify initiating CPU that I've grabbed the data and am about
- * to execute the function (and avoid servicing any single IPI
- * twice)
+ * Notify initiating CPU that I've grabbed the data and am
+ * about to execute the function
*/
- if (test_and_set_bit(smp_processor_id(), &call_data->started))
- return;
+ mb();
+ atomic_inc(&call_data->started);
/*
* At this point the info structure may be out of scope unless wait==1
*/
(*func)(info);
- if (wait)
- set_bit(smp_processor_id(), &call_data->finished);
+ if (wait) {
+ mb();
+ atomic_inc(&call_data->finished);
+ }
}
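And the receiving side after the patch, again with the leading local-variable copies reconstructed from context not shown in the hunk:

	asmlinkage void smp_call_function_interrupt(void)
	{
		void (*func) (void *info) = call_data->func;
		void *info = call_data->info;
		int wait = call_data->wait;

		ack_APIC_irq();
		/*
		 * Notify initiating CPU that I've grabbed the data and am
		 * about to execute the function
		 */
		mb();	/* order the copies above before bumping started */
		atomic_inc(&call_data->started);
		/*
		 * At this point the info structure may be out of scope unless wait==1
		 */
		(*func)(info);
		if (wait) {
			mb();	/* make func's side effects visible before finished */
			atomic_inc(&call_data->finished);
		}
	}

The mb() before the started increment pairs with the initiator's wmb(): once started is bumped, the initiator is free to let data go out of scope (in the wait==0 case), so the handler must be done reading the descriptor by then. This also replaces the old test_and_set_bit bitmask scheme, which existed only to guard the reusable per-CPU entries against a repeated IPI.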
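The same rendezvous can be demonstrated in userspace with C11 atomics. This is only an illustrative analogue, not kernel code: the names worker, initiator logic, and NWORKERS are made up for the sketch, the go flag stands in for the IPI, and seq_cst atomic_store/atomic_load stand in for wmb()/mb().

	/* Build: cc -std=c11 -pthread rendezvous.c */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define NWORKERS 4	/* plays the role of the other CPUs */

	struct call_data_struct {
		void (*func)(void *info);
		void *info;
		atomic_int started;
		atomic_int finished;
		int wait;
	};

	static struct call_data_struct *call_data;	/* the published pointer */
	static atomic_int go;				/* stands in for the IPI */

	static void say_hello(void *info)
	{
		printf("hello via %s\n", (const char *)info);
	}

	static void *worker(void *arg)
	{
		(void)arg;
		while (!atomic_load(&go))		/* wait for the "IPI" */
			;
		/* Copy everything out of the initiator's on-stack descriptor... */
		void (*func)(void *) = call_data->func;
		void *info = call_data->info;
		int wait = call_data->wait;
		/* ...then tell the initiator it may reuse that memory. */
		atomic_fetch_add(&call_data->started, 1);
		func(info);
		if (wait)
			atomic_fetch_add(&call_data->finished, 1);
		return NULL;
	}

	int main(void)
	{
		pthread_t tid[NWORKERS];
		struct call_data_struct data = { .func = say_hello,
						 .info = "an on-stack descriptor",
						 .wait = 1 };
		atomic_init(&data.started, 0);
		atomic_init(&data.finished, 0);

		for (int i = 0; i < NWORKERS; i++)
			pthread_create(&tid[i], NULL, worker, NULL);

		call_data = &data;	/* publish; sequenced before the store to go */
		atomic_store(&go, 1);	/* "send_IPI_allbutself" */

		while (atomic_load(&data.started) != NWORKERS)
			;		/* func/info may now be reused */
		while (atomic_load(&data.finished) != NWORKERS)
			;		/* data may now safely go out of scope */

		for (int i = 0; i < NWORKERS; i++)
			pthread_join(tid[i], NULL);
		return 0;
	}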