patch-2.3.2 linux/arch/alpha/kernel/time.c
- Lines: 165
- Date: Fri May 14 12:41:23 1999
- Orig file: v2.3.1/linux/arch/alpha/kernel/time.c
- Orig date: Sat Apr 24 17:54:08 1999
diff -u --recursive --new-file v2.3.1/linux/arch/alpha/kernel/time.c linux/arch/alpha/kernel/time.c
@@ -42,6 +42,9 @@
#include "proto.h"
#include "irq.h"
+extern rwlock_t xtime_lock;
+extern volatile unsigned long lost_ticks; /*kernel/sched.c*/
+
static int set_rtc_mmss(unsigned long);
@@ -86,15 +89,15 @@
long nticks;
#ifdef __SMP__
- extern void smp_percpu_timer_interrupt(struct pt_regs *);
- extern unsigned int boot_cpu_id;
- /* when SMP, do this for *all* CPUs,
- but only do the rest for the boot CPU */
+ /* When SMP, do this for *all* CPUs, but only do the rest for
+ the boot CPU. */
smp_percpu_timer_interrupt(regs);
- if (smp_processor_id() != boot_cpu_id)
- return;
+ if (smp_processor_id() != smp_boot_cpuid)
+ return;
#endif
+ write_lock(&xtime_lock);
+
/*
* Calculate how many ticks have passed since the last update,
* including any previous partial leftover. Save any resulting
@@ -124,6 +127,8 @@
int tmp = set_rtc_mmss(xtime.tv_sec);
state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0);
}
+
+ write_unlock(&xtime_lock);
}
/*
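The hunks above change timer_interrupt() in two ways: the boot-CPU test now uses smp_boot_cpuid, and the xtime/state update is bracketed by write_lock(&xtime_lock) / write_unlock(&xtime_lock) so that do_gettimeofday() can take the read side of the same lock instead of blocking all interrupts. The following is a userspace sketch of that writer/reader split (plain C with POSIX rwlocks, not kernel code; HZ is taken as 1024 as on Alpha, and the struct layout is only for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t xtime_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct { long tv_sec, tv_usec; } xtime;   /* stand-in for kernel xtime */

/* "Timer tick": the single writer, analogous to the write_lock()/
   write_unlock() pair added to timer_interrupt() above. */
static void timer_tick(void)
{
        pthread_rwlock_wrlock(&xtime_lock);
        xtime.tv_usec += 1000000 / 1024;         /* one tick at HZ=1024 */
        if (xtime.tv_usec >= 1000000) {
                xtime.tv_usec -= 1000000;
                xtime.tv_sec++;
        }
        pthread_rwlock_unlock(&xtime_lock);
}

/* "gettimeofday": a reader that takes a consistent snapshot, analogous
   to the read_lock_irqsave() section in do_gettimeofday(). */
static void get_time(long *sec, long *usec)
{
        pthread_rwlock_rdlock(&xtime_lock);
        *sec = xtime.tv_sec;
        *usec = xtime.tv_usec;
        pthread_rwlock_unlock(&xtime_lock);
}

int main(void)
{
        long s, u;
        timer_tick();
        get_time(&s, &u);
        printf("%ld.%06ld\n", s, u);
        return 0;
}

The gain over the old save_and_cli() approach is that any number of readers can snapshot the time concurrently; only the tick path needs exclusive access.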
@@ -226,7 +231,8 @@
{
void (*irq_handler)(int, void *, struct pt_regs *);
unsigned int year, mon, day, hour, min, sec, cc1, cc2;
- unsigned long cycle_freq, diff, one_percent;
+ unsigned long cycle_freq, one_percent;
+ long diff;
/*
* The Linux interpretation of the CMOS clock register contents:
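In the time_init() hunk above, diff changes from unsigned long to long. The comparison code itself is not shown in the patch, but since one_percent is kept and est_cycle_freq is a measured value, the point is presumably that the difference between the measured and the claimed frequency can be negative, and with an unsigned type a small negative difference wraps to a huge value. A minimal userspace illustration, with made-up frequencies:

#include <stdio.h>

int main(void)
{
        unsigned long cycle_freq     = 500000000UL;   /* hypothetical firmware value */
        unsigned long est_cycle_freq = 499000000UL;   /* hypothetical measured value */
        unsigned long one_percent    = cycle_freq / 100;

        /* Unsigned: a measurement that comes out low wraps around to a
           huge number and would always fail a tolerance test. */
        unsigned long udiff = est_cycle_freq - cycle_freq;

        /* Signed: the difference stays small and its magnitude can be
           compared against the one-percent tolerance. */
        long sdiff = (long)est_cycle_freq - (long)cycle_freq;
        if (sdiff < 0)
                sdiff = -sdiff;

        printf("unsigned diff: %lu\n", udiff);
        printf("signed |diff|: %ld (%s one_percent = %lu)\n", sdiff,
               (unsigned long)sdiff <= one_percent ? "within" : "outside",
               one_percent);
        return 0;
}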
@@ -242,7 +248,7 @@
if (!est_cycle_freq) {
/* Sometimes the hwrpb->cycle_freq value is bogus.
- Go another round to check up on it and see. */
+ Go another round to check up on it and see. */
do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
cc2 = rpcc();
@@ -279,8 +285,7 @@
mon = CMOS_READ(RTC_MONTH);
year = CMOS_READ(RTC_YEAR);
- if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- {
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
BCD_TO_BIN(sec);
BCD_TO_BIN(min);
BCD_TO_BIN(hour);
@@ -328,18 +333,24 @@
void
do_gettimeofday(struct timeval *tv)
{
- unsigned long flags, delta_cycles, delta_usec;
- unsigned long sec, usec;
- __u32 now;
- extern volatile unsigned long lost_ticks; /*kernel/sched.c*/
+ unsigned long sec, usec, lost, flags;
+ unsigned long delta_cycles, delta_usec, partial_tick;
- now = rpcc();
- save_and_cli(flags);
+ read_lock_irqsave(&xtime_lock, flags);
+
+ delta_cycles = rpcc() - state.last_time;
sec = xtime.tv_sec;
usec = xtime.tv_usec;
- delta_cycles = now - state.last_time;
- restore_flags(flags);
+ partial_tick = state.partial_tick;
+ lost = lost_ticks;
+
+ read_unlock_irqrestore(&xtime_lock, flags);
+#ifdef __SMP__
+ /* Until and unless we figure out how to get cpu cycle counters
+ in sync and keep them there, we can't use the rpcc tricks. */
+ delta_usec = lost * (1000000 / HZ);
+#else
/*
* usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
* = cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
@@ -354,13 +365,10 @@
*/
delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
- + state.partial_tick
- + (lost_ticks << FIX_SHIFT) ) * 15625;
+ + partial_tick
+ + (lost << FIX_SHIFT)) * 15625;
delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
-
- /* the 'lost_tics' term above implements this:
- * delta_usec += lost_ticks * (1000000 / HZ);
- */
+#endif
usec += delta_usec;
if (usec >= 1000000) {
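The rewritten do_gettimeofday() takes a consistent snapshot of last_time, xtime, partial_tick and lost_ticks under read_lock_irqsave(&xtime_lock, ...), then converts the cycle delta to microseconds. On SMP the per-CPU cycle counters are not synchronized, so only whole lost ticks are accounted for; on UP the fixed-point formula from the comment above is used. The sketch below reproduces that arithmetic in userspace; FIX_SHIFT = 48 (matching the 2**48 in the comment), HZ = 1024 and the 500 MHz cycle frequency are assumptions for the example, not values taken from the patch:

#include <stdio.h>

#define FIX_SHIFT 48                     /* assumed, matches the 2**48 above */
#define HZ        1024                   /* Alpha tick rate */

int main(void)
{
        unsigned long long cycle_freq = 500000000ULL;    /* hypothetical 500 MHz */
        unsigned long long scaled_ticks_per_cycle =
                ((unsigned long long)HZ << FIX_SHIFT) / cycle_freq;

        unsigned long long delta_cycles = 250000;        /* cycles since last tick */
        unsigned long long partial_tick = 0;
        unsigned long long lost = 0;                     /* snapshot of lost_ticks */

        /* usec = ticks-in-fixed-point * 1e6 / (2**FIX_SHIFT * HZ).  Since
           1e6 = 15625 * 2**6, this is a multiply by 15625 and a divide by
           2**(FIX_SHIFT-6) * HZ; shifting one bit less and doing "(+1)/2"
           at the end rounds to nearest instead of truncating. */
        unsigned long long delta_usec =
                (delta_cycles * scaled_ticks_per_cycle
                 + partial_tick
                 + (lost << FIX_SHIFT)) * 15625;
        delta_usec = ((delta_usec / ((1ULL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;

        printf("delta_usec = %llu\n", delta_usec);  /* 250000 cycles @ 500 MHz = 500 */
        return 0;
}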
@@ -375,13 +383,41 @@
void
do_settimeofday(struct timeval *tv)
{
- cli();
- xtime = *tv;
+ unsigned long delta_usec;
+ long sec, usec;
+
+ write_lock_irq(&xtime_lock);
+
+ /* The offset that is added into time in do_gettimeofday above
+ must be subtracted out here to keep a coherent view of the
+ time. Without this, a full-tick error is possible. */
+
+#ifdef __SMP__
+ delta_usec = lost_ticks * (1000000 / HZ);
+#else
+ delta_usec = rpcc() - state.last_time;
+ delta_usec = (delta_usec * state.scaled_ticks_per_cycle
+ + state.partial_tick
+ + (lost_ticks << FIX_SHIFT)) * 15625;
+ delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
+#endif
+
+ sec = tv->tv_sec;
+ usec = tv->tv_usec;
+ usec -= delta_usec;
+ if (usec < 0) {
+ usec += 1000000;
+ sec -= 1;
+ }
+
+ xtime.tv_sec = sec;
+ xtime.tv_usec = usec;
time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT;
time_esterror = NTP_PHASE_LIMIT;
- sti();
+
+ write_unlock_irq(&xtime_lock);
}
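do_settimeofday() now subtracts the same delta_usec that a concurrent do_gettimeofday() would add on top of xtime (whole lost ticks on SMP, the cycle-counter extrapolation on UP), so that reading the clock right after setting it returns roughly the value that was set rather than being up to a full tick ahead. The subtraction can borrow from the seconds field; a trivial userspace sketch of that normalization, with made-up numbers:

#include <stdio.h>

int main(void)
{
        long sec = 926694083, usec = 250;        /* value being set */
        unsigned long delta_usec = 700;          /* hypothetical gettimeofday offset */

        usec -= (long)delta_usec;
        if (usec < 0) {                          /* borrow from the seconds field */
                usec += 1000000;
                sec -= 1;
        }

        printf("stored xtime = %ld.%06ld\n", sec, usec);  /* 926694082.999550 */
        return 0;
}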