patch-2.4.22 linux-2.4.22/arch/mips64/kernel/traps.c
Next file: linux-2.4.22/arch/mips64/kernel/unaligned.c
Previous file: linux-2.4.22/arch/mips64/kernel/time.c
Back to the patch index
Back to the overall index
- Lines: 707
- Date:
2003-08-25 04:44:40.000000000 -0700
- Orig file:
linux-2.4.21/arch/mips64/kernel/traps.c
- Orig date:
2002-11-28 15:53:10.000000000 -0800
diff -urN linux-2.4.21/arch/mips64/kernel/traps.c linux-2.4.22/arch/mips64/kernel/traps.c
@@ -3,11 +3,13 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1994 - 1999 by Ralf Baechle
+ * Copyright (C) 1994 - 1999, 2000, 01 Ralf Baechle
* Copyright (C) 1995, 1996 Paul M. Antoine
* Copyright (C) 1998 Ulf Carlsson
* Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) 2002 Maciej W. Rozycki
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000, 01 MIPS Technologies, Inc.
+ * Copyright (C) 2002, 2003 Maciej W. Rozycki
*/
#include <linux/config.h>
#include <linux/init.h>
@@ -21,12 +23,14 @@
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/cpu.h>
+#include <asm/fpu.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/watch.h>
#include <asm/system.h>
+#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
@@ -47,6 +51,7 @@
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
+extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
@@ -54,12 +59,8 @@
extern int fpu_emulator_cop1Handler(int xcptno, struct pt_regs *xcp,
struct mips_fpu_soft_struct *ctx);
-void fpu_emulator_init_fpu(void);
-
-char watch_available = 0;
-char dedicated_iv_available = 0;
-
-int (*be_board_handler)(struct pt_regs *regs, int is_fixup);
+void (*board_be_init)(void);
+int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
int kstack_depth_to_print = 24;
@@ -69,8 +70,6 @@
*/
#define MODULE_RANGE (8*1024*1024)
-#define OPCODE 0xfc000000
-
/*
* If the address is either in the .text section of the
* kernel, or in the vmalloc'ed module regions, it *may*
@@ -190,7 +189,6 @@
show_trace((long *)tsk->thread.reg29);
}
-
void show_code(unsigned int *pc)
{
long i;
@@ -214,15 +212,15 @@
printk("$0 : %016lx %016lx %016lx %016lx\n",
0UL, regs->regs[1], regs->regs[2], regs->regs[3]);
printk("$4 : %016lx %016lx %016lx %016lx\n",
- regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
+ regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
printk("$8 : %016lx %016lx %016lx %016lx\n",
regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11]);
printk("$12 : %016lx %016lx %016lx %016lx\n",
- regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]);
+ regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]);
printk("$16 : %016lx %016lx %016lx %016lx\n",
regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19]);
printk("$20 : %016lx %016lx %016lx %016lx\n",
- regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]);
+ regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]);
printk("$24 : %016lx %016lx\n",
regs->regs[24], regs->regs[25]);
printk("$28 : %016lx %016lx %016lx %016lx\n",
@@ -336,7 +334,7 @@
spin_lock_irqsave(&modlist_lock, flags);
for (mp = module_list; mp != NULL; mp = mp->next) {
if (!mod_member_present(mp, archdata_end) ||
- !mod_archdata_member_present(mp, struct archdata,
+ !mod_archdata_member_present(mp, struct archdata,
dbe_table_end))
continue;
ap = (struct archdata *)(mp->archdata_start);
@@ -367,8 +365,8 @@
if (fixup)
action = MIPS_BE_FIXUP;
- if (be_board_handler)
- action = be_board_handler(regs, fixup != 0);
+ if (board_be_handler)
+ action = board_be_handler(regs, fixup != 0);
switch (action) {
case MIPS_BE_DISCARD:
@@ -395,12 +393,151 @@
force_sig(SIGBUS, current);
}
-asmlinkage void do_ov(struct pt_regs *regs)
+static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
{
- siginfo_t info;
+ unsigned int *epc;
+
+ epc = (unsigned int *) regs->cp0_epc +
+ ((regs->cp0_cause & CAUSEF_BD) != 0);
+ if (!get_user(*opcode, epc))
+ return 0;
+
+ force_sig(SIGSEGV, current);
+ return 1;
+}
+
+/*
+ * ll/sc emulation
+ */
+
+#define OPCODE 0xfc000000
+#define BASE 0x03e00000
+#define RT 0x001f0000
+#define OFFSET 0x0000ffff
+#define LL 0xc0000000
+#define SC 0xe0000000
+
+/*
+ * The ll_bit is cleared by r*_switch.S
+ */
+
+unsigned long ll_bit;
+
+static struct task_struct *ll_task = NULL;
+
+static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
+{
+ unsigned long value, *vaddr;
+ long offset;
+ int signal = 0;
+
+ /*
+ * analyse the ll instruction that just caused a ri exception
+ * and put the referenced address to addr.
+ */
+
+ /* sign extend offset */
+ offset = opcode & OFFSET;
+ offset <<= 16;
+ offset >>= 16;
+
+ vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+
+ if ((unsigned long)vaddr & 3) {
+ signal = SIGBUS;
+ goto sig;
+ }
+ if (get_user(value, vaddr)) {
+ signal = SIGSEGV;
+ goto sig;
+ }
+
+ if (ll_task == NULL || ll_task == current) {
+ ll_bit = 1;
+ } else {
+ ll_bit = 0;
+ }
+ ll_task = current;
- if (compute_return_epc(regs))
+ regs->regs[(opcode & RT) >> 16] = value;
+
+ compute_return_epc(regs);
+ return;
+
+sig:
+ force_sig(signal, current);
+}
+
+static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
+{
+ unsigned long *vaddr, reg;
+ long offset;
+ int signal = 0;
+
+ /*
+ * analyse the sc instruction that just caused a ri exception
+ * and put the referenced address to addr.
+ */
+
+ /* sign extend offset */
+ offset = opcode & OFFSET;
+ offset <<= 16;
+ offset >>= 16;
+
+ vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+ reg = (opcode & RT) >> 16;
+
+ if ((unsigned long)vaddr & 3) {
+ signal = SIGBUS;
+ goto sig;
+ }
+ if (ll_bit == 0 || ll_task != current) {
+ regs->regs[reg] = 0;
+ compute_return_epc(regs);
return;
+ }
+
+ if (put_user(regs->regs[reg], vaddr)) {
+ signal = SIGSEGV;
+ goto sig;
+ }
+
+ regs->regs[reg] = 1;
+
+ compute_return_epc(regs);
+ return;
+
+sig:
+ force_sig(signal, current);
+}
+
+/*
+ * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is both
+ * opcodes are supposed to result in coprocessor unusable exceptions if
+ * executed on ll/sc-less processors. That's the theory. In practice a
+ * few processors such as NEC's VR4100 throw reserved instruction exceptions
+ * instead, so we're doing the emulation thing in both exception handlers.
+ */
+static inline int simulate_llsc(struct pt_regs *regs)
+{
+ unsigned int opcode;
+
+ if (unlikely(get_insn_opcode(regs, &opcode)))
+ return -EFAULT;
+
+ if ((opcode & OPCODE) == LL) {
+ simulate_ll(regs, opcode);
+ return 0;
+ }
+ if ((opcode & OPCODE) == SC) {
+ simulate_sc(regs, opcode);
+ return 0;
+ }
+
+ return -EFAULT; /* Strange things may happen ... */
+}
+
+asmlinkage void do_ov(struct pt_regs *regs)
+{
+ siginfo_t info;
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
@@ -444,36 +581,14 @@
/* If something went wrong, signal */
if (sig)
- {
- /*
- * Return EPC is not calculated in the FPU emulator,
- * if a signal is being send. So we calculate it here.
- */
- compute_return_epc(regs);
force_sig(sig, current);
- }
return;
}
- if (compute_return_epc(regs))
- return;
force_sig(SIGFPE, current);
}
-static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
-{
- unsigned long *epc;
-
- epc = (unsigned long *) regs->cp0_epc +
- ((regs->cp0_cause & CAUSEF_BD) != 0);
- if (!get_user(opcode, epc))
- return 0;
-
- force_sig(SIGSEGV, current);
- return 1;
-}
-
asmlinkage void do_bp(struct pt_regs *regs)
{
unsigned int opcode, bcode;
@@ -520,7 +635,7 @@
if (get_insn_opcode(regs, &opcode))
return;
- /* Immediate versions don't provide a code. */
+ /* Immediate versions don't provide a code. */
if (!(opcode & OPCODE))
tcode = ((opcode >> 6) & ((1 << 20) - 1));
@@ -551,8 +666,9 @@
{
die_if_kernel("Reserved instruction in kernel code", regs);
- if (compute_return_epc(regs))
- return;
+ if (!cpu_has_llsc)
+ if (!simulate_llsc(regs))
+ return;
force_sig(SIGILL, current);
}
@@ -560,69 +676,53 @@
asmlinkage void do_cpu(struct pt_regs *regs)
{
unsigned int cpid;
- void fpu_emulator_init_fpu(void);
- int sig;
+
+ die_if_kernel("do_cpu invoked from kernel context!", regs);
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
- if (cpid != 1)
- goto bad_cid;
- if (!(mips_cpu.options & MIPS_CPU_FPU))
- goto fp_emul;
+ switch (cpid) {
+ case 0:
+ if (cpu_has_llsc)
+ break;
- regs->cp0_status |= ST0_CU1;
+ if (!simulate_llsc(regs))
+ return;
+ break;
+
+ case 1:
+ own_fpu();
+ if (current->used_math) { /* Using the FPU again. */
+ restore_fp(current);
+ } else { /* First time FPU user. */
+ init_fpu();
+ current->used_math = 1;
+ }
+
+ if (!cpu_has_fpu) {
+ int sig = fpu_emulator_cop1Handler(0, regs,
+ &current->thread.fpu.soft);
+ if (sig)
+ force_sig(sig, current);
+ }
-#ifdef CONFIG_SMP
- if (current->used_math) {
- lazy_fpu_switch(0, current);
- } else {
- init_fpu();
- current->used_math = 1;
- }
- current->flags |= PF_USEDFPU;
-#else
- if (last_task_used_math == current)
return;
- if (current->used_math) { /* Using the FPU again. */
- lazy_fpu_switch(last_task_used_math, current);
- } else { /* First time FPU user. */
- lazy_fpu_switch(last_task_used_math, 0);
- init_fpu();
- current->used_math = 1;
+ case 2:
+ case 3:
+ break;
}
- last_task_used_math = current;
-#endif
- return;
-fp_emul:
- if (last_task_used_math != current) {
- if (!current->used_math) {
- fpu_emulator_init_fpu();
- current->used_math = 1;
- }
- }
- sig = fpu_emulator_cop1Handler(0, regs, &current->thread.fpu.soft);
- last_task_used_math = current;
- if (sig) {
- /*
- * Return EPC is not calculated in the FPU emulator, if
- * a signal is being send. So we calculate it here.
- */
- compute_return_epc(regs);
- force_sig(sig, current);
- }
- return;
+ force_sig(SIGILL, current);
+}
-bad_cid:
- compute_return_epc(regs);
+asmlinkage void do_mdmx(struct pt_regs *regs)
+{
force_sig(SIGILL, current);
}
asmlinkage void do_watch(struct pt_regs *regs)
{
- extern void dump_tlb_all(void);
-
/*
* We use the watch exception where available to detect stack
* overflows.
@@ -652,26 +752,9 @@
* caused by a new unknown cpu type or after another deadly
* hard/software error.
*/
+ show_regs(regs);
panic("Caught reserved exception %ld - should not happen.",
- (regs->cp0_cause & 0x1f) >> 2);
-}
-
-static inline void watch_init(unsigned long cputype)
-{
- switch(cputype) {
- case CPU_R10000:
- case CPU_R4000MC:
- case CPU_R4400MC:
- case CPU_R4000SC:
- case CPU_R4400SC:
- case CPU_R4000PC:
- case CPU_R4400PC:
- case CPU_R4200:
- case CPU_R4300:
- set_except_vector(23, handle_watch);
- watch_available = 1;
- break;
- }
+ (regs->cp0_cause & 0x7f) >> 2);
}
unsigned long exception_handlers[32];
@@ -687,7 +770,7 @@
unsigned long old_handler = exception_handlers[n];
exception_handlers[n] = handler;
- if (n == 0 && mips_cpu.options & MIPS_CPU_DIVEC) {
+ if (n == 0 && cpu_has_divec) {
*(volatile u32 *)(KSEG0+0x200) = 0x08000000 |
(0x03ffffff & (handler >> 2));
flush_icache_range(KSEG0+0x200, KSEG0 + 0x204);
@@ -697,164 +780,149 @@
asmlinkage int (*save_fp_context)(struct sigcontext *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext *sc);
+
+asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
+asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);
+
extern asmlinkage int _save_fp_context(struct sigcontext *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
+extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
+extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);
+
extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
+extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
+extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);
+
void __init per_cpu_trap_init(void)
{
unsigned int cpu = smp_processor_id();
/* Some firmware leaves the BEV flag set, clear it. */
- clear_cp0_status(ST0_CU1|ST0_CU2|ST0_CU3|ST0_BEV);
- set_cp0_status(ST0_CU0|ST0_FR|ST0_KX|ST0_SX|ST0_UX);
+ clear_c0_status(ST0_CU1|ST0_CU2|ST0_CU3|ST0_BEV);
+ set_c0_status(ST0_CU0|ST0_FR|ST0_KX|ST0_SX|ST0_UX);
+
+ if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
+ set_c0_status(ST0_XX);
/*
* Some MIPS CPUs have a dedicated interrupt vector which reduces the
* interrupt processing overhead. Use it where available.
*/
- if (mips_cpu.options & MIPS_CPU_DIVEC)
- set_cp0_cause(CAUSEF_IV);
+ if (cpu_has_divec)
+ set_c0_cause(CAUSEF_IV);
cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
- set_context(((long)(&pgd_current[cpu])) << 23);
- set_wired(0);
+ write_c0_context(((long)(&pgd_current[cpu])) << 23);
+ write_c0_wired(0);
+
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ if (current->mm)
+ BUG();
+ enter_lazy_tlb(&init_mm, current, cpu);
}
void __init trap_init(void)
{
- extern char except_vec0;
- extern char except_vec1_r4k;
- extern char except_vec1_r10k;
- extern char except_vec2_generic;
+ extern char except_vec0_generic;
extern char except_vec3_generic, except_vec3_r4000;
extern char except_vec4;
unsigned long i;
- int dummy;
per_cpu_trap_init();
/* Copy the generic exception handlers to their final destination. */
- memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80);
+ memcpy((void *) KSEG0 , &except_vec0_generic, 0x80);
memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic, 0x80);
/*
* Setup default vectors
*/
- for(i = 0; i <= 31; i++)
+ for (i = 0; i <= 31; i++)
set_except_vector(i, handle_reserved);
/*
* Only some CPUs have the watch exceptions or a dedicated
* interrupt vector.
*/
- watch_init(mips_cpu.cputype);
+ if (cpu_has_watch)
+ set_except_vector(23, handle_watch);
/*
* Some MIPS CPUs have a dedicated interrupt vector which reduces the
* interrupt processing overhead. Use it where available.
*/
- memcpy((void *)(KSEG0 + 0x200), &except_vec4, 8);
-
- if (mips_cpu.options & MIPS_CPU_MCHECK)
- set_except_vector(24, handle_mcheck);
+ if (cpu_has_divec)
+ memcpy((void *)(KSEG0 + 0x200), &except_vec4, 0x8);
/*
* The Data Bus Errors / Instruction Bus Errors are signaled
* by external hardware. Therefore these two exceptions
* may have board specific handlers.
*/
- bus_error_init();
+ if (board_be_init)
+ board_be_init();
- /*
- * Handling the following exceptions depends mostly of the cpu type
- */
- switch(mips_cpu.cputype) {
- case CPU_SB1:
-#ifdef CONFIG_SB1_CACHE_ERROR
- {
- /* Special cache error handler for SB1 */
- extern char except_vec2_sb1;
- memcpy((void *)(KSEG0 + 0x100), &except_vec2_sb1, 0x80);
- memcpy((void *)(KSEG1 + 0x100), &except_vec2_sb1, 0x80);
- }
-#endif
- /* Enable timer interrupt and scd mapped interrupt */
- clear_cp0_status(0xf000);
- set_cp0_status(0xc00);
-
- /* Fall through. */
- case CPU_R10000:
- case CPU_R4000MC:
- case CPU_R4400MC:
- case CPU_R4000SC:
- case CPU_R4400SC:
- case CPU_R4000PC:
- case CPU_R4400PC:
- case CPU_R4200:
- case CPU_R4300:
- case CPU_R4600:
- case CPU_R5000:
- case CPU_NEVADA:
- case CPU_5KC:
- case CPU_20KC:
- case CPU_RM7000:
- /* Debug TLB refill handler. */
- memcpy((void *)KSEG0, &except_vec0, 0x80);
- if ((mips_cpu.options & MIPS_CPU_4KEX)
- && (mips_cpu.options & MIPS_CPU_4KTLB)) {
- memcpy((void *)KSEG0 + 0x080, &except_vec1_r4k, 0x80);
- } else {
- memcpy((void *)KSEG0 + 0x080, &except_vec1_r10k, 0x80);
- }
- if (mips_cpu.options & MIPS_CPU_VCE) {
- memcpy((void *)(KSEG0 + 0x180), &except_vec3_r4000,
- 0x80);
- } else {
- memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic,
- 0x80);
- }
+ set_except_vector(1, __xtlb_mod);
+ set_except_vector(2, __xtlb_tlbl);
+ set_except_vector(3, __xtlb_tlbs);
+ set_except_vector(4, handle_adel);
+ set_except_vector(5, handle_ades);
+
+ set_except_vector(6, handle_ibe);
+ set_except_vector(7, handle_dbe);
+
+ set_except_vector(8, handle_sys);
+ set_except_vector(9, handle_bp);
+ set_except_vector(10, handle_ri);
+ set_except_vector(11, handle_cpu);
+ set_except_vector(12, handle_ov);
+ set_except_vector(13, handle_tr);
+ set_except_vector(22, handle_mdmx);
- set_except_vector(1, __xtlb_mod);
- set_except_vector(2, __xtlb_tlbl);
- set_except_vector(3, __xtlb_tlbs);
- set_except_vector(4, handle_adel);
- set_except_vector(5, handle_ades);
-
- set_except_vector(6, handle_ibe);
- set_except_vector(7, handle_dbe);
-
- set_except_vector(8, handle_sys);
- set_except_vector(9, handle_bp);
- set_except_vector(10, handle_ri);
- set_except_vector(11, handle_cpu);
- set_except_vector(12, handle_ov);
- set_except_vector(13, handle_tr);
+ if (cpu_has_fpu && !cpu_has_nofpuex)
set_except_vector(15, handle_fpe);
- break;
- case CPU_R8000:
- panic("R8000 is unsupported");
- break;
+ if (cpu_has_mcheck)
+ set_except_vector(24, handle_mcheck);
- case CPU_UNKNOWN:
- default:
- panic("Unknown CPU type");
+ if (cpu_has_vce)
+ memcpy((void *)(KSEG0 + 0x180), &except_vec3_r4000, 0x100);
+ else if (cpu_has_4kex)
+ memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic, 0x80);
+ else
+ memcpy((void *)(KSEG0 + 0x080), &except_vec3_generic, 0x80);
+
+ if (current_cpu_data.cputype == CPU_R6000 ||
+ current_cpu_data.cputype == CPU_R6000A) {
+ /*
+ * The R6000 is the only R-series CPU that features a machine
+ * check exception (similar to the R4000 cache error) and
+ * unaligned ldc1/sdc1 exception. The handlers have not been
+ * written yet. Well, anyway there is no R6000 machine on the
+ * current list of targets for Linux/MIPS.
+ * (Duh, crap, there is someone with a tripple R6k machine)
+ */
+ //set_except_vector(14, handle_mc);
+ //set_except_vector(15, handle_ndc);
}
- flush_icache_range(KSEG0, KSEG0 + 0x200);
- if (mips_cpu.options & MIPS_CPU_FPU) {
- save_fp_context = _save_fp_context;
+ if (cpu_has_fpu) {
+ save_fp_context = _save_fp_context;
restore_fp_context = _restore_fp_context;
+ save_fp_context32 = _save_fp_context32;
+ restore_fp_context32 = _restore_fp_context32;
} else {
save_fp_context = fpu_emulator_save_context;
restore_fp_context = fpu_emulator_restore_context;
+ save_fp_context32 = fpu_emulator_save_context32;
+ restore_fp_context32 = fpu_emulator_restore_context32;
}
- if (mips_cpu.isa_level == MIPS_CPU_ISA_IV)
- set_cp0_status(ST0_XX);
+ flush_icache_range(KSEG0, KSEG0 + 0x400);
atomic_inc(&init_mm.mm_count); /* XXX UP? */
current->active_mm = &init_mm;
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)