patch-2.1.48 linux/arch/ppc/kernel/head.S
- Lines: 3061
- Date: Thu Jul 31 13:09:17 1997
- Orig file: v2.1.47/linux/arch/ppc/kernel/head.S
- Orig date: Wed Dec 18 00:49:52 1996
diff -u --recursive --new-file v2.1.47/linux/arch/ppc/kernel/head.S linux/arch/ppc/kernel/head.S
@@ -1,1040 +1,1144 @@
+/*
+ * arch/ppc/kernel/head.S
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ * Adapted for Power Macintosh by Paul Mackerras.
+ * Low-level exception handlers and MMU support
+ * rewritten by Paul Mackerras.
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This file contains the low-level support and setup for the
+ * PowerPC platform, including trap and interrupt dispatch.
+ * Also included here is low-level thread/task switch support.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
#include "ppc_asm.tmpl"
#include "ppc_defs.h"
-#include <linux/errno.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
#include <linux/sys.h>
-#include <asm/ppc_machine.h>
-
-#define NEWMM 1
-#define SYNC() \
- isync; \
- sync
-
-#define STATS
-/*
- * Increment a [64 bit] statistic counter
- * Uses R2, R3
- */
-#define BUMP(ctr) \
- lis r2,ctr@h; \
- ori r2,r2,ctr@l; \
- lwz r3,4(r2); \
- addic r3,r3,1; \
- stw r3,4(r2); \
- lwz r3,0(r2); \
- addze r3,r3; \
- stw r3,0(r2)
-
-/* The same as 'BUMP' but running unmapped (TLB code) */
-#define BUMP_UNMAPPED(ctr) \
- mfspr r0,XER; \
- lis r2,ctr@h; \
- ori r2,r2,ctr@l; \
- lis r3,0xF000; \
- andc r2,r2,r3; \
- lwz r3,4(r2); \
- addic r3,r3,1; \
- stw r3,4(r2); \
- lwz r3,0(r2); \
- addze r3,r3; \
- mtspr XER,r0; \
- stw r3,0(r2)
-
-#define DO_RFI_TRACE_UNMAPPED(mark)
-#define DO_RFI_TRACE_MAPPED(mark)
-
-#define DEFAULT_TRAP(offset) \
- li r13,0; \
- ori r13,r13,HID0_ICE; \
- mtspr HID0,r13; \
- lis r13,0xFFF00000>>16; \
- ori r13,r13,offset; \
- mtlr r13; \
- blr
-#define TRACE_TRAP(offset)
+#include <linux/errno.h>
+#include <linux/config.h>
-#define DATA_CACHE_OFF() \
- mfspr r2,HID0; \
- li r3,0; \
- ori r3,r3,HID0_DCE; \
- andc r2,r2,r3; \
- mtspr HID0,r2;
-
-#define DATA_CACHE_ON() \
- mfspr r2,HID0; \
- ori r2,r2,HID0_DCE; \
- mtspr HID0,r2;
+#define SYNC() \
+ sync; \
+ isync
-/* This instruction is not implemented on the PPC 603 */
+/* This instruction is not implemented on the PPC 603 or 601 */
#define tlbia \
- li r4,64; \
- mtspr CTR,r4; \
- lis r4,0x9000; \
+ li r4,128; \
+ mtctr r4; \
+ lis r4,0xC000; \
0: tlbie r4; \
addi r4,r4,0x1000; \
bdnz 0b
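
A minimal C sketch of the rewritten tlbia loop, where tlbie() is a hypothetical stand-in for the hardware instruction: 128 pages at a 4K stride walk every TLB congruence class, so every entry gets invalidated.

	extern void tlbie(unsigned long ea);	/* hypothetical wrapper */

	static void tlbia_sketch(void)
	{
		unsigned long ea = 0xC0000000UL;	/* KERNELBASE */
		int i;

		for (i = 0; i < 128; i++, ea += 0x1000)
			tlbie(ea);	/* invalidate the set this EA indexes */
	}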
-/* Validate kernel stack - check for overflow */
-/* all regs are considered scratch since the C function will stomp them */
-#define CHECK_STACK() \
- /*lis r3,current_set@ha; \
- lwz r3,current_set@l(r3); \
- bl _EXTERN(check_stack)*/
-#if 0
-#define _CHECK_STACK() \
- mtspr SPR0,r3; \
- mtspr SPR1,r4; /* use r3,4 as scratch */ \
- lis r2,current_set@ha; \
- lwz r2,current_set@l(r2); \
- lwz r2,KERNEL_STACK_PAGE(r2); \
- /* if kernel stack is sys_stack skip check */ \
- /*lis r3,sys_stack@h; \
- ori r3,r3,sys_stack@l; \
- cmpl 0,r1,r3;*/ \
- /* check for STACK_MAGIC on kernel stack page */ \
- lis r3, 0xdead; /* STACK_MAGIC */ \
- ori r3,r3,0xbeef; \
- lwz r4,0(r2); /* get *kernel_stack_page */ \
- cmpl 0,r4,r3; \
- bne 01f; \
- /* check that ksp is > kernel page */ \
- /*li r3,0x0FFF; \
- andc r2,r2,r3; \
- andc r3,r1,r3; \
- cmp 0,r3,r2; \
- beq 02f;*/ \
- /* check that ksp and kernel stack page are on same page */ \
- cmp 0,r1,r2; \
- bge 02f; \
-01: mr r6,r1; /* setup info for call to bad_stack() */ \
- mr r5,r2; \
- bl _EXTERN(bad_stack); \
-02: mfspr r4,SPR1; \
- mfspr r3,SPR0
-#endif
+#define TOPHYS(x) (x - KERNELBASE)
+
+
+/* this is a very kludgey way of loading up the BATs on the
+ prep system. I'll kill this horrible macro and write
+ something clean when I have a chance -- Cort
+ */
+#define LOAD_BATS(RA,RB) \
+ mfspr RA,PVR ; \
+ srwi RA,RA,16 ; \
+ cmpi 0,RA,1 ; \
+ beq 199f ; \
+ /* load bats for 60x */ ; \
+ lis RA,BAT0@h ; \
+ ori RA,RA,BAT0@l ; \
+ addis RA,RA,-KERNELBASE@h;\
+ lwz RB,0(RA) ; \
+ mtspr IBAT0U,RB ; \
+ mtspr DBAT0U,RB ; \
+ lwz RB,4(RA) ; \
+ mtspr IBAT0L,RB ; \
+ mtspr DBAT0L,RB ; \
+ lis RA,BAT1@h ; \
+ ori RA,RA,BAT1@l ; \
+ addis RA,RA,-KERNELBASE@h;\
+ lwz RB,0(RA) ; \
+ mtspr IBAT1U,RB ; \
+ mtspr DBAT1U,RB ; \
+ lwz RB,4(RA) ; \
+ mtspr IBAT1L,RB ; \
+ mtspr DBAT1L,RB ; \
+ lis RA,BAT2@h ; \
+ ori RA,RA,BAT2@l ; \
+ addis RA,RA,-KERNELBASE@h;\
+ lwz RB,0(RA) ; \
+ mtspr IBAT2U,RB ; \
+ mtspr DBAT2U,RB ; \
+ lwz RB,4(RA) ; \
+ mtspr IBAT2L,RB ; \
+ mtspr DBAT2L,RB ; \
+ lis RA,BAT3@h ; \
+ ori RA,RA,BAT3@l ; \
+ addis RA,RA,-KERNELBASE@h;\
+ lwz RB,0(RA) ; \
+ mtspr IBAT3U,RB ; \
+ mtspr DBAT3U,RB ; \
+ lwz RB,4(RA) ; \
+ mtspr IBAT3L,RB ; \
+ mtspr DBAT3L,RB ; \
+ b 200f ; \
+199: /*load bats for 601 */ ; \
+ lis RA,BAT0_601@h ; \
+ ori RA,RA,BAT0_601@l; \
+ addis RA,RA,-KERNELBASE@h;\
+ lwz RB,0(RA) ; \
+ mtspr IBAT0U,RB ; \
+ mtspr DBAT0U,RB ; \
+ lwz RB,4(RA) ; \
+ mtspr IBAT0L,RB ; \
+ mtspr DBAT0L,RB ; \
+ lis RA,BAT1_601@h ; \
+ ori RA,RA,BAT1_601@l; \
+ addis RA,RA,-KERNELBASE@h;\
+ lwz RB,0(RA) ; \
+ mtspr IBAT1U,RB ; \
+ mtspr DBAT1U,RB ; \
+ lwz RB,4(RA) ; \
+ mtspr IBAT1L,RB ; \
+ mtspr DBAT1L,RB ; \
+ lis RA,BAT2_601@h ; \
+ ori RA,RA,BAT2_601@l; \
+ addis RA,RA,-KERNELBASE@h;\
+ lwz RB,0(RA) ; \
+ mtspr IBAT2U,RB ; \
+ mtspr DBAT2U,RB ; \
+ lwz RB,4(RA) ; \
+ mtspr IBAT2L,RB ; \
+ mtspr DBAT2L,RB ; \
+ lis RA,BAT3_601@h ; \
+ ori RA,RA,BAT3_601@l; \
+ addis RA,RA,-KERNELBASE@h;\
+ lwz RB,0(RA) ; \
+ mtspr IBAT3U,RB ; \
+ mtspr DBAT3U,RB ; \
+ lwz RB,4(RA) ; \
+ mtspr IBAT3L,RB ; \
+ mtspr DBAT3L,RB ; \
+200:
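
For reference, each BAT is a pair of 32-bit words; here is a rough C picture of the values LOAD_BATS installs. The field layout follows the 60x manuals rather than anything defined in this file, and the example mirrors the 604 values used in __start further down (0xff in the upper word, 2 in the lower).

	struct bat_sketch {
		unsigned long upper;	/* BEPI | BL | Vs/Vp (valid bits) */
		unsigned long lower;	/* BRPN | WIMG | PP */
	};

	/* e.g. map 8MB at KERNELBASE to physical 0, read/write: */
	static const struct bat_sketch kernel_bat = {
		0xC0000000UL | (0x3F << 2) | 3,	/* BL=0x3F: 8MB, Vs=Vp=1 */
		0x00000000UL | 2,		/* PP=2: read/write */
	};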
-/* save fp regs if fp is used */
-/* assumes that r1 contains ptr to regs of task and r2 is scratch
- -- Cort */
-#define SAVE_FP_REGS() \
- /* check if fp has been used by checking msr_fp bit */ \
- lwz r2,_MSR(r1); \
- andi. r2,r2,MSR_FP; \
- bne 00f; \
- /* floating point has been used -- save fp regs */ \
- lis r2,current_set@h; \
- ori r2,r2,current_set@l; \
- addi r2,r2,TSS; \
- /*mr r2,r1;*/ \
- stfd fr0,TSS_FPR0(r2); \
- stfd fr1,TSS_FPR1(r2); \
- stfd fr2,TSS_FPR2(r2); \
- stfd fr3,TSS_FPR3(r2); \
- stfd fr4,TSS_FPR4(r2); \
- stfd fr5,TSS_FPR5(r2); \
- stfd fr6,TSS_FPR6(r2); \
- stfd fr7,TSS_FPR7(r2); \
- stfd fr8,TSS_FPR8(r2); \
- stfd fr9,TSS_FPR9(r2); \
- stfd fr10,TSS_FPR10(r2); \
- stfd fr11,TSS_FPR11(r2); \
- stfd fr12,TSS_FPR12(r2); \
- stfd fr13,TSS_FPR13(r2); \
- stfd fr14,TSS_FPR14(r2); \
- stfd fr15,TSS_FPR15(r2); \
- stfd fr16,TSS_FPR16(r2); \
- stfd fr17,TSS_FPR17(r2); \
- stfd fr18,TSS_FPR18(r2); \
- stfd fr19,TSS_FPR19(r2); \
- stfd fr20,TSS_FPR20(r2); \
- stfd fr21,TSS_FPR21(r2); \
- stfd fr22,TSS_FPR22(r2); \
- stfd fr23,TSS_FPR23(r2); \
- stfd fr24,TSS_FPR24(r2); \
- stfd fr25,TSS_FPR25(r2); \
- stfd fr26,TSS_FPR26(r2); \
- stfd fr27,TSS_FPR27(r2); \
- stfd fr28,TSS_FPR28(r2); \
- stfd fr29,TSS_FPR29(r2); \
- stfd fr30,TSS_FPR30(r2); \
- stfd fr31,TSS_FPR31(r2); \
-00:
-
-
-/* restores fp regs if fp has been used -- always restores fpscr */
-/* assumes that r1 contains ptr to regs, r2 is scratch and srr1 holds
- what will become the msr when this process executes -- Cort*/
-#define RESTORE_FP_REGS(mark) \
- /* check if restoring from _switch() */ \
- li r2, mark; \
- cmpi 0,r2,0x0f0f; \
- bne 00f; /* only need to save if called from _switch() with 0x0f0f */\
- /* check if fp has been used by checking msr_fp bit */ \
- /* srr1 contains msr */ \
- mfspr r2,SRR1; \
- andi. r2,r2,MSR_FP; \
- bne 00f; \
- /* floating point has been used -- restore fp regs */ \
- /* Hey, Rocky! Watch me pull fp regs from my stack! */ \
- lis r2,current_set@h; \
- ori r2,r2,current_set@l; \
- addi r2,r2,TSS; \
- /*mr r2,r1;*/\
- lfd fr0,TSS_FPR0(r2); \
- lfd fr1,TSS_FPR1(r2); \
- lfd fr2,TSS_FPR2(r2); \
- lfd fr3,TSS_FPR3(r2); \
- lfd fr4,TSS_FPR4(r2); \
- lfd fr5,TSS_FPR5(r2); \
- lfd fr6,TSS_FPR6(r2); \
- lfd fr7,TSS_FPR7(r2); \
- lfd fr8,TSS_FPR8(r2); \
- lfd fr9,TSS_FPR9(r2); \
- lfd fr10,TSS_FPR10(r2); \
- lfd fr11,TSS_FPR11(r2); \
- lfd fr12,TSS_FPR12(r2); \
- lfd fr13,TSS_FPR13(r2); \
- lfd fr14,TSS_FPR14(r2); \
- lfd fr15,TSS_FPR15(r2); \
- lfd fr16,TSS_FPR16(r2); \
- lfd fr17,TSS_FPR17(r2); \
- lfd fr18,TSS_FPR18(r2); \
- lfd fr19,TSS_FPR19(r2); \
- lfd fr20,TSS_FPR20(r2); \
- lfd fr21,TSS_FPR21(r2); \
- lfd fr22,TSS_FPR22(r2); \
- lfd fr23,TSS_FPR23(r2); \
- lfd fr24,TSS_FPR24(r2); \
- lfd fr25,TSS_FPR25(r2); \
- lfd fr26,TSS_FPR26(r2); \
- lfd fr27,TSS_FPR27(r2); \
- lfd fr28,TSS_FPR28(r2); \
- lfd fr29,TSS_FPR29(r2); \
- lfd fr30,TSS_FPR30(r2); \
- lfd fr31,TSS_FPR31(r2); \
-00:
-
-/* save all registers */
-#define SAVE_ALL_REGS(mark) \
- subi r1,r1,INT_FRAME_SIZE; /* Make room for frame */ \
- stmw r3,GPR3(r1); /* Save R3..R31 */ \
- stw r3,ORIG_GPR3(r1); \
- stw r0,GPR0(r1); \
- mfspr r2,SPR0; \
- stw r2,GPR1(r1); \
- mfspr r2,SPR1; \
- stw r2,GPR2(r1); \
- mfspr r2,SPR2; \
- stw r2,_NIP(r1); \
- mfspr r2,SPR3; \
- stw r2,_MSR(r1); \
- mfctr r2; \
- stw r2,_CTR(r1); \
- mflr r2; \
- stw r2,_LINK(r1); \
- mfcr r2; \
- stw r2,_CCR(r1); \
- mfspr r2,XER; \
- stw r2,_XER(r1); \
- mffs fr0; \
- stfd fr0,FPCSR(r1); \
- lis r2,_break_lwarx@h; \
- ori r2,r2,_break_lwarx@l; \
- stwcx. r2,0,r2; \
- li r2,mark; \
- stw r2,TRAP(r1); \
- lis r2,0xDEAD; \
- ori r2,r2,0xDEAD; \
- stw r2,MARKER(r1); \
- li r2,0; \
- stw r2,RESULT(r1)
-
-
-/* save registers clobbered by a page fault handler */
-#define SAVE_PAGE_FAULT_REGS(offset) \
- mfspr r2,DAR; \
- stw r2,_DAR(r1); \
- mfspr r2,DSISR; \
- stw r2,_DSISR(r1); \
- mfspr r2,PVR; /* Check for 603/603e */ \
- srwi r2,r2,16; \
- cmpi 0,r2,3; /* 603 */ \
- beq 22f; \
- cmpi 0,r2,6; /* 603e */ \
- bne 24f; \
-22: mfspr r2,HASH1; /* Note: these registers exist only on 603 */ \
- stw r2,_HASH1(r1); \
- mfspr r2,HASH2; \
- stw r2,_HASH2(r1); \
- mfspr r2,IMISS; \
- stw r2,_IMISS(r1); \
- mfspr r2,DMISS; \
- stw r2,_DMISS(r1); \
- mfspr r2,ICMP; \
- stw r2,_ICMP(r1); \
- mfspr r2,DCMP; \
- stw r2,_DCMP(r1); \
-24:
-
-#define SAVE_INT_REGS(mark) \
- mtspr SPR0,r1; /* Save current stack pointer */ \
- mtspr SPR1,r2; /* Scratch */ \
- mfcr r2; \
- mtspr SPR2,r2; \
- mfspr r2,SRR1; /* Interrupt from user/system mode */ \
- andi. r2,r2,MSR_PR; \
- beq+ 10f; /* Jump if system - already have stack */ \
- mfspr r2,SPR2; /* Restore CCR */ \
- mtcrf 0xFF,r2; \
- mfspr r2,SRR0; /* Preserve interrupt registers */ \
- mtspr SPR2,r2; \
- mfspr r2,SRR1; \
- mtspr SPR3,r2; \
- lis r2,05f@h; \
- ori r2,r2,05f@l; \
- mtspr SRR0,r2; \
- mfmsr r2; \
- ori r2,r2,MSR_|MSR_DR|MSR_IR; \
- mtspr SRR1,r2; \
- rfi; \
-05: lis r2,current_set@ha; \
- lwz r2,current_set@l(r2); \
- mfspr r1,SPR2; \
- stw r1,TSS+LAST_PC(r2); \
- mfspr r1,SPR0; \
- stw r1,TSS+USER_STACK(r2); \
- lwz r1,TSS+KSP(r2); \
- subi r1,r1,INT_FRAME_SIZE; /* Make room for frame */ \
- stw r1,TSS+PT_REGS(r2); /* Save regs pointer for 'ptrace' */ \
- lwz r1,TSS+KSP(r2); \
- b 20f; \
-10: mfspr r2,SPR2; /* Restore CCR */ \
- mtcrf 0xFF,r2; \
- mfspr r2,SRR0; /* Preserve interrupt registers */ \
- mtspr SPR2,r2; \
- mfspr r2,SRR1; \
- mtspr SPR3,r2; \
- lis r2,20f@h; \
- ori r2,r2,20f@l; \
- mtspr SRR0,r2; \
- mfmsr r2; \
- ori r2,r2,MSR_|MSR_DR|MSR_IR; \
- mtspr SRR1,r2; \
- SYNC(); \
- rfi; \
-20: SAVE_ALL_REGS(mark); \
- CHECK_STACK()
-
-#define RETURN_FROM_INT(mark) \
-90: mfmsr r0; /* Disable interrupts */ \
- li r4,0; \
- ori r4,r4,MSR_EE; \
- andc r0,r0,r4; \
- sync; /* Some chip revs need this... */ \
- mtmsr r0; \
- lis r2,intr_count@ha; /* Need to run 'bottom half' */ \
- lwz r3,intr_count@l(r2); \
- cmpi 0,r3,0; \
- bne 00f; \
- lis r4,bh_mask@ha; \
- lwz r4,bh_mask@l(r4); \
- lis r5,bh_active@ha; \
- lwz r5,bh_active@l(r5); \
- and. r4,r4,r5; \
- beq 00f; \
- addi r3,r3,1; \
- stw r3,intr_count@l(r2); \
- bl _EXTERN(_do_bottom_half); \
- lis r2,intr_count@ha; \
- lwz r3,intr_count@l(r2); \
- subi r3,r3,1; \
- stw r3,intr_count@l(r2); \
-00: lwz r2,_MSR(r1); /* Returning to user mode? */ \
- andi. r2,r2,MSR_PR; \
- beq+ 10f; /* no - no need to mess with stack */ \
-/* lis r2,kernel_pages_are_copyback@ha; \
- lwz r2,kernel_pages_are_copyback@l(r2); \
- cmpi 0,r2,0; \
- beq 05f; \
- bl _EXTERN(flush_instruction_cache); */ \
-05: lis r3,current_set@ha; /* need to save kernel stack pointer */ \
- lwz r3,current_set@l(r3); \
- /*addi r4,r1,INT_FRAME_SIZE*/; /* size of frame */ \
- lwz r4, KERNEL_STACK_PAGE(r3); \
- addi r4,r4,KERNEL_STACK_SIZE; /* reset stack pointer to top of stack page */ \
- /* stack isn't 0'd so show_task():sched.c shows highwater of stack */ \
- stw r4,TSS+KSP(r3); \
- lwz r4,STATE(r3); /* If state != 0, can't run */ \
- cmpi 0,r4,0; \
- beq 06f; \
- bl _EXTERN(schedule); \
- b 90b; \
-06: lwz r4,COUNTER(r3); /* Time quantum expired? */ \
- cmpi 0,r4,0; \
- bne 07f; \
- bl _EXTERN(schedule); \
- b 90b; \
-07: lwz r4,BLOCKED(r3); /* Check for pending unblocked signals */ \
- lwz r5,SIGNAL(r3); \
- andc. r0,r5,r4; /* Lets thru any unblocked */ \
- beq 10f; \
- mr r3,r4; \
- mr r4,r1; \
- bl _EXTERN(do_signal); \
-10: lwz r2,_NIP(r1); /* Restore environment */ \
- mtspr SRR0,r2; \
- lwz r2,_MSR(r1); \
- mtspr SRR1,r2; \
- lmw r3,GPR3(r1); \
- lwz r2,_CTR(r1); \
- mtctr r2; \
- lwz r2,_LINK(r1); \
- mtlr r2; \
- lwz r2,_XER(r1); \
- mtspr XER,r2; \
- lfd fr0,FPCSR(r1); \
- mtfsf 0xFF,fr0; \
- RESTORE_FP_REGS(mark) ; \
- lwz r2,_CCR(r1); \
- mtcrf 0xFF,r2; \
- lwz r0,GPR0(r1); \
- lwz r2,GPR2(r1); \
- lwz r1,GPR1(r1); \
- SYNC(); \
- rfi
+
-_TEXT()
-/*
- * This code may be executed by a bootstrap process. If so, the
- * purpose is to relocate the loaded image to its final location
- * in memory.
- * R3: End of image
- * R4: Start of image - 0x400
- * R11: Start of command line string
- * R12: End of command line string
- * R30: 'BeBx' if this is a BeBox
- *
- */
- .globl _start
+ .text
.globl _stext
_stext:
-_start:
- addi r4,r4,0x400 /* Point at start of image */
- li r5,0 /* Load address */
- subi r4,r4,4 /* Adjust for auto-increment */
- subi r5,r5,4
- subi r3,r3,4
-00: lwzu r0,4(r4) /* Fast move */
- stwu r0,4(r5)
- cmp 0,r3,r4
- bne 00b
- li r5,0x100 /* Actual code starts here */
- mtlr r5
- blr
-hang:
- ori r0,r0,0
- b hang
+#ifdef CONFIG_PREP
+ . = 0x100
+_GLOBAL(HardReset)
+ b _start
-/*
- * BeBox CPU #1 vector & code
- */
-_ORG(0x0080)
- .globl BeBox_CPU1_vector
-BeBox_CPU1_vector:
- .long 0
-BeBox_CPU1_reset:
- li r1,BeBox_CPU1_vector@l
- li r2,0
- stw r2,0(r1)
-00: lwz r2,0(r1)
- cmpi 0,r2,0
- bne 10f
- li r2,10000
- mtctr r2
-02: nop
- bdnz 02b
- b 00b
-10: mtlr r1
- blr
-
-_ORG(0x0100)
+#endif /* CONFIG_PREP */
-/* Hard Reset */
- .globl HardReset
-HardReset:
- b Reset
+#ifdef CONFIG_PMAC
+/*
+ * _start is defined this way because the XCOFF loader in the OpenFirmware
+ * on the powermac expects the entry point to be a procedure descriptor.
+ */
+ .text
+ .globl _start
+_start:
+ .long TOPHYS(__start),0,0
-_ORG(0x0200)
- b MachineCheck
+/*
+ * Enter here with the kernel text, data and bss loaded starting at
+ * 0, running with virtual == physical mapping.
+ * r5 points to the prom entry point (the client interface handler
+ * address). Address translation is turned on, with the prom
+ * managing the hash table. Interrupts are disabled. The stack
+ * pointer (r1) points to just below the end of the half-meg region
+ * from 0x380000 - 0x400000, which is mapped in already.
+ */
+ .globl __start
+__start:
+
+/*
+ * Use the first pair of BAT registers to map the 1st 8MB
+ * of RAM to KERNELBASE.
+ */
+ mfspr r9,PVR
+ rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
+ cmpi 0,r9,1
+ lis r7,KERNELBASE@h
+ bne 4f
+ ori r7,r7,4 /* set up BAT registers for 601 */
+ li r8,0x7f
+ b 5f
+4: ori r7,r7,0xff /* set up BAT registers for 604 */
+ li r8,2
+ mtspr DBAT0U,r7
+ mtspr DBAT0L,r8
+5: mtspr IBAT0U,r7
+ mtspr IBAT0L,r8
+ isync
+
+/*
+ * Now we have the 1st 8M of RAM mapped at KERNELBASE, so we can
+ * refer to addresses of data items, procedures, etc. normally.
+ */
+ lis r7,start_here@ha /* jump up to our copy at KERNELBASE */
+ addi r7,r7,start_here@l
+ mtlr r7
+ blr
+#endif /* CONFIG_PMAC */
-_ORG(0x0300)
- b DataAccess
-_ORG(0x0400)
- b InstructionAccess
-_ORG(0x0500)
- b HardwareInterrupt
-_ORG(0x0600)
- b Alignment
+/*
+ * Macros for storing registers into and loading registers from
+ * exception frames.
+ */
+#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
+#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
+#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
+#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
+#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
+#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
+#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
+#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
+#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
+#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
+
+#define SAVE_FPR(n, base) stfd n,TSS_FPR0+8*(n)(base)
+#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
+#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
+#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
+#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
+#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
+#define REST_FPR(n, base) lfd n,TSS_FPR0+8*(n)(base)
+#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
+#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
+#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
+#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
+#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
+
+/*
+ * Exception entry code. This code runs with address translation
+ * turned off, i.e. using physical addresses.
+ * We assume sprg3 has the physical address of the current
+ * task's thread_struct.
+ */
+#define EXCEPTION_PROLOG \
+0: mtspr SPRG0,r20; \
+ mtspr SPRG1,r21; \
+ mfcr r20; \
+ mfspr r21,SRR1; /* test whether from user or kernel */\
+ andi. r21,r21,MSR_PR; \
+ mr r21,r1; /* from kernel - use current sp */\
+ beq 1f; \
+ mfspr r21,SPRG3; /* from user - load kernel sp */\
+ lwz r21,KSP(r21); \
+1: addis r21,r21,-KERNELBASE@h; /* convert sp to physical */ \
+ subi r21,r21,INT_FRAME_SIZE+STACK_UNDERHEAD; /* alloc exc. frame */\
+ stw r1,GPR1(r21); \
+ stw r1,0(r21); \
+ addis r1,r21,KERNELBASE@h; /* set new kernel sp */ \
+ stw r20,_CCR(r21); /* save registers */ \
+ stw r22,GPR22(r21); \
+ stw r23,GPR23(r21); \
+ mfspr r20,SPRG0; \
+ stw r20,GPR20(r21); \
+ mfspr r22,SPRG1; \
+ stw r22,GPR21(r21); \
+ mflr r20; \
+ stw r20,_LINK(r21); \
+ mfctr r22; \
+ stw r22,_CTR(r21); \
+ mfspr r20,XER; \
+ stw r20,_XER(r21); \
+ mfspr r22,SRR0; \
+ mfspr r23,SRR1; /* we can now take exceptions */\
+ stw r0,GPR0(r21); \
+ stw r2,GPR2(r21); \
+ SAVE_4GPRS(3, r21);
+/*
+ * Note: code which follows this uses cr0.eq (set if from kernel),
+ * r21, r22 (SRR0), and r23 (SRR1).
+ */
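
A hedged C view of the frame EXCEPTION_PROLOG begins filling in (transfer_to_handler below completes it). The field names and ordering here are purely illustrative; the real offsets (GPR0, _NIP, _CCR, ...) come from ppc_defs.h.

	struct exc_frame_sketch {
		unsigned long back_chain;	/* 0(r21): old r1 */
		unsigned long gpr[32];		/* r0-r6, r20-r23 now; the rest
						   saved by transfer_to_handler */
		unsigned long nip, msr;		/* SRR0/SRR1, saved later */
		unsigned long ctr, link, xer, ccr;
		unsigned long orig_gpr3, result, trap;
	};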
+
+/*
+ * Exception vectors.
+ */
+#define STD_EXCEPTION(n, label, hdlr) \
+ . = n; \
+label: \
+ EXCEPTION_PROLOG; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ li r20,MSR_KERNEL; \
+ bl transfer_to_handler; \
+ .long hdlr; \
+ .long int_return
+
+#ifndef CONFIG_PREP
+/* System reset */
+ STD_EXCEPTION(0x100, Reset, UnknownException)
+#endif /* ndef CONFIG_PREP */
-_ORG(0x0700)
- b ProgramCheck
+/* Machine check */
+ STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)
-_ORG(0x0800)
- b FloatingPointCheck
+/* Data access exception */
+ . = 0x300
+DataAccess:
+ EXCEPTION_PROLOG
+ mfspr r20,DSISR
+ andis. r0,r20,0x8470 /* weird error? */
+ bne 1f /* if not, try to put a PTE */
+ mfspr r3,DAR /* into the hash table */
+ rlwinm r4,r23,32-13,30,30 /* MSR_PR -> _PAGE_USER */
+ rlwimi r4,r20,32-23,29,29 /* DSISR_STORE -> _PAGE_RW */
+ mfspr r5,SPRG3 /* phys addr of TSS */
+ bl hash_page
+1: stw r20,_DSISR(r21)
+ mr r5,r20
+ mfspr r4,DAR
+ stw r4,_DAR(r21)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r20,MSR_KERNEL
+ rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
+ bl transfer_to_handler
+ .long do_page_fault
+ .long int_return
+
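In the DataAccess path above, two rotate-and-insert instructions pack the fault information into Linux-style PTE access bits for hash_page. A hedged C equivalent, using the bit values the surrounding code implies (_PAGE_USER = 2, _PAGE_RW = 4):

	static unsigned long fault_access_flags(unsigned long srr1,
						unsigned long dsisr)
	{
		unsigned long flags = 0;

		if (srr1 & 0x4000)		/* MSR_PR: from user mode */
			flags |= 2;		/* _PAGE_USER */
		if (dsisr & 0x02000000)		/* store, not load */
			flags |= 4;		/* _PAGE_RW */
		return flags;	/* hash_page ors in _PAGE_PRESENT (1) */
	}
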
+/* Instruction access exception */
+ . = 0x400
+InstructionAccess:
+ EXCEPTION_PROLOG
+ andis. r0,r23,0x4000 /* no pte found? */
+ beq 1f /* if so, try to put a PTE */
+ mr r3,r22 /* into the hash table */
+ rlwinm r4,r23,32-13,30,30 /* MSR_PR -> _PAGE_USER */
+ mr r20,r23 /* SRR1 has reason bits */
+ mfspr r5,SPRG3 /* phys addr of TSS */
+ bl hash_page
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ mr r4,r22
+ mr r5,r23
+ li r20,MSR_KERNEL
+ rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
+ bl transfer_to_handler
+ .long do_page_fault
+ .long int_return
+
+/* External interrupt */
+ STD_EXCEPTION(0x500, HardwareInterrupt, handle_IRQ)
+
+/* Alignment exception */
+ . = 0x600
+Alignment:
+ EXCEPTION_PROLOG
+ mfspr r4,DAR
+ stw r4,_DAR(r21)
+ mfspr r5,DSISR
+ stw r5,_DSISR(r21)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r20,MSR_KERNEL
+ rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
+ bl transfer_to_handler
+ .long AlignmentException
+ .long int_return
-/* Decrementer register - ignored for now... */
+/* Program check exception */
+ . = 0x700
+ProgramCheck:
+ EXCEPTION_PROLOG
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r20,MSR_KERNEL
+ rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
+ bl transfer_to_handler
+ .long ProgramCheckException
+ .long int_return
+
+/* Floating-point unavailable */
+ . = 0x800
+FPUnavailable:
+ EXCEPTION_PROLOG
+ bne load_up_fpu /* if from user, just load it up */
+ li r20,MSR_KERNEL
+ bl transfer_to_handler /* if from kernel, take a trap */
+ .long KernelFP
+ .long int_return
+
+/* Decrementer */
+#ifdef CONFIG_PREP
+/* - ignored for now... */
_ORG(0x0900)
-/* TRACE_TRAP(0x900) */
- mtspr SPR0,r1
+ mtspr SPRG0,r1
lis r1,0x7FFF
ori r1,r1,0xFFFF
mtspr DEC,r1
- mfspr r1,SPR0
-#if 0
- SYNC
-#endif
+ mfspr r1,SPRG0
rfi
-
-_ORG(0x0A00)
-DEFAULT_TRAP(0x0A00)
-_ORG(0x0B00)
-DEFAULT_TRAP(0x0B00)
+#endif /* CONFIG_PREP */
+#ifdef CONFIG_PMAC
+ STD_EXCEPTION(0x900, Decrementer, timer_interrupt)
+#endif /* CONFIG_PMAC */
-/*
- * System call
- */
-_ORG(0x0C00)
- b SystemCall
+ STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
+ STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
-_ORG(0x0D00)
- b SingleStep
+/* System call */
+ . = 0xc00
+SystemCall:
+ EXCEPTION_PROLOG
+ stw r3,ORIG_GPR3(r21)
+ li r20,MSR_KERNEL
+ rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
+ bl transfer_to_handler
+ .long DoSyscall
+ .long int_return
+
+/* Single step - not used on 601 */
+ STD_EXCEPTION(0xd00, SingleStep, SingleStepException)
-_ORG(0x0E00)
-DEFAULT_TRAP(0x0E00)
-_ORG(0x0F00)
-DEFAULT_TRAP(0x0F00)
+ STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
+ STD_EXCEPTION(0xf00, Trap_0f, UnknownException)
/*
- * Handle TLB Miss on an instruction load
+ * Handle TLB miss for instruction on 603/603e.
+ * Note: we get an alternate set of r0 - r3 to use automatically.
*/
-_ORG(0x1000)
-/* Note: It is *unsafe* to use the TRACE TRAP macro here since there */
-/* could be a 'trace' in progress when the TLB miss occurs. */
-/* TRACE_TRAP(0x1000) */
- b InstructionTLBMiss
+ . = 0x1000
+InstructionTLBMiss:
+ mfctr r0 /* Need to save this - CTR can't be touched! */
+ mfspr r2,HASH1 /* Get PTE pointer */
+ mfspr r3,ICMP /* Partial item compare value */
+00: li r1,8 /* 8 items / bucket */
+ mtctr r1
+ subi r2,r2,8 /* Preset pointer */
+10: lwzu r1,8(r2) /* Get next PTE */
+ cmp 0,r1,r3 /* Found entry yet? */
+ bdnzf 2,10b /* Jump back if not, until CTR==0 */
+ bne 30f /* Try secondary hash if CTR==0 */
+ lwz r1,4(r2) /* Get second word of entry */
+20: mtctr r0 /* Restore CTR */
+ mfspr r3,SRR1 /* Need to restore CR0 */
+ mtcrf 0x80,r3
+ mfspr r0,IMISS /* Set to update TLB */
+ mtspr RPA,r1
+ tlbli r0
+ rfi /* All done */
+/* Secondary hash */
+30: andi. r1,r3,0x40 /* Already doing secondary hash? */
+ bne InstructionAddressInvalid /* Yes - item not in hash table */
+ mfspr r2,HASH2 /* Get hash table pointer */
+ ori r3,r3,0x40 /* Set secondary hash */
+ b 00b /* Try lookup again */
+InstructionAddressInvalid:
+ mfspr r3,SRR1
+ rlwinm r1,r3,9,6,6 /* Get load/store bit */
+ addis r1,r1,0x4000 /* Set bit 1 -> PTE not found */
+ mtspr DSISR,r1
+ mtctr r0 /* Restore CTR */
+ andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
+ or r2,r2,r1
+ mtspr SRR1,r2
+ mfspr r1,IMISS /* Get failing address */
+ rlwinm. r2,r2,0,31,31 /* Check for little endian access */
+ beq 20f /* Jump if big endian */
+ xori r1,r1,3
+20: mtspr DAR,r1 /* Set fault address */
+ mfmsr r0 /* Restore "normal" registers */
+ xoris r0,r0,MSR_TGPR>>16
+ mtcrf 0x80,r3 /* Restore CR0 */
+ sync /* Some chip revs have problems here... */
+ mtmsr r0
+ b InstructionAccess
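
This lookup pattern is shared by all three 603 miss handlers (the data load and store versions follow). A hedged C sketch, with illustrative types: HASH1/HASH2 point at the 8-entry primary and secondary PTE groups, and ICMP/DCMP hold the word to match.

	struct ppc_pte_sk { unsigned long hi, lo; };

	static struct ppc_pte_sk *pteg_search(struct ppc_pte_sk *primary,
					      struct ppc_pte_sk *secondary,
					      unsigned long cmp)
	{
		int i;

		for (i = 0; i < 8; i++)		/* primary bucket */
			if (primary[i].hi == cmp)
				return &primary[i];
		cmp |= 0x40;			/* set H bit for 2nd hash */
		for (i = 0; i < 8; i++)
			if (secondary[i].hi == cmp)
				return &secondary[i];
		return 0;			/* genuine fault */
	}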
/*
- * Handle TLB Miss on a data item load
+ * Handle TLB miss for DATA Load operation on 603/603e
*/
-_ORG(0x1100)
-/* TRACE_TRAP(0x1100) */
- b DataLoadTLBMiss
+ . = 0x1100
+DataLoadTLBMiss:
+ mfctr r0 /* Need to save this - CTR can't be touched! */
+ mfspr r2,HASH1 /* Get PTE pointer */
+ mfspr r3,DCMP /* Partial item compare value */
+00: li r1,8 /* 8 items / bucket */
+ mtctr r1
+ subi r2,r2,8 /* Preset pointer */
+10: lwzu r1,8(r2) /* Get next PTE */
+ cmp 0,r1,r3 /* Found entry yet? */
+ bdnzf 2,10b /* Jump back if not, until CTR==0 */
+ bne 30f /* Try secondary hash if CTR==0 */
+ lwz r1,4(r2) /* Get second word of entry */
+20: mtctr r0 /* Restore CTR */
+ mfspr r3,SRR1 /* Need to restore CR0 */
+ mtcrf 0x80,r3
+ mfspr r0,DMISS /* Set to update TLB */
+ mtspr RPA,r1
+ tlbld r0
+ rfi /* All done */
+/* Secondary hash */
+30: andi. r1,r3,0x40 /* Already doing secondary hash? */
+ bne DataAddressInvalid /* Yes - item not in hash table */
+ mfspr r2,HASH2 /* Get hash table pointer */
+ ori r3,r3,0x40 /* Set secondary hash */
+ b 00b /* Try lookup again */
+DataAddressInvalid:
+ mfspr r3,SRR1
+ rlwinm r1,r3,9,6,6 /* Get load/store bit */
+ addis r1,r1,0x4000 /* Set bit 1 -> PTE not found */
+ mtspr DSISR,r1
+ mtctr r0 /* Restore CTR */
+ andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
+ mtspr SRR1,r2
+ mfspr r1,DMISS /* Get failing address */
+ rlwinm. r2,r2,0,31,31 /* Check for little endian access */
+ beq 20f /* Jump if big endian */
+ xori r1,r1,3
+20: mtspr DAR,r1 /* Set fault address */
+ mfmsr r0 /* Restore "normal" registers */
+ xoris r0,r0,MSR_TGPR>>16
+ mtcrf 0x80,r3 /* Restore CR0 */
+ sync /* Some chip revs have problems here... */
+ mtmsr r0
+ b DataAccess
/*
- * Handle TLB Miss on a store operation
+ * Handle TLB miss for DATA Store on 603/603e
*/
-_ORG(0x1200)
-/* TRACE_TRAP(0x1200) */
- b DataStoreTLBMiss
+ . = 0x1200
+DataStoreTLBMiss:
+ mfctr r0 /* Need to save this - CTR can't be touched! */
+ mfspr r2,HASH1 /* Get PTE pointer */
+ mfspr r3,DCMP /* Partial item compare value */
+00: li r1,8 /* 8 items / bucket */
+ mtctr r1
+ subi r2,r2,8 /* Preset pointer */
+10: lwzu r1,8(r2) /* Get next PTE */
+ cmp 0,r1,r3 /* Found entry yet? */
+ bdnzf 2,10b /* Jump back if not, until CTR==0 */
+ bne 30f /* Try secondary hash if CTR==0 */
+ lwz r1,4(r2) /* Get second word of entry */
+20: mtctr r0 /* Restore CTR */
+ mfspr r3,SRR1 /* Need to restore CR0 */
+ mtcrf 0x80,r3
+ mfspr r0,DMISS /* Set to update TLB */
+ mtspr RPA,r1
+ tlbld r0
+ rfi /* All done */
+/* Secondary hash */
+30: andi. r1,r3,0x40 /* Already doing secondary hash? */
+ bne DataAddressInvalid /* Yes - item not in hash table */
+ mfspr r2,HASH2 /* Get hash table pointer */
+ ori r3,r3,0x40 /* Set secondary hash */
+ b 00b /* Try lookup again */
-_ORG(0x1300)
-InstructionAddressBreakpoint:
- DEFAULT_TRAP(0x1300)
+/* Instruction address breakpoint exception (on 603/604) */
+ STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint)
-_ORG(0x1400)
-SystemManagementInterrupt:
- DEFAULT_TRAP(0x1400)
+/* System management exception (603?) */
+ STD_EXCEPTION(0x1400, Trap_14, UnknownException)
-_ORG(0x1500)
+ STD_EXCEPTION(0x1500, Trap_15, UnknownException)
+ STD_EXCEPTION(0x1600, Trap_16, UnknownException)
+ STD_EXCEPTION(0x1700, Trap_17, UnknownException)
+ STD_EXCEPTION(0x1800, Trap_18, UnknownException)
+ STD_EXCEPTION(0x1900, Trap_19, UnknownException)
+ STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
+ STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
+ STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
+ STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
+ STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
+ STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
+
+/* Run mode exception */
+ STD_EXCEPTION(0x2000, RunMode, RunModeException)
+
+ STD_EXCEPTION(0x2100, Trap_21, UnknownException)
+ STD_EXCEPTION(0x2200, Trap_22, UnknownException)
+ STD_EXCEPTION(0x2300, Trap_23, UnknownException)
+ STD_EXCEPTION(0x2400, Trap_24, UnknownException)
+ STD_EXCEPTION(0x2500, Trap_25, UnknownException)
+ STD_EXCEPTION(0x2600, Trap_26, UnknownException)
+ STD_EXCEPTION(0x2700, Trap_27, UnknownException)
+ STD_EXCEPTION(0x2800, Trap_28, UnknownException)
+ STD_EXCEPTION(0x2900, Trap_29, UnknownException)
+ STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
+ STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
+ STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
+ STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
+ STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
+ STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)
+
+ . = 0x3000
+
+/*
+ * This code finishes saving the registers to the exception frame
+ * and jumps to the appropriate handler for the exception, turning
+ * on address translation.
+ */
+ .globl transfer_to_handler
+transfer_to_handler:
+ stw r22,_NIP(r21)
+ stw r23,_MSR(r21)
+ SAVE_GPR(7, r21)
+ SAVE_4GPRS(8, r21)
+ SAVE_8GPRS(12, r21)
+ SAVE_8GPRS(24, r21)
+ andi. r23,r23,MSR_PR
+ mfspr r23,SPRG3 /* if from user, fix up tss */
+ beq 2f
+#ifdef CONFIG_PMAC
+ lwz r24,GPR1(r21)
+ stw r22,LAST_PC(r23)
+ stw r24,USER_STACK(r23)
+#endif /* CONFIG_PMAC */
+ addi r24,r1,STACK_FRAME_OVERHEAD
+ stw r24,PT_REGS(r23)
+2: addi r2,r23,-TSS /* set r2 to current */
+ addis r2,r2,KERNELBASE@h
+ mflr r23
+ andi. r24,r23,0x3f00 /* get vector offset */
+ stw r24,TRAP(r21)
+ li r22,0
+ stw r22,RESULT(r21)
+ lwz r24,0(r23) /* virtual address of handler */
+ lwz r23,4(r23) /* where to go when done */
+ mtspr SRR0,r24
+ mtspr SRR1,r20
+ mtlr r23
+ SYNC
+ rfi /* jump to handler, enable MMU */
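
The bl leaves LR pointing at the two .long words that follow each call site, which is how transfer_to_handler finds both the handler and its return path. A hedged C sketch of that lookup:

	struct handler_pair_sk {
		unsigned long handler;	/* virtual address of the C handler */
		unsigned long ret;	/* where to go when done */
	};

	static unsigned long pick_handler(unsigned long lr, unsigned long *ret)
	{
		const struct handler_pair_sk *p =
			(const struct handler_pair_sk *)lr;

		*ret = p->ret;		/* becomes the new LR (int_return) */
		return p->handler;	/* becomes SRR0; rfi "returns" here
					   with the MMU re-enabled */
	}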
/*
- * This space [buffer] is used to forcibly flush the data cache when
- * running in copyback mode. This is necessary IFF the data cache could
- * contain instructions for which the instruction cache has stale data.
- * Since the instruction cache NEVER snoops the data cache, memory must
- * be made coherent with the data cache to ensure that the instruction
- * cache gets a valid instruction stream. Note that this flushing is
- * only performed when switching from system to user mode since this is
- * the only juncture [as far as the OS goes] where the data cache may
- * contain instructions, e.g. after a disk read.
+ * Continuation of the floating-point unavailable handler.
*/
-#define NUM_CACHE_LINES 128*4
-#define CACHE_LINE_SIZE 32
-cache_flush_buffer:
- .space NUM_CACHE_LINES*CACHE_LINE_SIZE /* CAUTION! these need to match hardware */
+load_up_fpu:
+ bl giveup_fpu_unmapped
+ ori r23,r23,MSR_FP /* enable use of FP after return */
+ mfspr r5,SPRG3 /* current task's TSS (phys) */
+ lfd fr0,TSS_FPSCR-4(r5)
+ mtfsf 0xff,fr0
+ REST_32FPRS(0, r5)
+
+/* use last_task_used_math instead of fpu_tss */
+ lis r3,last_task_used_math@h/*a*/
+ addis r3,r3,-KERNELBASE@h
+ subi r4,r5,TSS
+ addis r4,r4,KERNELBASE@h
+ stw r4,last_task_used_math@l(r3)
+#if 0
+ lis r3,fpu_tss@ha
+ addis r4,r5,KERNELBASE@h
+ addis r3,r3,-KERNELBASE@h
+ stw r4,fpu_tss@l(r3)
+#endif
+ /* restore registers and return */
+ lwz r3,_CCR(r21)
+ lwz r4,_LINK(r21)
+ mtcrf 0xff,r3
+ mtlr r4
+ REST_GPR(1, r21)
+ REST_4GPRS(3, r21)
+ /* we haven't used ctr or xer */
+ mtspr SRR1,r23
+ mtspr SRR0,r22
+ REST_GPR(20, r21)
+ REST_2GPRS(22, r21)
+ lwz r21,GPR21(r21)
+ SYNC
+ rfi
-#if NUM_CACHE_LINES < 512
-_ORG(0x4000)
-#endif
+/*
+ * Load a PTE into the hash table, if possible.
+ * The address is in r3, and r4 contains access flags:
+ * _PAGE_USER (2) if a user-mode access, ored with
+ * _PAGE_RW (4) if a write. r20 contains DSISR or SRR1,
+ * so bit 1 (0x40000000) is set if the exception was due
+ * to no matching PTE being found in the hash table.
+ * r5 contains the physical address of the current task's tss.
+ *
+ * Returns to the caller if the access is illegal or there is no
+ * mapping for the address. Otherwise it places an appropriate PTE
+ * in the hash table and returns from the exception.
+ * Uses r0, r2 - r6, ctr, lr.
+ *
+ * For speed, 4 of the instructions get patched once the size and
+ * physical address of the hash table are known. These definitions
+ * of Hash_base and Hash_bits below are just an example.
+ */
+Hash_base = 0x180000
+Hash_bits = 12 /* e.g. 256kB hash table */
+Hash_msk = (((1 << Hash_bits) - 1) * 64)
+
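A hedged C sketch of the address computation done by the patched instructions below: hash_page_patch_A forms the primary hash, and _B/_C form the secondary hash, which is the one's complement of the primary within the mask.

	#define HASH_BASE_SK 0x180000UL		/* example, as above */
	#define HASH_BITS_SK 12
	#define HASH_MASK_SK ((1UL << HASH_BITS_SK) - 1)

	static unsigned long pteg_addr(unsigned long vsid, unsigned long ea,
				       int secondary)
	{
		unsigned long pi = (ea >> 12) & 0xffff;	/* page index */
		unsigned long hash = (vsid ^ pi) & HASH_MASK_SK;

		if (secondary)
			hash = ~hash & HASH_MASK_SK;
		return HASH_BASE_SK + (hash << 6);  /* 64 bytes per group */
	}
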
+ .globl hash_page
+hash_page:
+ /* Get PTE (linux-style) and check access */
+ lwz r5,PG_TABLES(r5) /* task's page tables */
+ lis r2,-KERNELBASE@h
+ add r5,r5,r2 /* convert to phys addr */
+ rlwimi r5,r3,12,20,29 /* insert top 10 bits of address */
+ lwz r5,0(r5) /* get pmd entry */
+ rlwinm. r5,r5,0,0,19 /* extract address of pte page */
+ beqlr- /* return if no mapping */
+ add r2,r5,r2
+ rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
+ lwz r6,0(r2) /* get linux-style pte */
+ ori r4,r4,1 /* set _PAGE_PRESENT bit in access */
+ andc. r0,r4,r6 /* check access & ~permission */
+ bnelr- /* return if access not permitted */
+ ori r6,r6,0x100 /* set _PAGE_ACCESSED in pte */
+ rlwinm r5,r4,5,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
+ rlwimi r5,r4,7,22,22 /* _PAGE_RW -> _PAGE_HWWRITE */
+ or r6,r6,r5
+ stw r6,0(r2) /* update PTE (accessed/dirty bits) */
+
+ /* Convert linux-style PTE to low word of PPC-style PTE */
+ rlwinm r4,r6,32-9,31,31 /* _PAGE_HWWRITE -> PP lsb */
+ rlwimi r6,r6,32-1,31,31 /* _PAGE_USER -> PP (both bits now) */
+ ori r4,r4,0xe04 /* clear out reserved bits */
+ andc r6,r6,r4 /* PP=2 or 0, when _PAGE_HWWRITE */
+
+ /* Construct the high word of the PPC-style PTE */
+ mfsrin r5,r3 /* get segment reg for segment */
+ rlwinm r5,r5,7,1,24 /* put VSID in 0x7fffff80 bits */
+ oris r5,r5,0x8000 /* set V (valid) bit */
+ rlwimi r5,r3,10,26,31 /* put in API (abbrev page index) */
+
+ /* Get the address of the primary PTE group in the hash table */
+ .globl hash_page_patch_A
+hash_page_patch_A:
+ lis r4,Hash_base@h /* base address of hash table */
+ rlwimi r4,r5,32-1,26-Hash_bits,25 /* (VSID & hash_mask) << 6 */
+ rlwinm r0,r3,32-6,26-Hash_bits,25 /* (PI & hash_mask) << 6 */
+ xor r4,r4,r0 /* make primary hash */
+
+ /* See whether it was a PTE not found exception or a
+ protection violation. */
+ andis. r0,r20,0x4000
+ li r2,8 /* PTEs/group */
+ bne 10f /* no PTE: go look for an empty slot */
+ tlbie r3 /* invalidate TLB entry */
+
+ /* Search the primary PTEG for a PTE whose 1st word matches r5 */
+ mtctr r2
+ addi r3,r4,-8
+1: lwzu r0,8(r3) /* get next PTE */
+ cmp 0,r0,r5
+ bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
+ beq+ found_slot
+
+ /* Search the secondary PTEG for a matching PTE */
+ ori r5,r5,0x40 /* set H (secondary hash) bit */
+ .globl hash_page_patch_B
+hash_page_patch_B:
+ xoris r3,r4,Hash_msk>>16 /* compute secondary hash */
+ xori r3,r3,0xffc0
+ addi r3,r3,-8
+ mtctr r2
+2: lwzu r0,8(r3)
+ cmp 0,r0,r5
+ bdnzf 2,2b
+ beq+ found_slot
+ xori r5,r5,0x40 /* clear H bit again */
+
+ /* Search the primary PTEG for an empty slot */
+10: mtctr r2
+ addi r3,r4,-8 /* search primary PTEG */
+1: lwzu r0,8(r3) /* get next PTE */
+ cmpi 0,r0,0 /* empty? */
+ bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
+ beq+ found_empty
+
+ /* Search the secondary PTEG for an empty slot */
+ ori r5,r5,0x40 /* set H (secondary hash) bit */
+ .globl hash_page_patch_C
+hash_page_patch_C:
+ xoris r3,r4,Hash_msk>>16 /* compute secondary hash */
+ xori r3,r3,0xffc0
+ addi r3,r3,-8
+ mtctr r2
+2: lwzu r0,8(r3)
+ cmpi 0,r0,0
+ bdnzf 2,2b
+ beq+ found_empty
+
+ /* Choose an arbitrary slot in the primary PTEG to overwrite */
+ xori r5,r5,0x40 /* clear H bit again */
+ lwz r2,next_slot@l(0)
+ addi r2,r2,8
+ andi. r2,r2,0x38
+ stw r2,next_slot@l(0)
+ add r3,r4,r2
+
+ /* Store PTE in PTEG */
+found_empty:
+ stw r5,0(r3)
+found_slot:
+ stw r6,4(r3)
+ SYNC
+ /* Return from the exception */
+ lwz r3,_CCR(r21)
+ lwz r4,_LINK(r21)
+ lwz r5,_CTR(r21)
+ mtcrf 0xff,r3
+ mtlr r4
+ mtctr r5
+ REST_GPR(0, r21)
+ REST_2GPRS(1, r21)
+ REST_4GPRS(3, r21)
+ /* we haven't used xer */
+ mtspr SRR1,r23
+ mtspr SRR0,r22
+ REST_GPR(20, r21)
+ REST_2GPRS(22, r21)
+ lwz r21,GPR21(r21)
+ SYNC
+ rfi
+next_slot:
+ .long 0
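
When both groups are full, a primary-group entry is evicted round-robin: next_slot just cycles through the eight 8-byte slots, as this small C sketch shows.

	static unsigned long next_slot_sk;	/* stand-in for next_slot */

	static unsigned long pick_victim(unsigned long pteg)
	{
		next_slot_sk = (next_slot_sk + 8) & 0x38;   /* 0,8,...,56 */
		return pteg + next_slot_sk;
	}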
-/* changed to use r3 as residual pointer (as firmware does), that's all -- Cort */
/*
- * Hardware reset [actually from bootstrap]
- * Initialize memory management & call secondary init
- * Registers initialized by bootstrap:
- * R11: Start of command line string
- * R12: End of command line string
- * R28: Residual data
- * R29: Total Memory Size
- * R30: 'BeBx' if this is a BeBox
- */
-Reset:
- lis r7,0xF000 /* To mask upper 4 bits */
-/* set pointer to residual data */
- lis r1,resptr@h
- ori r1,r1,resptr@l
- andc r1,r1,r7
-/* changed to use r3 as residual pointer (as firmware does) -- Cort */
-/* this is only a ptr, the actual data is copied in mmu_init */
- stw r3,0(r1)
-
-/* Copy argument string */
- li r0,0 /* Null terminate string */
- stb r0,0(r12)
- lis r1,cmd_line@h
- ori r1,r1,cmd_line@l
- andc r1,r1,r7 /* No MMU yet - need unmapped address */
- subi r1,r1,1
- subi r11,r11,1
-00: lbzu r0,1(r11)
- cmpi 0,r0,0
- stbu r0,1(r1)
- bne 00b
+ * This is where the main kernel code starts.
+ */
-#define IS_BE_BOX 0x42654278 /* 'BeBx' */
- lis r1,isBeBox@h
- ori r1,r1,isBeBox@l
- andc r1,r1,r7
-/* See if this is a CPU other than CPU#1 */
-/* This [currently] happens on the BeBox */
- lwz r2,0(r1)
- cmpi 0,r2,0
- bne Reset_BeBox_CPU1
-/* Save machine type indicator */
- li r2,0
- lis r3,IS_BE_BOX>>16
- ori r3,r3,IS_BE_BOX&0xFFFF
- cmp 0,r30,r3
- bne 00f
- li r2,1
- mr r11,r28
- mr r12,r29
- lis r5,BeBox_CPU1_vector@h
- ori r5,r5,BeBox_CPU1_vector@l
- andc r5,r5,r7 /* Tell CPU #1 where to go */
-00: stw r2,0(r1)
- stw r30,4(r1)
-
+start_here:
+ /*
+ * Enable caches and 604-specific features if necessary.
+ */
+ mfspr r9,PVR
+ rlwinm r9,r9,16,16,31
+ cmpi 0,r9,1
+ beq 4f /* not needed for 601 */
+ mfspr r7,HID0
+ andi. r0,r7,HID0_DCE
+ ori r7,r7,HID0_ICE|HID0_DCE
+ ori r8,r7,HID0_ICFI
+ bne 3f /* don't invalidate the D-cache */
+ ori r8,r8,HID0_DCI /* unless it wasn't enabled */
+3: sync
+ mtspr HID0,r8 /* enable and invalidate caches */
+ sync
+ mtspr HID0,r7 /* enable caches */
+ sync
+ isync
+ cmpi 0,r9,4 /* check for 604 */
+ cmpi 1,r9,9 /* or 604e */
+ cror 2,2,6
+ bne 4f
+ ori r7,r7,HID0_SIED|HID0_BHTE /* for 604[e], enable */
+ mtspr HID0,r7 /* superscalar exec & br history tbl */
+4:
+ /* ptr to current */
+ lis r2,init_task_union@h
+ ori r2,r2,init_task_union@l
+ /* ptr to phys current tss */
+ addis r3,r2,-KERNELBASE@h
+ addi r3,r3,TSS /* init task's TSS */
+ mtspr SPRG3,r3
+ /* stack */
+ addi r1,r2,TASK_UNION_SIZE
+ li r0,0
+ stwu r0,-STACK_FRAME_OVERHEAD(r1)
-#if 0
- lis r1,sys_stack@h
- ori r1,r1,sys_stack@l
-#else
- lis r1,init_kernel_stack@h
- ori r1,r1,init_kernel_stack@l
-#endif
- addi r1,r1,0x1000 /* top of stack */
-#if 0
- li r2,0x0FFF /* Mask stack address down to page boundary */
-#endif
- andc r1,r1,r2
- subi r1,r1,INT_FRAME_SIZE /* Padding for first frame */
- li r2,0 /* TOC pointer for nanokernel */
- li r0,MSR_ /* Make sure FPU enabled */
- mtmsr r0
- lis r3,_edata@h /* Clear BSS */
- ori r3,r3,_edata@l
- andc r3,r3,r7 /* make unmapped address */
- lis r4,_end@h
- ori r4,r4,_end@l
- andc r4,r4,r7 /* make unmapped address */
- subi r3,r3,4
+ /* Clear out the BSS */
+ lis r7,_end@ha
+ addi r7,r7,_end@l
+ lis r8,__bss_start@ha
+ addi r8,r8,__bss_start@l
+ subf r7,r8,r7
+ addi r7,r7,3
+ rlwinm. r7,r7,30,2,31
+ beq 2f
+ addi r8,r8,-4
+ mtctr r7
li r0,0
-00: stwu r0,4(r3)
- cmp 0,r3,r4
- blt 00b
-#if 0
-/* Save total memory size (passed from bootstrap) */
- lis r3,_TotalMemory@h
- ori r3,r3,_TotalMemory@l
- andc r3,r3,r7 /* make unmapped address */
- stw r29,0(r3)
-#endif
-/* Initialize BAT registers */
- lis r3,BAT0@h
- ori r3,r3,BAT0@l
- andc r3,r3,r7 /* make unmapped address */
- lwz r0,0(r3)
+3: stwu r0,4(r8)
+ bdnz 3b
+2:
+/*
+ * Initialize the prom stuff (powermacs only) and the MMU.
+ */
+#ifdef CONFIG_PMAC
+ bl prom_init
+#endif /* CONFIG_PMAC */
+ bl MMU_init
+
+/*
+ * Go back to running unmapped so we can load up new values
+ * for SDR1 (hash table pointer) and the segment registers
+ * and change to using our exception vectors.
+ */
+ lis r6,_SDR1@ha
+ lwz r6,_SDR1@l(r6)
+ li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+ lis r4,2f@h
+ addis r4,r4,-KERNELBASE@h
+ ori r4,r4,2f@l
+ mtspr SRR0,r4
+ mtspr SRR1,r3
+ rfi
+/* Load up the kernel context */
+2:
+#ifdef CONFIG_PREP
+ /* reload the bats now that MMU_init() has set them up -- Cort */
+ LOAD_BATS(r3,r0)
+#endif
+
+ SYNC /* Force all PTE updates to finish */
+ tlbia /* Clear all TLB entries */
+ mtspr SDR1,r6
+ li r0,16 /* load up segment register values */
+ mtctr r0 /* for context 0 */
+ lis r3,0x2000 /* Ku = 1, VSID = 0 */
+ li r4,0
+3: mtsrin r3,r4
+ addi r3,r3,1 /* increment VSID */
+ addis r4,r4,0x1000 /* address of next segment */
+ bdnz 3b
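
A hedged C rendering of the loop above: context 0 gets VSIDs 0 through 15, one per 256MB segment, each with the Ku protection-key bit set (mtsrin() here is a hypothetical wrapper for the instruction).

	extern void mtsrin(unsigned long sr, unsigned long ea);	/* hypothetical */

	static void load_context0_segments(void)
	{
		unsigned long sr = 0x20000000UL;	/* Ku=1, VSID=0 */
		unsigned long ea = 0;
		int i;

		for (i = 0; i < 16; i++, sr++, ea += 0x10000000UL)
			mtsrin(sr, ea);		/* VSID i for segment i */
	}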
+#ifdef CONFIG_PMAC
+ li r0,0 /* zot the BATs */
+#if 1
mtspr IBAT0U,r0
- mtspr DBAT0U,r0
- lwz r0,4(r3)
mtspr IBAT0L,r0
+ mtspr DBAT0U,r0
mtspr DBAT0L,r0
- lis r3,BAT1@h
- ori r3,r3,BAT1@l
- andc r3,r3,r7 /* make unmapped address */
- lwz r0,0(r3)
+#endif
mtspr IBAT1U,r0
- mtspr DBAT1U,r0
- lwz r0,4(r3)
mtspr IBAT1L,r0
+ mtspr DBAT1U,r0
mtspr DBAT1L,r0
-/* this BAT mapping will cover all of kernel space */
-#ifdef NEWMM
- lis r3,BAT2@h
- ori r3,r3,BAT2@l
-#else
- lis r3,TMP_BAT2@h
- ori r3,r3,TMP_BAT2@l
-#endif
- andc r3,r3,r7 /* make unmapped address */
- lwz r0,0(r3)
mtspr IBAT2U,r0
- mtspr DBAT2U,r0
- lwz r0,4(r3)
mtspr IBAT2L,r0
+ mtspr DBAT2U,r0
mtspr DBAT2L,r0
-#if 1
- lis r3,BAT3@h
- ori r3,r3,BAT3@l
- andc r3,r3,r7 /* make unmapped address */
- lwz r0,0(r3)
mtspr IBAT3U,r0
- mtspr DBAT3U,r0
- lwz r0,4(r3)
mtspr IBAT3L,r0
+ mtspr DBAT3U,r0
mtspr DBAT3L,r0
-#endif
-/* Now we can turn on the MMU */
- mfmsr r3
- ori r3,r3,MSR_DR|MSR_IR
- mtspr SRR1,r3
- lis r3,10f@h
- ori r3,r3,10f@l
- mtspr SRR0,r3
-DO_RFI_TRACE_UNMAPPED(0xDEAD0000)
- SYNC
- rfi /* enables MMU */
-10: bl _EXTERN(MMU_init) /* initialize MMU environment */
-DO_RFI_TRACE_MAPPED(0xDEAD0100)
-/* Withdraw BAT2->RAM mapping */
- lis r7,0xF000 /* To mask upper 4 bits */
- lis r3,20f@h
- ori r3,r3,20f@l
- andc r3,r3,r7 /* make unmapped address */
- mtspr SRR0,r3
- mfmsr r3
- li r4,MSR_DR|MSR_IR
- andc r3,r3,r4
- mtspr SRR1,r3
- SYNC
-DO_RFI_TRACE_MAPPED(0xDEAD0200)
- SYNC
- rfi
-20:
-
-DO_RFI_TRACE_UNMAPPED(0xDEAD0400)
-20: lis r3,BAT2@h
- ori r3,r3,BAT2@l
- andc r3,r3,r7 /* make unmapped address */
- lwz r0,0(r3)
- mtspr IBAT2U,r0
- mtspr DBAT2U,r0
- lwz r0,4(r3)
- mtspr IBAT2L,r0
- mtspr DBAT2L,r0
-/* Load up the kernel context */
- lis r2,init_task@h
- ori r2,r2,init_task@l
- addi r2,r2,TSS
- andc r2,r2,r7 /* make unmapped address */
- SYNC /* Force all PTE updates to finish */
- tlbia /* Clear all TLB entries */
- lis r3,_SDR1@h
- ori r3,r3,_SDR1@l
- andc r3,r3,r7 /* make unmapped address */
- lwz r3,0(r3)
- mtspr SDR1,r3
- lwz r0,MMU_SEG0(r2)
- mtsr SR0,r0
- lwz r0,MMU_SEG1(r2)
- mtsr SR1,r0
- lwz r0,MMU_SEG2(r2)
- mtsr SR2,r0
- lwz r0,MMU_SEG3(r2)
- mtsr SR3,r0
- lwz r0,MMU_SEG4(r2)
- mtsr SR4,r0
- lwz r0,MMU_SEG5(r2)
- mtsr SR5,r0
- lwz r0,MMU_SEG6(r2)
- mtsr SR6,r0
- lwz r0,MMU_SEG7(r2)
- mtsr SR7,r0
- lwz r0,MMU_SEG8(r2)
- mtsr SR8,r0
- lwz r0,MMU_SEG9(r2)
- mtsr SR9,r0
- lwz r0,MMU_SEG10(r2)
- mtsr SR10,r0
- lwz r0,MMU_SEG11(r2)
- mtsr SR11,r0
- lwz r0,MMU_SEG12(r2)
- mtsr SR12,r0
- lwz r0,MMU_SEG13(r2)
- mtsr SR13,r0
- lwz r0,MMU_SEG14(r2)
- mtsr SR14,r0
- lwz r0,MMU_SEG15(r2)
- mtsr SR15,r0
+#endif
/* Now turn on the MMU for real! */
- mfmsr r3
- ori r3,r3,MSR_DR|MSR_IR
- mtspr SRR1,r3
- lis r3,30f@h
- ori r3,r3,30f@l
+ li r4,MSR_KERNEL
+ lis r3,start_kernel@h
+ ori r3,r3,start_kernel@l
mtspr SRR0,r3
-DO_RFI_TRACE_UNMAPPED(0xDEAD0500)
- SYNC
- rfi /* enables MMU */
-30:
-/* Turn on L1 Data Cache */
- mfspr r3,HID0 /* Caches are controlled by this register */
- ori r4,r3,(HID0_ICE|HID0_ICFI)
- ori r3,r3,(HID0_ICE)
- ori r4,r4,(HID0_DCE|HID0_DCI)
- ori r3,r3,(HID0_DCE)
- sync
- mtspr HID0,r4
- mtspr HID0,r3
-/* L1 cache enable */
- mfspr r2,PVR /* Check for 603/603e */
- srwi r2,r2,16
- cmpi 0,r2,4 /* 604 */
- bne 40f
- mfspr r3,HID0 /* Turn on 604 specific features */
- ori r3,r3,(HID0_SIED|HID0_BHTE)
- mtspr HID0,r3
-40: b _EXTERN(start_kernel) /* call main code */
- .long 0 # Illegal!
+ mtspr SRR1,r4
+ rfi /* enable MMU and jump to start_kernel */
+#ifdef CONFIG_PREP
/*
- * BeBox CPU #2 runs here
- */
-Reset_BeBox_CPU1:
- lis r1,CPU1_stack@h
- ori r1,r1,CPU1_stack@l
- li r2,0x0FFF /* Mask stack address down to page boundary */
- andc r1,r1,r2
- subi r1,r1,INT_FRAME_SIZE /* Padding for first frame */
- lis r30,CPU1_trace@h
- ori r30,r30,CPU1_trace@l
- andc r30,r30,r7
- li r5,1
- stw r5,0(r30)
- li r2,0 /* TOC pointer for nanokernel */
- li r0,MSR_ /* Make sure FPU enabled */
+ * This is jumped to on prep systems right after the kernel is relocated
+ * to its proper place in memory by the boot loader. The expected layout
+ * of the regs is:
+ * R3: End of image
+ * R4: Start of image - 0x400
+ * R11: Start of command line string
+ * R12: End of command line string
+ *
+ * This just gets a minimal mmu environment setup so we can call
+ * start_here() to do the real work.
+ * -- Cort
+ */
+ .globl __start
+__start:
+ .globl _start
+_start:
+ lis r7,0xF000 /* To mask upper 4 bits */
+/* save pointer to residual data */
+ lis r1,resptr@h
+ ori r1,r1,resptr@l
+ addis r1,r1,-KERNELBASE@h
+ stw r3,0(r1)
+/* save argument string */
+ li r0,0 /* Null terminate string */
+ stb r0,0(r12)
+ lis r1,cmd_line@h
+ ori r1,r1,cmd_line@l
+ addis r1,r1,-KERNELBASE@h
+ subi r1,r1,1
+ subi r11,r11,1
+00: lbzu r0,1(r11)
+ cmpi 0,r0,0
+ stbu r0,1(r1)
+ bne 00b
+/* setup the msr with sane values */
+ li r0,MSR_
mtmsr r0
-/* Initialize BAT registers */
- lis r3,BAT0@h
- ori r3,r3,BAT0@l
- andc r3,r3,r7 /* make unmapped address */
- lwz r0,0(r3)
- mtspr IBAT0U,r0
- mtspr DBAT0U,r0
- lwz r0,4(r3)
- mtspr IBAT0L,r0
- mtspr DBAT0L,r0
- lis r3,BAT1@h
- ori r3,r3,BAT1@l
- andc r3,r3,r7 /* make unmapped address */
- lwz r0,0(r3)
- mtspr IBAT1U,r0
- mtspr DBAT1U,r0
- lwz r0,4(r3)
- mtspr IBAT1L,r0
- mtspr DBAT1L,r0
- lis r3,TMP_BAT2@h
- ori r3,r3,TMP_BAT2@l
- andc r3,r3,r7 /* make unmapped address */
- lwz r0,0(r3)
- mtspr IBAT2U,r0
- mtspr DBAT2U,r0
- lwz r0,4(r3)
- mtspr IBAT2L,r0
- mtspr DBAT2L,r0
-/* Now we can turn on the MMU */
+/* turn on the mmu with bats covering kernel enough to get started */
+ LOAD_BATS(r3,r0)
mfmsr r3
ori r3,r3,MSR_DR|MSR_IR
mtspr SRR1,r3
lis r3,10f@h
ori r3,r3,10f@l
mtspr SRR0,r3
- li r5,2
- stw r5,0(r30)
SYNC
rfi /* enables MMU */
-10:
- lis r30,CPU1_trace@h
- ori r30,r30,CPU1_trace@l
- li r5,3
- stw r5,0(r30)
- bl _EXTERN(BeBox_CPU1)
-
-/*
- * Machine Check (Bus Errors, etc)
- */
-MachineCheck:
- TRACE_TRAP(0x0200)
- SAVE_INT_REGS(0x0200)
- mr r3,r1 /* Set pointer to saved regs */
- bl _EXTERN(MachineCheckException)
- RETURN_FROM_INT(0x0200)
-
-/*
- * Data Access exception
- */
-DataAccess:
- SAVE_INT_REGS(0x0300)
-#if 1
- mfspr r3, DAR
- mfspr r4, DSISR
- li r5, 0 /* not a text fault */
- mr r6, r1
- bl _EXTERN(new_page_fault)
-#else
- SAVE_PAGE_FAULT_REGS(0x0D00)
- mr r3,r1
- bl _EXTERN(DataAccessException)
-#endif
- RETURN_FROM_INT(0x0300)
-
-/*
- * Instruction Access Exception
- */
-InstructionAccess:
- SAVE_INT_REGS(0x0400)
-#if 1
- mfspr r3, SPR2 /* srr0 was saved here */
- mfspr r4, SPR3 /* srr1 was saved here */
- li r5, 1 /* a text fault */
- mr r6, r1
- bl _EXTERN(new_page_fault)
-#else
- SAVE_PAGE_FAULT_REGS(0x0D00)
- mr r3,r1
- bl _EXTERN(InstructionAccessException)
-#endif
- RETURN_FROM_INT(0x0400)
-
-/*
- * Hardware Interrupt
- */
-HardwareInterrupt:
- SAVE_INT_REGS(0x0500)
- BUMP(__Hardware_Interrupts)
- mr r3,r1 /* Set pointer to saved regs */
- bl _EXTERN(handle_IRQ)
- RETURN_FROM_INT(0x0500)
-
-/*
- * Alignment
- */
-Alignment:
- TRACE_TRAP(0x0600)
- SAVE_INT_REGS(0x0600)
- mr r3,r1 /* Set pointer to saved regs */
- bl _EXTERN(AlignmentException)
- RETURN_FROM_INT(0x0600)
-
+10: lis r7,start_here@ha /* jump up to our copy at KERNELBASE */
+ addi r7,r7,start_here@l
+ mtlr r7
+ blr
+#endif /* CONFIG_PREP */
+
/*
- * Illegal instruction
+ * FP unavailable trap from kernel - print a message, but let
+ * the task use FP in the kernel until it returns to user mode.
*/
-ProgramCheck:
- TRACE_TRAP(0x0700)
- SAVE_INT_REGS(0x0700)
- mr r3,r1 /* Set pointer to saved regs */
- bl _EXTERN(ProgramCheckException)
- RETURN_FROM_INT(0x0700)
-
-/*
- * Single Step Exception
- */
-SingleStep:
- SAVE_INT_REGS(0x0D00)
- SAVE_PAGE_FAULT_REGS(0x0D00)
- mr r3,r1 /* Set pointer to saved regs */
- bl _EXTERN(SingleStepException)
-#if 0
- bl _EXTERN(flush_instruction_cache)
+KernelFP:
+ lwz r3,_MSR(r1)
+ ori r3,r3,MSR_FP
+ stw r3,_MSR(r1) /* enable use of FP after return */
+ lis r3,86f@h
+ ori r3,r3,86f@l
+ mr r4,r2 /* current */
+ lwz r5,_NIP(r1)
+ bl printk
+ b int_return
+86: .string "floating point used in kernel (task=%p, pc=%x)\n"
+ .align 4
+
+/*
+ * Disable FP for the task which had the FPU previously,
+ * and save its floating-point registers in its thread_struct.
+ * Enables the FPU for use in the kernel on return.
+ * (If giveup_fpu_unmapped uses any integer registers other than
+ * r3 - r6, the return code at load_up_fpu above will have
+ * to be adjusted.)
+ */
+giveup_fpu_unmapped:
+ lis r6,-KERNELBASE@h
+ b 1f
+
+ .globl giveup_fpu
+giveup_fpu:
+ li r6,0
+1:
+ addis r3,r6,last_task_used_math@h/*a*/
+ lwz r4,last_task_used_math@l(r3)
+#if 0
+ addis r3,r6,fpu_tss@ha
+ lwz r4,fpu_tss@l(r3)
#endif
- RETURN_FROM_INT(0x0D00)
-
-/*
- * Floating point [not available, etc]
- */
-FloatingPointCheck:
- SAVE_INT_REGS(0x0800)
- mr r3,r1 /* Set pointer to saved regs */
- bl _EXTERN(FloatingPointCheckException)
- cmpi 0,r3,MSR_FP /* check if fp was turned on by handler */
- bne 00f
- RETURN_FROM_INT(0x0f0f) /* 0xf0f tells to restore fp regs */
-00: RETURN_FROM_INT(0x0200)
+ mfmsr r5
+ ori r5,r5,MSR_FP
+ SYNC
+ mtmsr r5 /* enable use of fpu now */
+ SYNC
+ cmpi 0,r4,0
+ add r4,r4,r6
+ beqlr /* if no previous owner, done */
+ addi r4,r4,TSS /* want TSS of last_task_used_math */
+ li r5,0
+ stw r5,last_task_used_math@l(r3)
+#if 0
+ stw r5,fpu_tss@l(r3)
+#endif
+ SAVE_32FPRS(0, r4)
+ mffs fr0
+ stfd fr0,TSS_FPSCR-4(r4)
+ lwz r5,PT_REGS(r4)
+ add r5,r5,r6
+ lwz r3,_MSR-STACK_FRAME_OVERHEAD(r5)
+ li r4,MSR_FP
+ andc r3,r3,r4 /* disable FP for previous task */
+ stw r3,_MSR-STACK_FRAME_OVERHEAD(r5)
+ blr
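
In C terms this is the lazy-FPU hand-off; a hedged sketch with illustrative types, where save_fp_state() stands in for SAVE_32FPRS plus the mffs/stfd of the FPSCR:

	#define MSR_FP_SK 0x2000	/* MSR_FP, per the 60x MSR layout */

	struct fpu_owner_sk {		/* illustrative stand-in for a TSS */
		double fpr[32];
		double fpscr;
		unsigned long *msr_slot;	/* &regs->msr on its stack */
	};

	extern struct fpu_owner_sk *last_task_used_math_sk;	/* hypothetical */
	extern void save_fp_state(struct fpu_owner_sk *);	/* hypothetical */

	static void giveup_fpu_sketch(void)
	{
		struct fpu_owner_sk *old = last_task_used_math_sk;

		if (!old)
			return;			/* nobody owns the FPU */
		last_task_used_math_sk = 0;
		save_fp_state(old);
		*old->msr_slot &= ~MSR_FP_SK;	/* its next FP use traps back
						   into load_up_fpu */
	}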
/*
- * System Call exception
+ * Handle a system call.
*/
-SystemCall:
- SAVE_INT_REGS(0x0C00)
- lwz r2,_CCR(r1) /* Clear SO bit in CR */
- lis r9,0x1000
- andc r2,r2,r9
- stw r2,_CCR(r1)
+DoSyscall:
+ stw r0,TSS+LAST_SYSCALL(r2)
+ lwz r11,_CCR(r1) /* Clear SO bit in CR */
+ lis r10,0x1000
+ andc r11,r11,r10
+ stw r11,_CCR(r1)
+#ifdef SHOW_SYSCALLS
+#ifdef SHOW_SYSCALLS_TASK
+ lis r31,show_syscalls_task@ha
+ lwz r31,show_syscalls_task@l(r31)
+ cmp 0,r2,r31
+ bne 1f
+#endif
+ lis r3,7f@ha
+ addi r3,r3,7f@l
+ lwz r4,GPR0(r1)
+ lwz r5,GPR3(r1)
+ lwz r6,GPR4(r1)
+ lwz r7,GPR5(r1)
+ lwz r8,GPR6(r1)
+ mr r9,r2
+ bl printk
+ lwz r0,GPR0(r1)
+ lwz r3,GPR3(r1)
+ lwz r4,GPR4(r1)
+ lwz r5,GPR5(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+1:
+#endif /* SHOW_SYSCALLS */
cmpi 0,r0,0x7777 /* Special case for 'sys_sigreturn' */
- bne+ 10f
- mr r3,r1
- bl _EXTERN(sys_sigreturn)
- cmpi 0,r3,0 /* Check for restarted system call */
- bge 99f
- b 20f
-10: lis r2,current_set@ha
- lwz r2,current_set@l(r2)
- lwz r2,TASK_FLAGS(r2)
- andi. r2,r2,PF_TRACESYS
- bne 50f
-
- lis r2,sys_call_table@h
- ori r2,r2,sys_call_table@l
+ beq- 10f
+ lwz r10,TASK_FLAGS(r2)
+ andi. r10,r10,PF_TRACESYS
+ bne- 50f
+ cmpli 0,r0,NR_syscalls
+ bge- 66f
+ lis r10,sys_call_table@h
+ ori r10,r10,sys_call_table@l
slwi r0,r0,2
- lwzx r2,r2,r0 /* Fetch system call handler [ptr] */
-#if 1
- cmpi 0,r2,0 /* make sure syscall handler not 0 */
- beq 99f
- cmpi 0,r0,NR_syscalls<<2 /* make sure syscallnum in bounds */
- bgt 99f
-#endif
- mtlr r2
- mr r9,r1
+ lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
+ cmpi 0,r10,0
+ beq- 66f
+ mtlr r10
+ addi r9,r1,STACK_FRAME_OVERHEAD
blrl /* Call handler */
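
A hedged C sketch of the dispatch just performed: bounds-check the syscall number, fetch the table entry, and reject empty slots (the asm branches to its error path, 66:, rather than returning -ENOSYS directly).

	#define NR_SYSCALLS_SK 256	/* illustrative bound */
	extern long (*sys_call_table_sk[NR_SYSCALLS_SK])(long, long, long,
							 long, long);

	static long dispatch_sketch(unsigned long nr, long a, long b,
				    long c, long d, long e)
	{
		long (*fn)(long, long, long, long, long);

		if (nr >= NR_SYSCALLS_SK)
			return -38;		/* ENOSYS */
		fn = sys_call_table_sk[nr];
		if (!fn)
			return -38;
		return fn(a, b, c, d, e);
	}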
-
-20: stw r3,RESULT(r1) /* Save result */
- cmpi 0,r3,0
- bge 30f
+ .globl syscall_ret_1
+syscall_ret_1:
+20: stw r3,RESULT(r1) /* Save result */
+#ifdef SHOW_SYSCALLS
+#ifdef SHOW_SYSCALLS_TASK
+ cmp 0,r2,r31
+ bne 91f
+#endif
+ mr r4,r3
+ lis r3,79f@ha
+ addi r3,r3,79f@l
+ bl printk
+ lwz r3,RESULT(r1)
+91:
+#endif
+ li r10,-_LAST_ERRNO
+ cmpl 0,r3,r10
+ blt 30f
neg r3,r3
cmpi 0,r3,ERESTARTNOHAND
bne 22f
li r3,EINTR
-22: lwz r2,_CCR(r1) /* Set SO bit in CR */
- oris r2,r2,0x1000
- stw r2,_CCR(r1)
+22: lwz r10,_CCR(r1) /* Set SO bit in CR */
+ oris r10,r10,0x1000
+ stw r10,_CCR(r1)
30: stw r3,GPR3(r1) /* Update return value */
- b 99f
+ b int_return
+66: li r3,ENOSYS
+ b 22b
+/* sys_sigreturn */
+10: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl _EXTERN(sys_sigreturn)
+ cmpi 0,r3,0 /* Check for restarted system call */
+ bge int_return
+ b 20b
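
The unsigned compare against -_LAST_ERRNO above is what separates errors from large successful results such as mmap addresses. A hedged C sketch of the convention, with illustrative constants standing in for the header values:

	#define LAST_ERRNO_SK 122	/* stand-in for _LAST_ERRNO */
	#define ERESTARTNOHAND_SK 514	/* illustrative */
	#define EINTR_SK 4

	static long syscall_ret_sketch(long ret, unsigned long *ccr)
	{
		if ((unsigned long)ret >= (unsigned long)-LAST_ERRNO_SK) {
			ret = -ret;		/* positive errno for user */
			if (ret == ERESTARTNOHAND_SK)
				ret = EINTR_SK;
			*ccr |= 0x10000000;	/* set SO: syscall failed */
		}
		return ret;			/* stored back into GPR3 */
	}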
/* Traced system call support */
50: bl _EXTERN(syscall_trace)
lwz r0,GPR0(r1) /* Restore original registers */
@@ -1045,369 +1149,371 @@
lwz r7,GPR7(r1)
lwz r8,GPR8(r1)
lwz r9,GPR9(r1)
- lis r2,sys_call_table@h
- ori r2,r2,sys_call_table@l
+ cmpli 0,r0,NR_syscalls
+ bge- 66f
+ lis r10,sys_call_table@h
+ ori r10,r10,sys_call_table@l
slwi r0,r0,2
- lwzx r2,r2,r0 /* Fetch system call handler [ptr] */
- mtlr r2
- mr r9,r1
+ lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
+ cmpi 0,r10,0
+ beq- 66f
+ mtlr r10
+ addi r9,r1,STACK_FRAME_OVERHEAD
blrl /* Call handler */
+ .globl syscall_ret_2
+syscall_ret_2:
stw r3,RESULT(r1) /* Save result */
- cmpi 0,r3,0
- bge 60f
+ stw r3,GPR0(r1) /* temporary gross hack to make strace work */
+ li r10,-_LAST_ERRNO
+ cmpl 0,r3,r10
+ blt 60f
neg r3,r3
cmpi 0,r3,ERESTARTNOHAND
bne 52f
li r3,EINTR
-52: lwz r2,_CCR(r1) /* Set SO bit in CR */
- oris r2,r2,0x1000
- stw r2,_CCR(r1)
+52: lwz r10,_CCR(r1) /* Set SO bit in CR */
+ oris r10,r10,0x1000
+ stw r10,_CCR(r1)
60: stw r3,GPR3(r1) /* Update return value */
bl _EXTERN(syscall_trace)
-99:
- RETURN_FROM_INT(0x0C00)
-
-/*
- * Handle TLB miss for instruction
- */
-InstructionTLBMiss:
- BUMP_UNMAPPED(__Instruction_TLB_Misses)
- mfctr r0 /* Need to save this - CTR can't be touched! */
- mfspr r2,HASH1 /* Get PTE pointer */
- mfspr r3,ICMP /* Partial item compare value */
-00: li r1,8 /* 8 items / bucket */
- mtctr r1
- subi r2,r2,8 /* Preset pointer */
-10: lwzu r1,8(r2) /* Get next PTE */
- cmp 0,r1,r3 /* Found entry yet? */
- bdne 10b /* Jump back if not, until CTR==0 */
- bne 30f /* Try secondary hash if CTR==0 */
- lwz r1,4(r2) /* Get second word of entry */
-#if 0
- andi. r3,r1,0x08 /* Check guard bit - invalid access if set */
- bne InstructionFetchError
-#endif
- andi. r3,r1,0x100 /* Check R bit (referenced) */
- bne 20f /* If set, all done */
- ori r1,r1,0x100 /* Set bit */
- stw r1,4(r2) /* Update memory image */
-20: mtctr r0 /* Restore CTR */
- mfspr r3,SRR1 /* Need to restore CR0 */
- mtcrf 0x80,r3
- mfspr r0,IMISS /* Set to update TLB */
- mtspr RPA,r1
- tlbli r0
-#if 0
- SYNC
+ b int_return
+66: li r3,ENOSYS
+ b 52b
+#ifdef SHOW_SYSCALLS
+7: .string "syscall %d(%x, %x, %x, %x), current=%p\n"
+79: .string " -> %x\n"
+ .align 2
#endif
- rfi /* All done */
-/* Secondary hash */
-30: andi. r1,r3,0x40 /* Already doing secondary hash? */
- bne InstructionAddressInvalid /* Yes - item not in hash table */
- mfspr r2,HASH2 /* Get hash table pointer */
- ori r3,r3,0x40 /* Set secondary hash */
- b 00b /* Try lookup again */
/*
- * Handle TLB miss for DATA Load operation
+ * This routine switches between two different tasks. The process
+ * state of one is saved on its kernel stack. Then the state
+ * of the other is restored from its kernel stack. The memory
+ * management hardware is updated to the second process's state.
+ * Finally, we can return to the second process, via int_return.
+ * On entry, r3 points to the TSS for the current task, r4
+ * points to the TSS for the new task, and r5 contains the
+ * MMU context number for the new task.
+ *
+ * Note: there are two ways to get to the "going out" portion
+ * of this code; either by coming in via the entry (_switch)
+ * or via "fork" which must set up an environment equivalent
+ * to the "_switch" path. If you change this (or in particular, the
+ * SAVE_REGS macro), you'll have to change the fork code also.
+ *
+ * The code which creates the new task context is in 'copy_thread'
+ * in arch/ppc/kernel/process.c
*/
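
The overall shape of the switch is easier to see in C first. This is a sketch only --
the struct and helper names are invented, and the actual register save/restore can of
course only be done in assembly:

    /* Invented names; sketch of the flow of _switch below. */
    struct tss_sketch {
        unsigned long ksp;          /* saved kernel stack pointer (KSP) */
    };

    static unsigned long r1_sp;                 /* stands in for r1 */

    static void set_segment_regs(int context) { (void)context; }

    static void switch_sketch(struct tss_sketch *prev,
                              struct tss_sketch *next, int context)
    {
        /* 1: push a frame (trap type 0x0ff0) holding the GPRs, LR, CR,
              CTR, XER and MSR, with MSR_FP cleared so a later FP use
              by the new task traps and reloads the FP state lazily */
        prev->ksp = r1_sp;              /* stw r1,KSP(r3) */
        /* 2: point SPRG3 at the new task's physical TSS */
        r1_sp = next->ksp;              /* lwz r1,KSP(r4) */
        set_segment_regs(context);      /* VSID = context << 4 */
        /* 3: fall through into int_return, which pops the frame */
    }

    int main(void)
    {
        struct tss_sketch a = { 0 }, b = { 0x1000 };
        switch_sketch(&a, &b, 1);
        return (int)r1_sp;              /* 0x1000 */
    }
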
-DataLoadTLBMiss:
- mfctr r0 /* Need to save this - CTR can't be touched! */
- mfspr r2,HASH1 /* Get PTE pointer */
- mfspr r3,DCMP /* Partial item compare value */
-00: li r1,8 /* 8 items / bucket */
- mtctr r1
- subi r2,r2,8 /* Preset pointer */
-10: lwzu r1,8(r2) /* Get next PTE */
- cmp 0,r1,r3 /* Found entry yet? */
- bdne 10b /* Jump back if not, until CTR==0 */
- bne 30f /* Try secondary hash if CTR==0 */
- lwz r1,4(r2) /* Get second word of entry */
- andi. r3,r1,0x100 /* Check R bit (referenced) */
- ori r1,r1,0x100 /* Set bit */
- bne 20f /* If set, all done */
- stw r1,4(r2) /* Update memory image */
-20: mtctr r0 /* Restore CTR */
- mfspr r3,SRR1 /* Need to restore CR0 */
- mtcrf 0x80,r3
- mfspr r0,DMISS /* Set to update TLB */
- mtspr RPA,r1
-/* SYNC() */
- tlbld r0
-#if 0
+_GLOBAL(_switch)
+ stwu r1,-INT_FRAME_SIZE-STACK_UNDERHEAD(r1)
+ stw r0,GPR0(r1)
+ lwz r0,0(r1)
+ stw r0,GPR1(r1)
+ SAVE_10GPRS(2, r1)
+ SAVE_10GPRS(12, r1)
+ SAVE_10GPRS(22, r1)
+ mflr r20 /* Return to switch caller */
+ mfmsr r22
+ li r0,MSR_FP /* Disable floating-point */
+ andc r22,r22,r0
+ stw r20,_NIP(r1)
+ stw r22,_MSR(r1)
+ stw r20,_LINK(r1)
+ mfcr r20
+ mfctr r22
+ mfspr r23,XER
+ stw r20,_CCR(r1)
+ stw r22,_CTR(r1)
+ stw r23,_XER(r1)
+ li r0,0x0ff0
+ stw r0,TRAP(r1)
+ stw r1,KSP(r3) /* Set old stack pointer */
+ sync
+ addis r0,r4,-KERNELBASE@h
+ mtspr SPRG3,r0 /* Update current TSS phys addr */
SYNC
-#endif
- rfi /* All done */
-/* Secondary hash */
-30: andi. r1,r3,0x40 /* Already doing secondary hash? */
- bne DataAddressInvalid /* Yes - item not in hash table */
- mfspr r2,HASH2 /* Get hash table pointer */
- ori r3,r3,0x40 /* Set secondary hash */
- b 00b /* Try lookup again */
+ lwz r1,KSP(r4) /* Load new stack pointer */
+ addi r2,r4,-TSS /* Update current */
+ /* Set up segment registers for new task */
+ rlwinm r5,r5,4,8,27 /* VSID = context << 4 */
+ addis r5,r5,0x6000 /* Set Ks, Ku bits */
+ li r0,8 /* TASK_SIZE / SEGMENT_SIZE */
+ mtctr r0
+ li r3,0
+3: mtsrin r5,r3
+ addi r5,r5,1 /* next VSID */
+ addis r3,r3,0x1000 /* address of next segment */
+ bdnz 3b
+ SYNC
+
+/* FALL THROUGH into int_return */
/*
- * Handle TLB miss for DATA STORE
+ * Trap exit.
*/
-DataStoreTLBMiss:
- BUMP_UNMAPPED(__DataStore_TLB_Misses)
- mfctr r0 /* Need to save this - CTR can't be touched! */
- mfspr r2,HASH1 /* Get PTE pointer */
- mfspr r3,DCMP /* Partial item compare value */
-00: li r1,8 /* 8 items / bucket */
- mtctr r1
- subi r2,r2,8 /* Preset pointer */
-10: lwzu r1,8(r2) /* Get next PTE */
- cmp 0,r1,r3 /* Found entry yet? */
- bdne 10b /* Jump back if not, until CTR==0 */
- bne 30f /* Try secondary hash if CTR==0 */
- lwz r1,4(r2) /* Get second word of entry */
- andi. r3,r1,0x80 /* Check C bit (changed) */
-#if 0 /* Note: no validation */
- beq 40f /* If not set (first time) validate access */
-#else
- ori r1,r1,0x180 /* Set changed, accessed */
- bne 20f
- stw r1,4(r2)
-#endif
-20: mtctr r0 /* Restore CTR */
- mfspr r3,SRR1 /* Need to restore CR0 */
- mtcrf 0x80,r3
- mfspr r0,DMISS /* Set to update TLB */
- mtspr RPA,r1
- tlbld r0
-#if 0
+ .globl int_return
+int_return:
+0: mfmsr r30 /* Disable interrupts */
+ li r4,0
+ ori r4,r4,MSR_EE
+ andc r30,r30,r4
+ SYNC /* Some chip revs need this... */
+ mtmsr r30
SYNC
-#endif
- rfi /* All done */
-/* Secondary hash */
-30: andi. r1,r3,0x40 /* Already doing secondary hash? */
- bne DataAddressInvalid /* Yes - item not in hash table */
- mfspr r2,HASH2 /* Get hash table pointer */
- ori r3,r3,0x40 /* Set secondary hash */
- b 00b /* Try lookup again */
-/* PTE found - validate access */
-40: rlwinm. r3,r1,30,0,1 /* Extract PP bits */
- bge- 50f /* Jump if PP=0,1 */
- andi. r3,r1,1
- beq+ 70f /* Access OK */
- b WriteProtectError /* Not OK - fail! */
-50: mfspr r3,SRR1 /* Check privilege */
+ lwz r5,_MSR(r1)
+ and. r5,r5,r4
+ beq 2f
+3: lis r4,lost_interrupts@ha
+ lwz r4,lost_interrupts@l(r4)
+ cmpi 0,r4,0
+ beq+ 1f
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl handle_IRQ
+ b 3b
+1: lis r4,bh_mask@ha
+ lwz r4,bh_mask@l(r4)
+ lis r5,bh_active@ha
+ lwz r5,bh_active@l(r5)
+ and. r4,r4,r5
+ beq+ 2f
+ ori r31,r30,MSR_EE /* re-enable interrupts */
+ SYNC
+ mtmsr r31
+ SYNC
+ bl _EXTERN(do_bottom_half)
+ SYNC
+ mtmsr r30 /* disable interrupts again */
+ SYNC
+2: lwz r3,_MSR(r1) /* Returning to user mode? */
andi. r3,r3,MSR_PR
- beq+ 60f /* Jump if supervisor mode */
- mfspr r3,DMISS /* Get address */
- mfsrin r3,r3 /* Get segment register */
- andis. r3,r3,0x2000 /* If Kp==0, OK */
- beq+ 70f
- b WriteProtectError /* Bad access */
-60: mfspr r3,DMISS /* Get address */
- mfsrin r3,r3 /* Get segment register */
- andis. r3,r3,0x4000 /* If Ks==0, OK */
- beq+ 70f
- b WriteProtectError /* Bad access */
-70: ori r1,r1,0x180 /* Set changed, accessed */
- stw r1,4(r2) /* Update PTE in memory */
- b 20b
-
-/*
- * These routines are error paths/continuations of the exception
- * handlers above. They are placed here to avoid the problems
- * of only 0x100 bytes per exception handler.
- */
-
-/* Invalid address */
-InstructionAddressInvalid:
- mfspr r3,SRR1
- rlwinm r1,r3,9,6,6 /* Get load/store bit */
- addis r1,r1,0x4000 /* Set bit 1 -> PTE not found */
- b 10f
-
-/* Fetch from guarded or no-access page */
-InstructionFetchError:
- mfspr r3,SRR1
- rlwinm r1,r3,9,6,6 /* Get load/store bit */
- addis r1,r1,0x0800 /* Set bit 4 -> protection error */
-10: mtspr DSISR,r1
- mtctr r0 /* Restore CTR */
- andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
- mtspr SRR1,r2
- mfspr r1,IMISS /* Get failing address */
- rlwinm. r2,r2,0,31,31 /* Check for little endian access */
- beq 20f /* Jump if big endian */
- xori r1,r1,3
-20: mtspr DAR,r1 /* Set fault address */
- mfmsr r0 /* Restore "normal" registers */
- xoris r0,r0,MSR_TGPR>>16
- mtcrf 0x80,r3 /* Restore CR0 */
- ori r0,r0,MSR_FP /* Need to keep FP enabled */
- sync /* Some chip revs have problems here... */
- mtmsr r0
- b InstructionAccess
+ beq+ 10f /* no - no need to mess with stack */
+ lis r3,need_resched@ha
+ lwz r3,need_resched@l(r3)
+ cmpi 0,r3,0 /* check need_resched flag */
+ beq+ 7f
+ bl _EXTERN(schedule)
+ b 0b
+7: lwz r3,BLOCKED(r2) /* Check for pending unblocked signals */
+ lwz r5,SIGNAL(r2)
+ andc. r0,r5,r3 /* lets through any unblocked signals */
+ beq+ 8f
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ bl _EXTERN(do_signal)
+ b 0b
+8: addi r4,r1,INT_FRAME_SIZE+STACK_UNDERHEAD /* size of frame */
+ stw r4,TSS+KSP(r2) /* save kernel stack pointer */
+10:
+ lwz r2,_CTR(r1)
+ lwz r0,_LINK(r1)
+ mtctr r2
+ mtlr r0
+ lwz r2,_XER(r1)
+ lwz r0,_CCR(r1)
+ mtspr XER,r2
+ mtcrf 0xFF,r0
+ REST_10GPRS(3, r1)
+ REST_10GPRS(13, r1)
+ REST_8GPRS(23, r1)
+ REST_GPR(31, r1)
+ lwz r2,_NIP(r1) /* Restore environment */
+ lwz r0,_MSR(r1)
+ mtspr SRR0,r2
+ mtspr SRR1,r0
+ lwz r0,GPR0(r1)
+ lwz r2,GPR2(r1)
+ lwz r1,GPR1(r1)
+ SYNC
+ rfi
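
The decision logic on the way out is the subtle part of int_return; restated as C
control flow it reads as below. Everything here is a sketch with stand-in stubs -- the
names mirror the symbols referenced above, but the signatures are invented:

    /* Stand-in state and stubs; only the control flow is the point. */
    static int lost_interrupts, bh_active, bh_mask;
    static int need_resched, signals_pending, to_user, ee_was_set;

    static void handle_IRQ(void)     { lost_interrupts--; }
    static void do_bottom_half(void) { bh_active = 0; }
    static void schedule(void)       { need_resched = 0; }
    static void do_signal(void)      { signals_pending = 0; }

    static void int_return_sketch(void)
    {
    restart:
        /* 0: disable interrupts (mtmsr with MSR_EE cleared) */
        if (ee_was_set) {                     /* _MSR(r1) & MSR_EE */
            while (lost_interrupts)
                handle_IRQ();                 /* replay lost interrupts */
            if (bh_active & bh_mask)
                do_bottom_half();             /* briefly re-enables EE */
        }
        if (to_user) {                        /* _MSR(r1) & MSR_PR */
            if (need_resched)    { schedule();  goto restart; }
            if (signals_pending) { do_signal(); goto restart; }
            /* record the kernel SP in TSS+KSP for the next entry */
        }
        /* restore CTR/LR/XER/CR, the GPRs, SRR0/SRR1, then rfi */
    }

    int main(void) { int_return_sketch(); return 0; }
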
-/* Invalid address */
-DataAddressInvalid:
- mfspr r3,SRR1
- rlwinm r1,r3,9,6,6 /* Get load/store bit */
- addis r1,r1,0x4000 /* Set bit 1 -> PTE not found */
- b 10f
+/*
+ * Fake an interrupt from kernel mode.
+ * This is used when enable_irq loses an interrupt.
+ * We only fill in the stack frame minimally.
+ */
+_GLOBAL(fake_interrupt)
+ mflr r0
+ stw r0,4(r1)
+ stwu r1,-INT_FRAME_SIZE-STACK_UNDERHEAD(r1)
+ stw r0,_NIP(r1)
+ stw r0,_LINK(r1)
+ mfmsr r3
+ stw r3,_MSR(r1)
+ li r0,0x0fac
+ stw r0,TRAP(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl handle_IRQ
+ addi r1,r1,INT_FRAME_SIZE+STACK_UNDERHEAD
+ lwz r0,4(r1)
+ mtlr r0
+ blr
-/* Write to read-only space */
-WriteProtectError:
- mfspr r3,SRR1
- rlwinm r1,r3,9,6,6 /* Get load/store bit */
- addis r1,r1,0x0800 /* Set bit 4 -> protection error */
-10: mtspr DSISR,r1
- mtctr r0 /* Restore CTR */
- andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
- mtspr SRR1,r2
- mfspr r1,DMISS /* Get failing address */
- rlwinm. r2,r2,0,31,31 /* Check for little endian access */
- beq 20f /* Jump if big endian */
- xori r1,r1,3
-20: mtspr DAR,r1 /* Set fault address */
- mfmsr r0 /* Restore "normal" registers */
- xoris r0,r0,MSR_TGPR>>16
- mtcrf 0x80,r3 /* Restore CR0 */
- ori r0,r0,MSR_FP /* Need to keep FP enabled */
- sync /* Some chip revs have problems here... */
- mtmsr r0
- b DataAccess
+/*
+ * Set up the segment registers for a new context.
+ */
+_GLOBAL(set_context)
+ rlwinm r3,r3,4,8,27 /* VSID = context << 4 */
+ addis r3,r3,0x6000 /* Set Ks, Ku bits */
+ li r0,8 /* TASK_SIZE / SEGMENT_SIZE */
+ mtctr r0
+ li r4,0
+3: mtsrin r3,r4
+ addi r3,r3,1 /* next VSID */
+ addis r4,r4,0x1000 /* address of next segment */
+ bdnz 3b
+ SYNC
+ blr
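
The segment-register values this writes are pure bit arithmetic and can be checked from
plain C. A runnable sketch; the 0x6000 (Ks|Ku) constant and the VSID = context << 4
encoding are read directly off the code above:

    #include <stdio.h>

    int main(void)
    {
        unsigned context = 5;               /* example context number */
        unsigned sr = 0x60000000 | ((context << 4) & 0x00fffff0);
        unsigned seg;

        /* one segment register per 256MB of the 2GB user range */
        for (seg = 0; seg < 8; seg++, sr++)
            printf("segment %u (va %#010x): SR = %#010x\n",
                   seg, seg << 28, sr);
        return 0;
    }
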
/*
- * Flush instruction cache
- * *** I'm really paranoid here!
+ * Flush instruction cache.
+ * This is a no-op on the 601.
*/
_GLOBAL(flush_instruction_cache)
- mflr r5
- bl _EXTERN(flush_data_cache)
- mfspr r3,HID0 /* Caches are controlled by this register */
- li r4,0
- ori r4,r4,(HID0_ICE|HID0_ICFI)
- or r3,r3,r4 /* Need to enable+invalidate to clear */
+ mfspr r3,PVR
+ rlwinm r3,r3,16,16,31
+ cmpi 0,r3,1
+ beqlr /* for 601, do nothing */
+ /* 603/604 processor - use invalidate-all bit in HID0 */
+ mfspr r3,HID0
+ ori r3,r3,HID0_ICFI
mtspr HID0,r3
- andc r3,r3,r4
- ori r3,r3,HID0_ICE /* Enable cache */
- mtspr HID0,r3
- mtlr r5
+ SYNC
blr
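
The PVR test that opens this routine (and the two below) is the standard way to detect
the 601, whose unified cache makes these flushes unnecessary. In C terms, with an
invented accessor standing in for the mfspr instruction:

    /* mfspr_pvr() is an invented stand-in for "mfspr rN,PVR". */
    static unsigned mfspr_pvr(void) { return 0x00010000; }

    /* The processor version lives in the top halfword; 1 means 601. */
    static int is_601(void)
    {
        return (mfspr_pvr() >> 16) == 1;
    }
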
/*
- * Flush data cache
- * *** I'm really paranoid here!
+ * Write any modified data cache blocks out to memory
+ * and invalidate the corresponding instruction cache blocks.
+ * This is a no-op on the 601.
+ *
+ * store_cache_range(unsigned long start, unsigned long stop)
*/
-_GLOBAL(flush_data_cache)
- BUMP(__Cache_Flushes)
- lis r3,cache_is_copyback@ha
- lwz r3,cache_is_copyback@l(r3)
- cmpi 0,r3,0
- beq 10f
-/* When DATA CACHE is copy-back */
- lis r3,cache_flush_buffer@h
- ori r3,r3,cache_flush_buffer@l
- li r4,NUM_CACHE_LINES
+CACHE_LINE_SIZE = 32
+LG_CACHE_LINE_SIZE = 5
+_GLOBAL(store_cache_range)
+ mfspr r5,PVR
+ rlwinm r5,r5,16,16,31
+ cmpi 0,r5,1
+ beqlr /* for 601, do nothing */
+ li r5,CACHE_LINE_SIZE-1
+ andc r3,r3,r5
+ subf r4,r3,r4
+ add r4,r4,r5
+ srwi. r4,r4,LG_CACHE_LINE_SIZE
+ beqlr
+ mtctr r4
+ mr r6,r3
+1: dcbst 0,r3
+ addi r3,r3,CACHE_LINE_SIZE
+ bdnz 1b
+ sync /* wait for dcbst's to get to ram */
mtctr r4
-00: dcbz 0,r3 /* Flush cache line with minimal BUS traffic */
- addi r3,r3,CACHE_LINE_SIZE /* Next line, please */
- bdnz 00b
-10: blr
+2: icbi 0,r6
+ addi r6,r6,CACHE_LINE_SIZE
+ bdnz 2b
+ sync
+ isync
+ blr
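
The line-walking arithmetic at the top of store_cache_range -- align the start down to a
cache line, then round the byte count up to whole lines -- is worth spelling out. A
runnable sketch of just that computation:

    #include <stdio.h>

    #define CACHE_LINE_SIZE    32
    #define LG_CACHE_LINE_SIZE 5

    int main(void)
    {
        unsigned long start = 0x1005, stop = 0x1085;   /* example range */
        unsigned long first = start & ~(unsigned long)(CACHE_LINE_SIZE - 1);
        unsigned long lines =
            (stop - first + CACHE_LINE_SIZE - 1) >> LG_CACHE_LINE_SIZE;

        /* the assembly then does: lines x dcbst; sync; lines x icbi;
           sync; isync -- over the same addresses */
        printf("flush %lu line(s) from %#lx\n", lines, first);
        return 0;
    }
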
/*
* Flush a particular page from the DATA cache
* Note: this is necessary because the instruction cache does *not*
* snoop from the data cache.
- * void flush_page(void *page)
+ * This is a no-op on the 601 which has a unified cache.
+ *
+ * void flush_page_to_ram(void *page)
*/
-_GLOBAL(flush_page)
+_GLOBAL(flush_page_to_ram)
+ mfspr r5,PVR
+ rlwinm r5,r5,16,16,31
+ cmpi 0,r5,1
+ beqlr /* for 601, do nothing */
li r4,0x0FFF
andc r3,r3,r4 /* Get page base address */
li r4,4096/CACHE_LINE_SIZE /* Number of lines in a page */
mtctr r4
-00: dcbf 0,r3 /* Clear line */
- icbi 0,r3
+ mr r6,r3
+0: dcbst 0,r3 /* Write line to ram */
addi r3,r3,CACHE_LINE_SIZE
- bdnz 00b
+ bdnz 0b
+ sync
+ mtctr r4
+1: icbi 0,r6
+ addi r6,r6,CACHE_LINE_SIZE
+ bdnz 1b
+ sync
+ isync
blr
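
A hedged usage sketch (the caller and its page-alignment math are invented for
illustration): any code that stores instructions through the data cache must call this
before those instructions can safely execute, since, as noted above, the instruction
cache does not snoop the data cache:

    /* Invented caller; flush_page_to_ram is the routine above. */
    extern void flush_page_to_ram(void *page);

    static void patch_insn(unsigned long *where, unsigned long insn)
    {
        *where = insn;      /* new instruction sits in the dcache only */
        flush_page_to_ram((void *)((unsigned long)where & ~0xfffUL));
    }
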
/*
- * This routine switches between two different tasks. The process
- * state of one is saved on its kernel stack. Then the state
- * of the other is restored from its kernel stack. The memory
- * management hardware is updated to the second process's state.
- * Finally, we can return to the second process, via the 'return'.
- *
- * Note: there are two ways to get to the "going out" portion
- * of this code; either by coming in via the entry (_switch)
- * or via "fork" which must set up an environment equivalent
- * to the "_switch" path. If you change this (or in particular, the
- * SAVE_ALL_REGS macro), you'll have to change the fork code also.
+ * Flush entries from the hash table with VSIDs in the range
+ * given.
+ */
+_GLOBAL(flush_hash_segments)
+ rlwinm r3,r3,7,1,24 /* put VSID lower limit in position */
+ oris r3,r3,0x8000 /* set V bit */
+ rlwinm r4,r4,7,1,24 /* put VSID upper limit in position */
+ oris r4,r4,0x8000
+ ori r4,r4,0x7f
+ lis r5,Hash@ha
+ lwz r5,Hash@l(r5) /* base of hash table */
+ lis r6,Hash_size@ha
+ lwz r6,Hash_size@l(r6) /* size in bytes */
+ srwi r6,r6,3 /* # PTEs */
+ mtctr r6
+ addi r5,r5,-8
+ li r0,0
+1: lwzu r6,8(r5) /* get next tag word */
+ cmplw 0,r6,r3
+ cmplw 1,r6,r4
+ cror 0,0,5 /* set cr0.lt if out of range */
+ blt 2f /* branch if out of range */
+ stw r0,0(r5) /* invalidate entry */
+2: bdnz 1b /* continue with loop */
+ sync
+ tlbia
+ isync
+ blr
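
The two bounds built at the top of flush_hash_segments are PTE tag words: V bit set,
VSID shifted into place, and (for the upper bound) the low 7 bits -- the H and API
bits -- all ones, so every tag whose VSID lies in the given range falls between them.
A runnable sketch of the bound construction and range test; the example VSIDs are
arbitrary:

    #include <stdio.h>

    static unsigned tag_lo(unsigned vsid)
    {
        return 0x80000000u | ((vsid << 7) & 0x7fffff80u);
    }
    static unsigned tag_hi(unsigned vsid)
    {
        return tag_lo(vsid) | 0x7fu;    /* H + API bits all ones */
    }

    int main(void)
    {
        unsigned lo = tag_lo(0x10), hi = tag_hi(0x1f);  /* example range */
        unsigned tag = 0x80000000u | (0x15u << 7) | 0x23u;

        /* mirrors the cmplw/cror/blt test in the loop above */
        if (!(tag < lo || tag > hi))
            printf("tag %#x would be invalidated\n", tag);
        return 0;
    }
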
+
+/*
+ * Flush the entry for a particular page from the hash table.
*
- * The code which creates the new task context is in 'copy_thread'
- * in arch/ppc/kernel/process.c
- */
-_GLOBAL(_switch)
- mtspr SPR0,r1 /* SAVE_ALL_REGS prologue */
- mtspr SPR1,r2
- mflr r2 /* Return to switch caller */
- mtspr SPR2,r2
- mfmsr r2
- mtspr SPR3,r2
- SAVE_ALL_REGS(0x0FF0)
- SAVE_FP_REGS()
- CHECK_STACK()
- SYNC()
- stw r1,KSP(r3) /* Set old stack pointer */
- BUMP(__Context_Switches)
- lwz r1,KSP(r4) /* Load new stack pointer */
- lwz r0,MMU_SEG0(r4)
- mtsr SR0,r0
- lwz r0,MMU_SEG1(r4)
- mtsr SR1,r0
- lwz r0,MMU_SEG2(r4)
- mtsr SR2,r0
- lwz r0,MMU_SEG3(r4)
- mtsr SR3,r0
- lwz r0,MMU_SEG4(r4)
- mtsr SR4,r0
- lwz r0,MMU_SEG5(r4)
- mtsr SR5,r0
- lwz r0,MMU_SEG6(r4)
- mtsr SR6,r0
- lwz r0,MMU_SEG7(r4)
- mtsr SR7,r0
-#if 0
- /* segs 8-15 are shared by everyone -- don't need to be changed */
- lwz r0,MMU_SEG8(r4)
- mtsr SR8,r0
- lwz r0,MMU_SEG9(r4)
- mtsr SR9,r0
- lwz r0,MMU_SEG10(r4)
- mtsr SR10,r0
- lwz r0,MMU_SEG11(r4)
- mtsr SR11,r0
- lwz r0,MMU_SEG12(r4)
- mtsr SR12,r0
- lwz r0,MMU_SEG13(r4)
- mtsr SR13,r0
- lwz r0,MMU_SEG14(r4)
- mtsr SR14,r0
- lwz r0,MMU_SEG15(r4)
- mtsr SR15,r0
-#endif
- /* no need to invalidate tlb since each process has a distinct
- set of vsid's. -- Cort */
-#if 0
- tlbia /* Invalidate entire TLB */
- BUMP(__TLBIAs)
-#endif
- /* p5.2 603 users manual - with addr transl. enabled,
- the memory access is performed under the control of
- the page table entry. I interpret this to mean that
- it is tagged with the vsid -- so no need to flush here
- since each process has a distinct set of vsid's.
- Of course, my intepretation may be wrong.
- -- Cort */
- /*bl _EXTERN(flush_instruction_cache)*/
- RETURN_FROM_INT(0x0f0f)
-
+ * flush_hash_page(unsigned context, unsigned long va)
+ */
+_GLOBAL(flush_hash_page)
+ rlwinm r3,r3,11,1,20 /* put context into vsid */
+ rlwimi r3,r4,11,21,24 /* put top 4 bits of va into vsid */
+ oris r3,r3,0x8000 /* set V (valid) bit */
+ rlwimi r3,r4,10,26,31 /* put in API (abbrev page index) */
+ rlwinm r7,r4,32-6,10,25 /* get page index << 6 */
+ rlwinm r5,r3,32-1,7,25 /* vsid << 6 */
+ xor r7,r7,r5 /* primary hash << 6 */
+ lis r5,Hash_mask@ha
+ lwz r5,Hash_mask@l(r5) /* hash mask */
+ slwi r5,r5,6 /* << 6 */
+ and r7,r7,r5
+ lis r6,Hash@ha
+ lwz r6,Hash@l(r6) /* hash table base */
+ add r6,r6,r7 /* address of primary PTEG */
+ li r8,8
+ mtctr r8
+ addi r7,r6,-8
+1: lwzu r0,8(r7) /* get next PTE */
+ cmpw 0,r0,r3 /* see if tag matches */
+ bdnzf 2,1b /* while --ctr != 0 && !cr0.eq */
+ beq 3f /* if we found it */
+ ori r3,r3,0x40 /* set H (alt. hash) bit */
+ xor r6,r6,r5 /* address of secondary PTEG */
+ mtctr r8
+ addi r7,r6,-8
+2: lwzu r0,8(r7) /* get next PTE */
+ cmpw 0,r0,r3 /* see if tag matches */
+ bdnzf 2,2b /* while --ctr != 0 && !cr0.eq */
+ bne 4f /* if we didn't find it */
+3: li r0,0
+ stw r0,0(r7) /* invalidate entry */
+4: sync
+ tlbie r4 /* in hw tlb too */
+ isync
+ blr
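
The PTEG addressing above follows the 603/604 hashed page table scheme: each PTEG is
8 PTEs of 8 bytes (hence the shifts by 6), and the secondary PTEG is found by
complementing the masked hash bits. A runnable sketch of the address computation, with
example values standing in for the Hash and Hash_mask variables referenced above:

    #include <stdio.h>

    int main(void)
    {
        unsigned long hash_base = 0x00180000;   /* example Hash */
        unsigned long hash_mask = 0x3ff;        /* example Hash_mask */
        unsigned context = 3;                   /* example inputs */
        unsigned long va = 0x10002000;

        unsigned vsid   = (context << 4) | (unsigned)(va >> 28);
        unsigned pindex = (unsigned)(va >> 12) & 0xffff;
        unsigned long hash      = (vsid ^ pindex) & hash_mask;
        unsigned long primary   = hash_base + (hash << 6);
        /* xor-ing the masked hash bits selects the secondary PTEG */
        unsigned long secondary = primary ^ (hash_mask << 6);

        printf("primary PTEG %#lx, secondary PTEG %#lx\n",
               primary, secondary);
        return 0;
    }
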
/*
* This routine is just here to keep GCC happy - sigh...
@@ -1415,18 +1521,212 @@
_GLOBAL(__main)
blr
+#ifdef CONFIG_PMAC
+/*
+ * These exception handlers are used when we have called a prom
+ * routine after we have taken over the exception vectors and MMU.
+ */
+ .globl prom_exc_table
+prom_exc_table:
+ .long TOPHYS(prom_exception) /* 0 */
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception) /* 400 */
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception) /* 800 */
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception) /* c00 */
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception) /* 1000 */
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception) /* 1400 */
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception) /* 1800 */
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception) /* 1c00 */
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception) /* 2000 */
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception) /* 2400 */
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception) /* 2800 */
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception) /* 2c00 */
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+ .long TOPHYS(prom_exception)
+
+/*
+ * When we come in to these prom exceptions, r1 and lr have been
+ * saved in sprg1 and sprg2, and lr points to a word containing
+ * the vector offset.
+ */
+prom_exception:
+ mr r1,r21 /* save r21 */
+ lis r21,prom_sp@ha /* get a stack to use */
+ addis r21,r21,-KERNELBASE@h
+ lwz r21,prom_sp@l(r21)
+ addis r21,r21,-KERNELBASE@h /* convert to physical addr */
+ subi r21,r21,INT_FRAME_SIZE+STACK_UNDERHEAD
+ stw r0,GPR0(r21)
+ stw r2,GPR2(r21)
+ stw r3,GPR3(r21)
+ stw r4,GPR4(r21)
+ stw r5,GPR5(r21)
+ stw r6,GPR6(r21)
+ stw r20,GPR20(r21)
+ stw r1,GPR21(r21)
+ stw r22,GPR22(r21)
+ stw r23,GPR23(r21)
+ mfspr r1,SPRG1
+ stw r1,GPR1(r21)
+ mfcr r3
+ mfspr r4,SPRG2
+ stw r3,_CCR(r21)
+ stw r4,_LINK(r21)
+ mfctr r3
+ mfspr r4,XER
+ stw r3,_CTR(r21)
+ stw r4,_XER(r21)
+ mfspr r22,SRR0
+ mfspr r23,SRR1
+
+ /* at this point we have set things up pretty much exactly
+ how EXCEPTION_PROLOG does */
+ mflr r3
+ lwz r3,0(r3) /* get exception vector */
+ stw r3,TRAP(r21)
+ cmpi 0,r3,0x300 /* was it a dsi? */
+ bne 1f
+
+ mfspr r20,DSISR /* here on data access exc. */
+ andis. r0,r20,0x8470 /* weird error? */
+ bne 3f /* if not, try to put a PTE */
+ mfspr r3,DAR /* into the hash table */
+ rlwinm r4,r23,32-13,30,30 /* MSR_PR -> _PAGE_USER */
+ rlwimi r4,r20,32-23,29,29 /* DSISR_STORE -> _PAGE_RW */
+ b 2f
+
+1: cmpi 0,r3,0x400 /* was it an isi? */
+ bne 3f
+ andis. r0,r23,0x4000 /* if so, check if no pte found */
+ beq 3f /* if so, try to put a PTE */
+ mr r3,r22 /* into the hash table */
+ rlwinm r4,r23,32-13,30,30 /* MSR_PR -> _PAGE_USER */
+ mr r20,r23 /* SRR1 has reason bits */
+2: lis r5,prom_tss@ha /* phys addr of TSS */
+ addis r5,r5,-KERNELBASE@h
+ lwz r5,prom_tss@l(r5)
+ bl hash_page
+
+3: addis r1,r21,KERNELBASE@h /* restore kernel stack ptr */
+ addi r3,r1,INT_FRAME_SIZE+STACK_UNDERHEAD
+ stw r3,0(r21) /* set stack chain pointer */
+ lis r5,prom_tss@ha
+ addis r5,r5,-KERNELBASE@h
+ lwz r5,prom_tss@l(r5)
+ mtspr SPRG3,r5 /* reset phys TSS pointer */
+ lwz r4,TRAP(r21) /* the real exception vector */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r20,MSR_KERNEL
+ bl transfer_to_handler
+ .long PromException
+ .long prom_int_return
+
+ .comm prom_sp,4
+ .comm prom_tss,4
+
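
The two rotate-and-insert instructions that build the flags word passed to hash_page
condense to two bit tests. A sketch of the same extraction; the _PAGE_USER/_PAGE_RW
values are assumptions inferred from the bit positions used above, not quoted from this
kernel's headers:

    /* Sketch of the access-flag extraction done before hash_page;
       the PAGE_* values are assumed, see the note above. */
    #define MSR_PR_BIT      0x00004000u     /* problem (user) state */
    #define DSISR_STORE_BIT 0x02000000u     /* access was a store */
    #define PAGE_USER       0x002u          /* assumed _PAGE_USER */
    #define PAGE_RW         0x004u          /* assumed _PAGE_RW */

    static unsigned access_flags(unsigned srr1, unsigned dsisr)
    {
        unsigned flags = 0;
        if (srr1 & MSR_PR_BIT)
            flags |= PAGE_USER;     /* rlwinm r4,r23,32-13,30,30 */
        if (dsisr & DSISR_STORE_BIT)
            flags |= PAGE_RW;       /* rlwimi r4,r20,32-23,29,29 */
        return flags;
    }
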
+ .globl prom_int_return
+prom_int_return:
+ lis r3,prom_exc_table@ha /* restore sprg3 for prom vectors */
+ addi r3,r3,prom_exc_table@l
+ addis r3,r3,-KERNELBASE@h
+ mtspr SPRG3,r3
+ b int_return
+
+/*
+ * When entering the prom, we have to change to using a different
+ * set of exception vectors.
+ */
+ .globl enter_prom
+enter_prom:
+ stwu r1,-32(r1)
+ mflr r0
+ stw r0,36(r1)
+ stw r29,20(r1)
+ stw r30,24(r1)
+ stw r31,28(r1)
+ lis r8,prom_entry@ha
+ lwz r8,prom_entry@l(r8)
+ mfmsr r31
+ andi. r0,r31,MSR_IP /* using our own vectors yet? */
+ beq 1f /* if so, have to switch */
+ mtlr r8
+ blrl /* if not, can just charge ahead */
+ b 2f
+1: lis r9,prom_sp@ha /* save sp for exception handler */
+ stw r1,prom_sp@l(r9)
+ mfspr r29,SPRG3 /* save physical tss pointer */
+ lis r9,prom_tss@ha
+ stw r29,prom_tss@l(r9)
+ li r9,0
+ ori r9,r9,MSR_EE
+ andc r30,r31,r9
+ lis r9,prom_exc_table@ha /* set pointer to exception table */
+ addi r9,r9,prom_exc_table@l
+ addis r9,r9,-KERNELBASE@h
+ ori r0,r31,MSR_IP
+ sync
+ mtmsr r30 /* disable interrupts */
+ mtspr SPRG3,r9 /* while we update MSR_IP and sprg3 */
+ sync
+ mtmsr r0 /* start using exc. vectors in prom */
+ mtlr r8
+ blrl /* call prom */
+ sync
+ mtmsr r30 /* disable interrupts again */
+ mtspr SPRG3,r29 /* while we restore MSR_IP and sprg3 */
+ sync
+ mtmsr r31 /* reenable interrupts */
+2: lwz r0,36(r1)
+ mtlr r0
+ lwz r29,20(r1)
+ lwz r30,24(r1)
+ lwz r31,28(r1)
+ lwz r1,0(r1)
+ blr
+#endif
+
+/*
+ * We put a few things here that have to be page-aligned.
+ * This stuff goes at the beginning of the data segment,
+ * which is page-aligned.
+ */
.data
.globl sdata
sdata:
.space 2*4096
-
-#if 0
-_GLOBAL(sys_stack)
-sys_stack:
- .space 4096
-#endif
-CPU1_stack:
-
.globl empty_zero_page
empty_zero_page:
.space 4096
@@ -1435,6 +1735,7 @@
swapper_pg_dir:
.space 4096
+#ifdef CONFIG_PREP
/*
* This space gets a copy of optional info passed to us by the bootstrap
* Used to pass parameters into the kernel like root=/dev/sda1, etc.
@@ -1442,54 +1743,5 @@
.globl cmd_line
cmd_line:
.space 512
-
-#ifdef STATS
-/*
- * Miscellaneous statistics - gathered just for performance info
- */
- .globl _INTR_stats
-_INTR_stats:
- .globl __Instruction_TLB_Misses
-__Instruction_TLB_Misses:
- .long 0,0 /* Instruction TLB misses */
- .globl __DataLoad_TLB_Misses
-__DataLoad_TLB_Misses:
- .long 0,0 /* Data [load] TLB misses */
- .globl __DataStore_TLB_Misses
-__DataStore_TLB_Misses:
- .long 0,0 /* Data [store] TLB misses */
- .globl __Instruction_Page_Faults
-__Instruction_Page_Faults:
- .long 0,0 /* Instruction page faults */
- .globl __Data_Page_Faults
-__Data_Page_Faults:
- .long 0,0 /* Data page faults */
- .globl __Cache_Flushes
-__Cache_Flushes:
- .long 0,0 /* Explicit cache flushes */
- .globl __Context_Switches
-__Context_Switches:
- .long 0,0 /* Context switches */
- .globl __Hardware_Interrupts
-__Hardware_Interrupts:
- .long 0,0 /* I/O interrupts (disk, timer, etc) */
- .globl __TLBIAs
- .globl __TLBIAs
-__TLBIAs:
- .long 0,0 /* TLB cache forceably flushed */
- .globl __TLBIEs
-__TLBIEs:
- .long 0,0 /* Specific TLB entry flushed */
-#endif
-
- .globl _TotalMemory
-_TotalMemory:
- .long 0,0
-
-/*
- * This location is used to break any outstanding "lock"s when
- * changing contexts.
- */
-_break_lwarx: .long 0
-
+#endif /* CONFIG_PREP */