patch-2.0.33 linux/include/asm-i386/pgtable.h
- Lines: 32
- Date: Tue Dec 2 14:18:13 1997
- Orig file: v2.0.32/linux/include/asm-i386/pgtable.h
- Orig date: Wed Oct 15 15:22:05 1997
diff -u --recursive --new-file v2.0.32/linux/include/asm-i386/pgtable.h linux/include/asm-i386/pgtable.h
@@ -42,11 +42,28 @@
 #define __flush_tlb() \
 do { unsigned long tmpreg; __asm__ __volatile__("movl %%cr3,%0\n\tmovl %0,%%cr3":"=r" (tmpreg) : :"memory"); } while (0)
 
+/*
+ * NOTE! The intel "invlpg" semantics are extremely strange. The
+ * chip will add the segment base to the memory address, even though
+ * no segment checking is done. We correct for this by using an
+ * offset of 0x40000000 that will wrap around the kernel segment base
+ * of 0xC0000000 to get the correct address (it will always be outside
+ * the kernel segment, but we're only interested in the final linear
+ * address).
+ */
+#define __invlpg_mem(addr) \
+ (((char *)(addr))[0x40000000])
+#define __invlpg(addr) \
+ __asm__ __volatile__("invlpg %0": :"m" (__invlpg_mem(addr)))
+
+/*
+ * The i386 doesn't have a page-granular invalidate. Invalidate
+ * everything for it.
+ */
 #ifdef CONFIG_M386
-#define __flush_tlb_one(addr) flush_tlb()
+ #define __flush_tlb_one(addr) __flush_tlb()
 #else
-#define __flush_tlb_one(addr) \
-__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+ #define __flush_tlb_one(addr) __invlpg(addr)
 #endif
 
 #ifndef __SMP__
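What the new macros buy: "invlpg %0" takes a memory operand, and the chip adds that operand's segment base (0xC0000000 for the kernel segment) to the effective address before invalidating, even though no segment checking is done. Below is a minimal user-space sketch of the wraparound arithmetic; it is not part of the patch, and kernel_base, bias, and addr are illustrative values.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t kernel_base = 0xC0000000u; /* kernel segment base on i386 */
	uint32_t bias        = 0x40000000u; /* offset applied by __invlpg_mem() */
	uint32_t addr        = 0x001B2000u; /* hypothetical page to invalidate */

	/* The CPU forms the linear address as segment base + effective
	 * address, modulo 2^32.  Biasing the offset by 0x40000000 makes
	 * the 0xC0000000 base wrap away, leaving the original address. */
	uint32_t linear = kernel_base + addr + bias;

	printf("linear = %#010x\n", linear); /* prints 0x001b2000, i.e. addr */
	return 0;
}

Since 0xC0000000 + 0x40000000 wraps to 0 in 32-bit arithmetic, the linear address the TLB sees is exactly the one passed to __flush_tlb_one(). On CONFIG_M386 kernels the macro instead falls back to __flush_tlb(), a full CR3 reload, because the 80386 has no page-granular invalidate; "invlpg" first appeared on the i486.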