patch-2.0.37 linux/include/asm-i386/pgtable.h
Next file: linux/include/asm-i386/processor.h
Previous file: linux/include/asm-i386/page.h
Back to the patch index
Back to the overall index
- Lines: 60
- Date: Sun Jun 13 10:21:03 1999
- Orig file: v2.0.36/linux/include/asm-i386/pgtable.h
- Orig date: Tue Mar 10 13:19:09 1998
diff -u --recursive --new-file v2.0.36/linux/include/asm-i386/pgtable.h linux/include/asm-i386/pgtable.h
@@ -19,6 +19,8 @@
* the i386 page table tree.
*/
+#ifndef __ASSEMBLY__
+
/* Caches aren't brain-dead on the intel. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
@@ -46,13 +48,13 @@
* NOTE! The intel "invlpg" semantics are extremely strange. The
* chip will add the segment base to the memory address, even though
* no segment checking is done. We correct for this by using an
- * offset of 0x40000000 that will wrap around the kernel segment base
- * of 0xC0000000 to get the correct address (it will always be outside
+ * offset of -__PAGE_OFFSET that will wrap around the kernel segment base
+ * of __PAGE_OFFSET to get the correct address (it will always be outside
* the kernel segment, but we're only interested in the final linear
* address.
*/
#define __invlpg_mem(addr) \
- (((char *)(addr))[0x40000000])
+ (*((char *)(addr)-__PAGE_OFFSET))
#define __invlpg(addr) \
__asm__ __volatile__("invlpg %0": :"m" (__invlpg_mem(addr)))
@@ -177,6 +179,7 @@
}
#endif
#endif
+#endif /* !__ASSEMBLY__ */
/* Certain architectures need to do special things when pte's
@@ -203,6 +206,17 @@
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
+/*
+ * pgd entries used up by user/kernel:
+ */
+
+#define USER_PGD_PTRS ((unsigned long)__PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+#define __USER_PGD_PTRS (__PAGE_OFFSET >> PGDIR_SHIFT)
+#define __KERNEL_PGD_PTRS (PTRS_PER_PGD-__USER_PGD_PTRS)
+
+#ifndef __ASSEMBLY__
+
/* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
@@ -507,5 +521,7 @@
#define SWP_TYPE(entry) (((entry) >> 1) & 0x7f)
#define SWP_OFFSET(entry) ((entry) >> 8)
#define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 8))
+
+#endif /* !__ASSEMBLY__ */
#endif /* _I386_PAGE_H */
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov