patch-2.1.106 linux/arch/i386/mm/init.c
- Lines: 86
- Date: Sat Jun 13 12:48:10 1998
- Orig file: v2.1.105/linux/arch/i386/mm/init.c
- Orig date: Sun Jun 7 11:16:27 1998
diff -u --recursive --new-file v2.1.105/linux/arch/i386/mm/init.c linux/arch/i386/mm/init.c
@@ -27,11 +27,66 @@
#include <asm/pgtable.h>
#include <asm/dma.h>
-const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
-
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
+void __bad_pte_kernel(pmd_t *pmd)
+{
+	printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+	pmd_val(*pmd) = _KERNPG_TABLE + __pa(BAD_PAGETABLE);
+}
+
+void __bad_pte(pmd_t *pmd)
+{
+	printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+	pmd_val(*pmd) = _PAGE_TABLE + __pa(BAD_PAGETABLE);
+}
+
+pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
+{
+	pte_t *pte;
+
+	pte = (pte_t *) __get_free_page(GFP_KERNEL);
+	if (pmd_none(*pmd)) {
+		if (pte) {
+			clear_page((unsigned long)pte);
+			pmd_val(*pmd) = _KERNPG_TABLE + __pa(pte);
+			return pte + offset;
+		}
+		pmd_val(*pmd) = _KERNPG_TABLE + __pa(BAD_PAGETABLE);
+		return NULL;
+	}
+	free_page((unsigned long)pte);
+	if (pmd_bad(*pmd)) {
+		__bad_pte_kernel(pmd);
+		return NULL;
+	}
+	return (pte_t *) pmd_page(*pmd) + offset;
+}
+
+pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
+{
+	unsigned long pte;
+
+	pte = (unsigned long) __get_free_page(GFP_KERNEL);
+	if (pmd_none(*pmd)) {
+		if (pte) {
+			clear_page(pte);
+			pmd_val(*pmd) = _PAGE_TABLE + __pa(pte);
+			return (pte_t *)(pte + offset);
+		}
+		pmd_val(*pmd) = _PAGE_TABLE + __pa(BAD_PAGETABLE);
+		return NULL;
+	}
+	free_page(pte);
+	if (pmd_bad(*pmd)) {
+		__bad_pte(pmd);
+		return NULL;
+	}
+	return (pte_t *) (pmd_page(*pmd) + offset);
+}
+
+
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
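
The two helpers added above are the out-of-line slow path for page-table allocation: a fresh page is allocated first with __get_free_page() (which can block), the pmd is then re-checked with pmd_none() because another context may have installed a page table in the meantime, and the now-redundant page is freed if that race was lost. Below is a minimal stand-alone C sketch of the same allocate-then-recheck pattern; it is not part of the patch, and all names in it (dir_t, install_slow, ENTRIES) are invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ENTRIES 1024

typedef struct { long *table; } dir_t;	/* stand-in for a pmd entry */

/* Slow path: allocate first (the real __get_free_page() can sleep here),
 * then re-check the directory entry, since someone else may have filled
 * it while we were allocating. */
static long *install_slow(dir_t *dir, unsigned long offset)
{
	long *page = malloc(ENTRIES * sizeof(long));

	if (dir->table == NULL) {		/* still empty? */
		if (page) {
			memset(page, 0, ENTRIES * sizeof(long));
			dir->table = page;	/* install the new table */
			return page + offset;
		}
		return NULL;			/* out of memory */
	}
	free(page);				/* lost the race: drop our copy */
	return dir->table + offset;
}

int main(void)
{
	dir_t dir = { NULL };
	long *slot = install_slow(&dir, 5);

	if (slot) {
		*slot = 42;
		printf("entry 5 = %ld\n", dir.table[5]);
	}
	free(dir.table);
	return 0;
}

Re-checking after the allocation is what makes the pattern safe: the allocation can sleep, so the earlier observation that the pmd was empty may be stale by the time the new page is ready.
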
@@ -82,7 +137,7 @@
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
-		if (PageSwapCache(mem_map+i))
+		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!atomic_read(&mem_map[i].count))
			free++;
@@ -93,6 +148,7 @@
printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared);
printk("%d pages swap cached\n",cached);
+ printk("%ld pages in page table cache\n",pgtable_cache_size);
show_buffers();
#ifdef CONFIG_NET
show_net_buffers();
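
show_mem() now also reports pgtable_cache_size, the number of pages currently held in the page-table cache, i.e. freed page-table pages kept around for quick reuse rather than returned straight to the page allocator. A rough user-space sketch of such a cached free list with a counter follows; the pt_cache_* names are invented here, and this only illustrates the bookkeeping, not the kernel's actual implementation.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* Invented names: a tiny free-list cache of page-sized blocks, with a
 * counter similar in spirit to pgtable_cache_size. */
static void *pt_cache_head;
static unsigned long pt_cache_size;

static void *pt_cache_get(void)
{
	void *page = pt_cache_head;

	if (page) {
		pt_cache_head = *(void **)page;	/* pop from the free list */
		pt_cache_size--;
		return page;
	}
	return malloc(PAGE_SIZE);		/* cache empty: real allocation */
}

static void pt_cache_put(void *page)
{
	*(void **)page = pt_cache_head;		/* push onto the free list */
	pt_cache_head = page;
	pt_cache_size++;
}

int main(void)
{
	void *a = pt_cache_get();
	void *b = pt_cache_get();

	if (!a || !b)
		return 1;
	pt_cache_put(a);
	pt_cache_put(b);
	printf("%lu pages in page table cache\n", pt_cache_size);

	/* reuse the cached pages, then release them for real */
	free(pt_cache_get());
	free(pt_cache_get());
	return 0;
}
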