patch-2.1.106 linux/include/asm-alpha/pgtable.h
- Lines: 270
- Date: Sat Jun 13 12:48:10 1998
- Orig file: v2.1.105/linux/include/asm-alpha/pgtable.h
- Orig date: Wed Apr 1 20:11:53 1998
diff -u --recursive --new-file v2.1.105/linux/include/asm-alpha/pgtable.h linux/include/asm-alpha/pgtable.h
@@ -11,6 +11,7 @@
#include <linux/config.h>
#include <asm/system.h>
+#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu_context.h>
/* Caches aren't brain-dead on the alpha. */
@@ -181,6 +182,7 @@
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD ((1UL << (PAGE_SHIFT-3))-1)
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
/* the no. of pointers that fit on a page: this will go away */
#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
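
[Note: the new USER_PTRS_PER_PGD constant is why <asm/processor.h> is now included: TASK_SIZE divided by PGDIR_SIZE gives the number of top-level directory slots that map user space. A minimal sketch of the arithmetic, assuming the usual Alpha values of PAGE_SHIFT = 13 and TASK_SIZE = 0x40000000000UL, neither of which is defined in this patch:

#include <stdio.h>

/* Assumed Alpha constants, not part of this patch. */
#define PAGE_SHIFT	13
#define TASK_SIZE	0x40000000000UL		/* 2^42 bytes of user space */

/* Mirrors the header: each level holds PAGE_SIZE/8 eight-byte entries. */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT - 3))	/* 33 */
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* 8 GB per pgd entry */

int main(void)
{
	/* 2^42 / 2^33 = 512 user-space slots in the pgd page */
	printf("USER_PTRS_PER_PGD = %lu\n", TASK_SIZE / PGDIR_SIZE);
	return 0;
}
]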
@@ -401,126 +403,174 @@
* used to allocate a kernel page table - this turns on ASN bits
* if any.
*/
-extern inline void pte_free_kernel(pte_t * pte)
+#ifndef __SMP__
+extern struct pgtable_cache_struct {
+ unsigned long *pgd_cache;
+ unsigned long *pte_cache;
+ unsigned long pgtable_cache_sz;
+} quicklists;
+#else
+#include <asm/smp.h>
+#define quicklists cpu_data[smp_processor_id()]
+#endif
+#define pgd_quicklist (quicklists.pgd_cache)
+#define pmd_quicklist ((unsigned long *)0)
+#define pte_quicklist (quicklists.pte_cache)
+#define pgtable_cache_size (quicklists.pgtable_cache_sz)
+
+extern __inline__ pgd_t *get_pgd_slow(void)
+{
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;
+
+ if (ret) {
+ init = pgd_offset(&init_mm, 0);
+ memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return ret;
+}
+
+extern __inline__ pgd_t *get_pgd_fast(void)
{
- free_page((unsigned long) pte);
+ unsigned long *ret;
+
+ if((ret = pgd_quicklist) != NULL) {
+ pgd_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
+ } else
+ ret = (unsigned long *)get_pgd_slow();
+ return (pgd_t *)ret;
}
-extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
- if (pmd_none(*pmd)) {
- pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
- if (pmd_none(*pmd)) {
- if (page) {
- pmd_set(pmd, page);
- return page + address;
- }
- pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long) page);
- }
- if (pmd_bad(*pmd)) {
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + address;
+ *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+ pgd_quicklist = (unsigned long *) pgd;
+ pgtable_cache_size++;
}
-extern inline void pmd_free_kernel(pmd_t * pmd)
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
- free_page((unsigned long) pmd);
+ free_page((unsigned long)pgd);
}
-extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
+extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long address_premasked);
+
+extern __inline__ pmd_t *get_pmd_fast(void)
{
- address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
- if (pgd_none(*pgd)) {
- pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
- if (pgd_none(*pgd)) {
- if (page) {
- pgd_set(pgd, page);
- return page + address;
- }
- pgd_set(pgd, BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long) page);
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
}
- if (pgd_bad(*pgd)) {
- printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
- pgd_set(pgd, BAD_PAGETABLE);
- return NULL;
+ return (pmd_t *)ret;
+}
+
+extern __inline__ void free_pmd_fast(pmd_t *pmd)
+{
+ *(unsigned long *)pmd = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pmd;
+ pgtable_cache_size++;
+}
+
+extern __inline__ void free_pmd_slow(pmd_t *pmd)
+{
+ free_page((unsigned long)pmd);
+}
+
+extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
+
+extern __inline__ pte_t *get_pte_fast(void)
+{
+ unsigned long *ret;
+
+ if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ pte_quicklist = (unsigned long *)(*ret);
+ ret[0] = ret[1];
+ pgtable_cache_size--;
}
- return (pmd_t *) pgd_page(*pgd) + address;
+ return (pte_t *)ret;
}
-extern inline void pte_free(pte_t * pte)
+extern __inline__ void free_pte_fast(pte_t *pte)
{
- free_page((unsigned long) pte);
+ *(unsigned long *)pte = (unsigned long) pte_quicklist;
+ pte_quicklist = (unsigned long *) pte;
+ pgtable_cache_size++;
}
+extern __inline__ void free_pte_slow(pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+extern void __bad_pte(pmd_t *pmd);
+extern void __bad_pmd(pgd_t *pgd);
+
+#define pte_free_kernel(pte) free_pte_fast(pte)
+#define pte_free(pte) free_pte_fast(pte)
+#define pmd_free_kernel(pmd) free_pmd_fast(pmd)
+#define pmd_free(pmd) free_pmd_fast(pmd)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc() get_pgd_fast()
+
extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
if (pmd_none(*pmd)) {
- pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
- if (pmd_none(*pmd)) {
- if (page) {
- pmd_set(pmd, page);
- return page + address;
- }
- pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long) page);
+ pte_t *page = get_pte_fast();
+
+ if (!page)
+ return get_pte_slow(pmd, address);
+ pmd_set(pmd, page);
+ return page + address;
}
if (pmd_bad(*pmd)) {
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- pmd_set(pmd, (pte_t *) BAD_PAGETABLE);
+ __bad_pte(pmd);
return NULL;
}
return (pte_t *) pmd_page(*pmd) + address;
}
-extern inline void pmd_free(pmd_t * pmd)
-{
- free_page((unsigned long) pmd);
-}
-
extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
if (pgd_none(*pgd)) {
- pmd_t *page = (pmd_t *) get_free_page(GFP_KERNEL);
- if (pgd_none(*pgd)) {
- if (page) {
- pgd_set(pgd, page);
- return page + address;
- }
- pgd_set(pgd, BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long) page);
+ pmd_t *page = get_pmd_fast();
+
+ if (!page)
+ return get_pmd_slow(pgd, address);
+ pgd_set(pgd, page);
+ return page + address;
}
if (pgd_bad(*pgd)) {
- printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
- pgd_set(pgd, BAD_PAGETABLE);
+ __bad_pmd(pgd);
return NULL;
}
return (pmd_t *) pgd_page(*pgd) + address;
}
-extern inline void pgd_free(pgd_t * pgd)
-{
- free_page((unsigned long) pgd);
-}
+#define pte_alloc_kernel pte_alloc
+#define pmd_alloc_kernel pmd_alloc
-extern inline pgd_t * pgd_alloc(void)
+extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
- return (pgd_t *) get_free_page(GFP_KERNEL);
+ struct task_struct * p;
+ pgd_t *pgd;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (!p->mm)
+ continue;
+ *pgd_offset(p->mm,address) = entry;
+ }
+ read_unlock(&tasklist_lock);
+ for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+ pgd[(address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)] = entry;
}
extern pgd_t swapper_pg_dir[1024];
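
[Note: the bulk of this hunk replaces direct get_free_page()/free_page() calls with a "quicklist" cache: a freed page-table page is pushed onto a singly linked list threaded through its own first word, and get_pgd_fast()/get_pmd_fast()/get_pte_fast() pop from that list before falling back to the _slow allocators. set_pgdir() walks the same first-word links so cached pgds also pick up new kernel mappings. Below is a self-contained user-space model of the mechanism, assuming an 8 KB page and with calloc() standing in for __get_free_page(); it is an illustration only, not the kernel code:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 8192UL		/* assumed Alpha page size */

static unsigned long *pgd_quicklist;	/* head of the free list */
static unsigned long pgtable_cache_size;

/* Push: the page's first word stores the old list head. */
static void free_pgd_fast(unsigned long *pgd)
{
	pgd[0] = (unsigned long) pgd_quicklist;
	pgd_quicklist = pgd;
	pgtable_cache_size++;
}

/* Pop, or fall back to the slow path on an empty list. */
static unsigned long *get_pgd_fast(void)
{
	unsigned long *ret = pgd_quicklist;

	if (ret) {
		pgd_quicklist = (unsigned long *) ret[0];
		ret[0] = ret[1];	/* entries 0 and 1 are both clear in a
					   cached table, so this erases the link */
		pgtable_cache_size--;
	} else {
		ret = calloc(1, PAGE_SIZE);	/* stands in for get_pgd_slow(),
						   which also copies kernel entries */
	}
	return ret;
}

int main(void)
{
	unsigned long *a = get_pgd_fast();	/* slow path: fresh page */
	free_pgd_fast(a);			/* page is now cached */
	unsigned long *b = get_pgd_fast();	/* fast path: reuses a */

	printf("reused=%d cached=%lu\n", a == b, pgtable_cache_size);
	free(b);
	return 0;
}
]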