patch-2.4.6 linux/include/asm-ppc/mmu_context.h
Next file: linux/include/asm-ppc/pgtable.h
Previous file: linux/include/asm-ppc/mmu.h
Back to the patch index
Back to the overall index
- Lines: 205
- Date:
Mon Jul 2 14:34:57 2001
- Orig file:
v2.4.5/linux/include/asm-ppc/mmu_context.h
- Orig date:
Mon May 21 15:02:06 2001
diff -u --recursive --new-file v2.4.5/linux/include/asm-ppc/mmu_context.h linux/include/asm-ppc/mmu_context.h
@@ -1,18 +1,40 @@
/*
- * BK Id: SCCS/s.mmu_context.h 1.9 05/17/01 18:14:25 cort
+ * BK Id: SCCS/s.mmu_context.h 1.12 06/28/01 15:50:17 paulus
*/
-#include <linux/config.h>
-
#ifdef __KERNEL__
#ifndef __PPC_MMU_CONTEXT_H
#define __PPC_MMU_CONTEXT_H
-/* the way contexts are handled on the ppc they are vsid's and
- don't need any special treatment right now.
- perhaps I can defer flushing the tlb by keeping a list of
- zombie vsid/context's and handling that through destroy_context
- later -- Cort
+#include <linux/config.h>
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/mmu.h>
+
+/*
+ * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
+ * (virtual segment identifiers) for each context. Although the
+ * hardware supports 24-bit VSIDs, and thus >1 million contexts,
+ * we only use 32,768 of them. That is ample, since there can be
+ * at most around 30,000 tasks in the system anyway, and it means
+ * that we can use a bitmap to indicate which contexts are in use.
+ * Using a bitmap means that we entirely avoid all of the problems
+ * that we used to have when the context number overflowed,
+ * particularly on SMP systems.
+ * -- paulus.
+ */
+/*
+ * This function defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then arch/ppc/mm/hashtable.S will have to be
+ * changed correspondingly.
+ */
+#define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
+ & 0xffffff)
+
+/*
The MPC8xx has only 16 contexts. We rotate through them on each
task switch. A better way would be to keep track of tasks that
own contexts, and implement an LRU usage. That way very active
@@ -32,38 +54,22 @@
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
+
#ifdef CONFIG_8xx
#define NO_CONTEXT 16
#define LAST_CONTEXT 15
-#define BASE_CONTEXT (-1)
-#define MUNGE_CONTEXT(n) (n)
-#define flush_hash_segments(X, Y) do { } while (0)
#elif CONFIG_4xx
#define NO_CONTEXT 256
#define LAST_CONTEXT 255
-#define BASE_CONTEXT (0)
-#define MUNGE_CONTEXT(n) (n)
-#define flush_hash_segments(X, Y) do { } while (0)
#else
/* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT 0
-#define BASE_CONTEXT (0)
-#define LAST_CONTEXT 0xfffff
-
-/*
- * Allocating context numbers this way tends to spread out
- * the entries in the hash table better than a simple linear
- * allocation.
- */
-#define MUNGE_CONTEXT(n) (((n) * 897) & LAST_CONTEXT)
+#define NO_CONTEXT ((mm_context_t) -1)
+#define LAST_CONTEXT 32767
#endif
-extern atomic_t next_mmu_context;
-extern void mmu_context_overflow(void);
-
/*
* Set the current MMU context.
* On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
@@ -73,19 +79,58 @@
* and once I implement a real TLB context manager this will disappear.
* The PGD is ignored on other processors. - Dan
*/
-extern void set_context(int context, void *pgd);
+extern void set_context(mm_context_t context);
/*
- * Get a new mmu context for task tsk if necessary.
+ * Bitmap of contexts in use.
+ * The size of this bitmap is LAST_CONTEXT + 1 bits.
*/
-#define get_mmu_context(mm) \
-do { \
- if (mm->context == NO_CONTEXT) { \
- if (atomic_read(&next_mmu_context) == LAST_CONTEXT) \
- mmu_context_overflow(); \
- mm->context = MUNGE_CONTEXT(atomic_inc_return(&next_mmu_context));\
- } \
-} while (0)
+extern unsigned long context_map[(LAST_CONTEXT+1) / (8*sizeof(unsigned long))];
+
+/*
+ * This caches the next context number that we expect to be free.
+ * Its use is an optimization only, we can't rely on this context
+ * number to be free, but it usually will be.
+ */
+extern mm_context_t next_mmu_context;
+
+/*
+ * If we don't have sufficient contexts to give one to every task
+ * that could be in the system, we need to be able to steal contexts.
+ * These variables support that.
+ */
+#if LAST_CONTEXT < 30000
+#define FEW_CONTEXTS 1
+extern atomic_t nr_free_contexts;
+extern struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern void steal_context(void);
+#endif
+
+/*
+ * Get a new mmu context for the address space described by `mm'.
+ */
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+ mm_context_t ctx;
+
+ if (mm->context != NO_CONTEXT)
+ return;
+#ifdef FEW_CONTEXTS
+ while (atomic_dec_if_positive(&nr_free_contexts) < 0)
+ steal_context();
+#endif
+ ctx = next_mmu_context;
+ while (test_and_set_bit(ctx, context_map)) {
+ ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+ if (ctx > LAST_CONTEXT)
+ ctx = 0;
+ }
+ next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+ mm->context = ctx;
+#ifdef FEW_CONTEXTS
+ context_mm[ctx] = mm;
+#endif
+}
/*
* Set up the context for a new address space.
@@ -95,14 +140,23 @@
/*
* We're finished using the context for an address space.
*/
-#define destroy_context(mm) do { } while (0)
+static inline void destroy_context(struct mm_struct *mm)
+{
+ if (mm->context != NO_CONTEXT) {
+ clear_bit(mm->context, context_map);
+ mm->context = NO_CONTEXT;
+#ifdef FEW_CONTEXTS
+ atomic_inc(&nr_free_contexts);
+#endif
+ }
+}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk, int cpu)
{
tsk->thread.pgdir = next->pgd;
get_mmu_context(next);
- set_context(next->context, next->pgd);
+ set_context(next->context);
}
/*
@@ -113,16 +167,8 @@
{
current->thread.pgdir = mm->pgd;
get_mmu_context(mm);
- set_context(mm->context, mm->pgd);
+ set_context(mm->context);
}
-/*
- * compute the vsid from the context and segment
- * segments > 7 are kernel segments and their
- * vsid is the segment -- Cort
- */
-#define VSID_FROM_CONTEXT(segment,context) \
- ((segment < 8) ? ((segment) | (context)<<4) : (segment))
-
-#endif
+#endif /* __PPC_MMU_CONTEXT_H */
#endif /* __KERNEL__ */
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)