patch-2.4.22 linux-2.4.22/arch/mips/mm/pg-sb1.c
- Lines: 181
- Date: 2003-08-25 04:44:40.000000000 -0700
- Orig file: linux-2.4.21/arch/mips/mm/pg-sb1.c
- Orig date: 2002-11-28 15:53:10.000000000 -0800
diff -urN linux-2.4.21/arch/mips/mm/pg-sb1.c linux-2.4.22/arch/mips/mm/pg-sb1.c
@@ -1,9 +1,10 @@
/*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
* Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
- * Copyright (C) 2000 Sibyte
+ * Copyright (C) 2000 SiByte, Inc.
*
- * Written by Justin Carlson (carlson@sibyte.com)
+ * Written by Justin Carlson of SiByte, Inc.
+ * and Kip Walker of Broadcom Corp.
*
*
* This program is free software; you can redistribute it and/or
@@ -20,8 +21,16 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+
#include <linux/config.h>
-#include <asm/page.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/io.h>
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_dma.h>
+#include <asm/sibyte/64bit.h>

#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
#define SB1_PREF_LOAD_STREAMED_HINT "0"
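Only the CONFIG_SB1_PASS_1_WORKAROUNDS branch of the prefetch-hint selection is visible as context in the hunk above. For orientation, here is a sketch of the presumed full selection; the #else values are an assumption based on the standard MIPS64 PREF hint encodings (0 = load, 1 = store, 4 = load_streamed, 5 = store_streamed) and are not part of the patch text shown:

#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
#define SB1_PREF_LOAD_STREAMED_HINT "0"   /* plain load hint (pass-1 workaround)  */
#define SB1_PREF_STORE_STREAMED_HINT "1"  /* plain store hint (pass-1 workaround) */
#else
#define SB1_PREF_LOAD_STREAMED_HINT "4"   /* load_streamed hint  */
#define SB1_PREF_STORE_STREAMED_HINT "5"  /* store_streamed hint */
#endif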
@@ -34,13 +43,15 @@
/* These are the functions hooked by the memory management function pointers */
void sb1_clear_page(void *page)
{
- /* JDCXXX - This should be bottlenecked by the write buffer, but these
- things tend to be mildly unpredictable...should check this on the
- performance model */
-
- /* We prefetch 4 lines ahead. We're also "cheating" slightly here...
- since we know we're on an SB1, we force the assembler to take
- 64-bit operands to speed things up */
+ /*
+ * JDCXXX - This should be bottlenecked by the write buffer, but these
+ * things tend to be mildly unpredictable...should check this on the
+ * performance model
+ *
+ * We prefetch 4 lines ahead. We're also "cheating" slightly here...
+ * since we know we're on an SB1, we force the assembler to take
+ * 64-bit operands to speed things up
+ */
__asm__ __volatile__(
".set push \n"
".set noreorder \n"
@@ -63,22 +74,20 @@
" bne $1, %0, 1b \n"
" addiu %0, %0, 32 \n" /* Next cacheline (This instruction better be short piped!) */
".set pop \n"
- :"=r" (page)
- :"0" (page),
- "I" (PAGE_SIZE-32)
- :"$1","memory");
+ : "=r" (page)
+ : "0" (page), "I" (PAGE_SIZE-32)
+ : "memory");
}


void sb1_copy_page(void *to, void *from)
{
-
- /* This should be optimized in assembly...can't use ld/sd, though,
+ /*
+ * This should be optimized in assembly...can't use ld/sd, though,
* because the top 32 bits could be nuked if we took an interrupt
* during the routine. And this is not a good place to be cli()'ing
- */
-
- /* The pref's used here are using "streaming" hints, which cause the
+ *
+ * The pref's used here are using "streaming" hints, which cause the
* copied data to be kicked out of the cache sooner. A page copy often
* ends up copying a lot more data than is commonly used, so this seems
* to make sense in terms of reducing cache pollution, but I've no real
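The rest of the replacement comment and the copy loop fall outside this hunk. The key point from the comment: 64-bit ld/sd cannot be used here because a 64-bit value held in a register could lose its upper half if an interrupt arrived mid-copy on a 32-bit kernel, so the loop copies each 32-byte cacheline with 32-bit accesses while streamed-hint prefetches run ahead of it. A minimal C sketch of that shape, not part of the patch, again assuming the file's own includes and using __builtin_prefetch in place of pref:

/* Sketch only: rough C equivalent of what sb1_copy_page() does in asm. */
static void sb1_copy_page_sketch(void *to, void *from)
{
	uint32_t *d = to;
	uint32_t *s = from;
	uint32_t *end = (uint32_t *)((char *)from + PAGE_SIZE);

	while (s != end) {
		/* Prefetch ahead: streamed load hint on the source, streamed store hint on the dest. */
		__builtin_prefetch((char *)s + 4 * 32, 0);
		__builtin_prefetch((char *)d + 4 * 32, 1);
		/* Copy one 32-byte cacheline as eight 32-bit words (lw/sw in the asm). */
		d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3];
		d[4] = s[4]; d[5] = s[5]; d[6] = s[6]; d[7] = s[7];
		d += 8;
		s += 8;
	}
}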
@@ -125,19 +134,82 @@
" bne $1, %0, 1b \n"
" addiu %0, %0, 32 \n" /* Next cacheline */
".set pop \n"
- :"=r" (to),
- "=r" (from)
- :
- "0" (from),
- "1" (to),
- "I" (PAGE_SIZE-32)
- :"$1","$2","$3","$4","$5","$6","$7","$8","$9","memory");
+ : "=r" (to), "=r" (from)
+ : "0" (from), "1" (to), "I" (PAGE_SIZE-32)
+ : "$2","$3","$4","$5","$6","$7","$8","$9","memory");
+}
+
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+
/*
- unsigned long *src = from;
- unsigned long *dest = to;
- unsigned long *target = (unsigned long *) (((unsigned long)src) + PAGE_SIZE);
- while (src != target) {
- *dest++ = *src++;
- }
-*/
+ * Pad descriptors to cacheline, since each is exclusively owned by a
+ * particular CPU.
+ */
+typedef struct dmadscr_s {
+ uint64_t dscr_a;
+ uint64_t dscr_b;
+ uint64_t pad_a;
+ uint64_t pad_b;
+} dmadscr_t;
+
+static dmadscr_t page_descr[NR_CPUS] __attribute__((aligned(SMP_CACHE_BYTES)));
+
+void sb1_dma_init(void)
+{
+ int cpu = smp_processor_id();
+ uint64_t base_val = PHYSADDR(&page_descr[cpu]) | V_DM_DSCR_BASE_RINGSZ(1);
+
+ out64(base_val,
+ IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE));
+ out64(base_val | M_DM_DSCR_BASE_RESET,
+ IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE));
+ out64(base_val | M_DM_DSCR_BASE_ENABL,
+ IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE));
+}
+
+void sb1_clear_page_dma(void *page)
+{
+ int cpu = smp_processor_id();
+
+ /* if the page is above Kseg0, use old way */
+ if (KSEGX(page) != K0BASE)
+ return sb1_clear_page(page);
+
+ page_descr[cpu].dscr_a = PHYSADDR(page) | M_DM_DSCRA_ZERO_MEM | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
+ page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
+ out64(1, IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_COUNT));
+
+ /*
+ * Don't really want to do it this way, but there's no
+ * reliable way to delay completion detection.
+ */
+ while (!(in64(IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)) & M_DM_DSCR_BASE_INTERRUPT))
+ ;
+ in64(IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE));
}
+
+void sb1_copy_page_dma(void *to, void *from)
+{
+ unsigned long from_phys = PHYSADDR(from);
+ unsigned long to_phys = PHYSADDR(to);
+ int cpu = smp_processor_id();
+
+ /* if either page is above Kseg0, use old way */
+ if ((KSEGX(to) != K0BASE) || (KSEGX(from) != K0BASE))
+ return sb1_copy_page(to, from);
+
+ page_descr[cpu].dscr_a = PHYSADDR(to_phys) | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
+ page_descr[cpu].dscr_b = PHYSADDR(from_phys) | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
+ out64(1, IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_COUNT));
+
+ /*
+ * Don't really want to do it this way, but there's no
+ * reliable way to delay completion detection.
+ */
+ while (!(in64(IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)) & M_DM_DSCR_BASE_INTERRUPT))
+ ;
+ in64(IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE));
+}
+
+#endif
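Both DMA routines added above share one submit-and-poll pattern: fill in this CPU's cacheline-padded descriptor, poke the descriptor-count register to hand it to the data mover, then spin on the interrupt bit in the debug register and clear it by reading the base register. Here is that pattern factored into a helper for readability; the register and field macros are the ones already used in the patch (from <asm/sibyte/sb1250_dma.h>), but the helper itself is hypothetical and not something the patch adds:

/* Sketch only: the submit-and-poll pattern shared by sb1_clear_page_dma()
 * and sb1_copy_page_dma(), expressed as a hypothetical helper. */
static inline void sb1_dma_submit_and_wait(int cpu, uint64_t dscr_a, uint64_t dscr_b)
{
	/* Fill in this CPU's private, cacheline-padded descriptor. */
	page_descr[cpu].dscr_a = dscr_a;
	page_descr[cpu].dscr_b = dscr_b;

	/* Tell the data mover that one new descriptor is ready. */
	out64(1, IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_COUNT));

	/* Busy-wait for the completion/interrupt status bit to latch... */
	while (!(in64(IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;

	/* ...then clear it by reading the descriptor base register, as above. */
	in64(IO_SPACE_BASE + A_DM_REGISTER(cpu, R_DM_DSCR_BASE));
}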