patch-2.1.110 linux/arch/alpha/lib/io.c
Next file: linux/arch/alpha/lib/memset.S
Previous file: linux/arch/alpha/kernel/smp.c
Back to the patch index
Back to the overall index
- Lines: 189
- Date: Sun Jul 19 20:44:41 1998
- Orig file: v2.1.109/linux/arch/alpha/lib/io.c
- Orig date: Tue Aug 20 06:57:15 1996
diff -u --recursive --new-file v2.1.109/linux/arch/alpha/lib/io.c linux/arch/alpha/lib/io.c
@@ -3,7 +3,7 @@
* files..
*/
#include <linux/kernel.h>
-
+#include <linux/types.h>
#include <asm/io.h>
/*
@@ -70,6 +70,10 @@
return __readl(addr);
}
+unsigned long _readq(unsigned long addr)
+{
+ return __readq(addr);
+}
void _writeb(unsigned char b, unsigned long addr)
{
@@ -86,6 +90,11 @@
__writel(b, addr);
}
+void _writeq(unsigned long b, unsigned long addr)
+{
+ __writeq(b, addr);
+}
+
/*
* Read COUNT 8-bit bytes from port PORT into memory starting at
* SRC.
@@ -363,12 +372,48 @@
* Copy data from IO memory space to "real" memory space.
* This needs to be optimized.
*/
-void _memcpy_fromio(void * to, unsigned long from, unsigned long count)
+void _memcpy_fromio(void * to, unsigned long from, long count)
{
- while (count) {
+ /* Optimize co-aligned transfers. Everything else gets handled
+ a byte at a time. */
+
+ if (count >= 8 && ((long)to & 7) == (from & 7)) {
+ count -= 8;
+ do {
+ *(u64 *)to = readq(from);
+ count -= 8;
+ to += 8;
+ from += 8;
+ } while (count >= 0);
+ count += 8;
+ }
+
+ if (count >= 4 && ((long)to & 3) == (from & 3)) {
+ count -= 4;
+ do {
+ *(u32 *)to = readl(from);
+ count -= 4;
+ to += 4;
+ from += 4;
+ } while (count >= 0);
+ count += 4;
+ }
+
+ if (count >= 2 && ((long)to & 1) == (from & 1)) {
+ count -= 2;
+ do {
+ *(u16 *)to = readw(from);
+ count -= 2;
+ to += 2;
+ from += 2;
+ } while (count >= 0);
+ count += 2;
+ }
+
+ while (count > 0) {
+ *(u8 *) to = readb(from);
count--;
- *(char *) to = readb(from);
- ((char *) to)++;
+ to++;
from++;
}
}
@@ -377,13 +422,49 @@
* Copy data from "real" memory space to IO memory space.
* This needs to be optimized.
*/
-void _memcpy_toio(unsigned long to, void * from, unsigned long count)
+void _memcpy_toio(unsigned long to, void * from, long count)
{
- while (count) {
+ /* Optimize co-aligned transfers. Everything else gets handled
+ a byte at a time. */
+
+ if (count >= 8 && (to & 7) == ((long)from & 7)) {
+ count -= 8;
+ do {
+ writeq(*(u64 *)from, to);
+ count -= 8;
+ to += 8;
+ from += 8;
+ } while (count >= 0);
+ count += 8;
+ }
+
+ if (count >= 4 && (to & 3) == ((long)from & 3)) {
+ count -= 4;
+ do {
+ writel(*(u32 *)from, to);
+ count -= 4;
+ to += 4;
+ from += 4;
+ } while (count >= 0);
+ count += 4;
+ }
+
+ if (count >= 2 && (to & 1) == ((long)from & 1)) {
+ count -= 2;
+ do {
+ writew(*(u16 *)from, to);
+ count -= 2;
+ to += 2;
+ from += 2;
+ } while (count >= 0);
+ count += 2;
+ }
+
+ while (count > 0) {
+ writeb(*(u8 *) from, to);
count--;
- writeb(*(char *) from, to);
- ((char *) from)++;
to++;
+ from++;
}
}
@@ -391,11 +472,45 @@
* "memset" on IO memory space.
* This needs to be optimized.
*/
-void _memset_io(unsigned long dst, int c, unsigned long count)
+void _memset_c_io(unsigned long to, unsigned long c, long count)
{
- while (count) {
+ if (count > 0 && (to & 1)) {
+ writeb(c, to);
+ to++;
count--;
- writeb(c, dst);
- dst++;
+ }
+ if (count >= 2 && (to & 2)) {
+ writew(c, to);
+ to += 2;
+ count -= 2;
+ }
+ if (count >= 4 && (to & 4)) {
+ writel(c, to);
+ to += 4;
+ count -= 4;
+ }
+ if ((to & 7) == 0) {
+ count -= 8;
+ while (count >= 0) {
+ writeq(c, to);
+ to += 8;
+ count -= 8;
+ }
+ count += 8;
+ }
+ if (count >= 4 && (to & 4)) {
+ writel(c, to);
+ to += 4;
+ count -= 4;
+ }
+ if (count >= 2 && (to & 2)) {
+ writew(c, to);
+ to += 2;
+ count -= 2;
+ }
+ while (count > 0) {
+ writeb(c, to);
+ count--;
+ to++;
}
}
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov