patch-2.4.10 linux/arch/sparc64/kernel/sbus.c
- Lines: 298
- Date: Tue Aug 28 07:09:44 2001
- Orig file: v2.4.9/linux/arch/sparc64/kernel/sbus.c
- Orig date: Thu May 24 15:00:58 2001
diff -u --recursive --new-file v2.4.9/linux/arch/sparc64/kernel/sbus.c linux/arch/sparc64/kernel/sbus.c
@@ -1,4 +1,4 @@
-/* $Id: sbus.c,v 1.14 2001/05/23 03:06:51 davem Exp $
+/* $Id: sbus.c,v 1.16 2001/08/24 19:36:58 kanoj Exp $
* sbus.c: UltraSparc SBUS controller support.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -37,7 +37,7 @@
#define ONE_GIG (1UL * 1024UL * 1024UL * 1024UL)
#define CLUSTER_SIZE (ONE_GIG / NCLUSTERS)
#define CLUSTER_MASK (CLUSTER_SIZE - 1)
-#define CLUSTER_NPAGES (CLUSTER_SIZE >> PAGE_SHIFT)
+#define CLUSTER_NPAGES (CLUSTER_SIZE >> IO_PAGE_SHIFT)
#define MAP_BASE ((u32)0xc0000000)
struct sbus_iommu {
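Every hunk in this patch applies the same substitution: arithmetic that used the CPU page macros (PAGE_SHIFT, PAGE_SIZE, PAGE_MASK, PAGE_ALIGN) now uses dedicated IO-page macros, decoupling the SBUS IOMMU's fixed 8K translation size from the kernel's own page size. A minimal sketch of the macros being switched to, assuming the usual sparc64 header definitions:

    /* Sketch, assuming the sparc64 header definitions: the IOMMU
     * translates in fixed 8K pages regardless of PAGE_SIZE. */
    #define IO_PAGE_SHIFT       13
    #define IO_PAGE_SIZE        (1UL << IO_PAGE_SHIFT)
    #define IO_PAGE_MASK        (~(IO_PAGE_SIZE - 1))
    #define IO_PAGE_ALIGN(addr) (((addr) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)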
@@ -99,7 +99,7 @@
static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
while (npages--)
- upa_writeq(base + (npages << PAGE_SHIFT),
+ upa_writeq(base + (npages << IO_PAGE_SHIFT),
iommu->iommu_regs + IOMMU_FLUSH);
upa_readq(iommu->sbus_control_reg);
}
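iommu_flush() issues one IOMMU_FLUSH write per IO page in the range, and the trailing read of the control register forces the writes to complete. With the macro fixed, the write count is measured in IO pages rather than CPU pages:

    /* Worked example (range size assumed): flushing a 64K DVMA
     * range is one diagnostic write per 8K IO page, eight writes. */
    unsigned long npages = (64UL * 1024UL) >> IO_PAGE_SHIFT;   /* 8 */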
@@ -120,7 +120,7 @@
{
iommu->strbuf_flushflag = 0UL;
while (npages--)
- upa_writeq(base + (npages << PAGE_SHIFT),
+ upa_writeq(base + (npages << IO_PAGE_SHIFT),
iommu->strbuf_regs + STRBUF_PFLUSH);
/* Whoopee cushion! */
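The rest of strbuf_flush() falls outside this hunk; in the surrounding 2.4 source the "Whoopee cushion" is a completion handshake: the driver hands the hardware the physical address of strbuf_flushflag and spins until the streaming buffer writes it nonzero. A rough sketch of that tail, reconstructed under that assumption:

    /* Rough sketch (based on the surrounding 2.4 sbus.c, not this
     * hunk): point the flush-sync register at the flag, post the
     * writes, then poll until the hardware sets the flag. */
    upa_writeq(__pa(&iommu->strbuf_flushflag),
               iommu->strbuf_regs + STRBUF_FSYNC);
    upa_readq(iommu->sbus_control_reg);
    while (iommu->strbuf_flushflag == 0UL)
            membar("#LoadLoad");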
@@ -191,8 +191,8 @@
cnum = 0;
while ((1UL << cnum) < npages)
cnum++;
- ent = (base & CLUSTER_MASK) >> (PAGE_SHIFT + cnum);
- iopte = iommu->page_table + ((base - MAP_BASE) >> PAGE_SHIFT);
+ ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
+ iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
iopte_val(*iopte) = 0UL;
/* If the global flush might not have caught this entry,
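Streaming clusters are allocated and freed in power-of-two page counts; the freed slot's index within its cluster and its iopte are both recovered from the DVMA base. A worked example with assumed values:

    /* Worked example (values assumed): freeing a 3-page mapping. */
    unsigned long npages = 3UL, cnum = 0UL;
    while ((1UL << cnum) < npages)
            cnum++;                         /* rounds up: cnum == 2 */
    /* For base == MAP_BASE + 0x6000:
     *   ent   = (0x6000 & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum) == 0
     *   iopte = page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT)
     *         = page_table + 3 */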
@@ -235,7 +235,7 @@
static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
- iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> PAGE_SHIFT);
+ iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
iopte_t *walk = iopte + npages;
@@ -266,7 +266,7 @@
if (size <= 0 || sdev == NULL || dvma_addr == NULL)
return NULL;
- size = PAGE_ALIGN(size);
+ size = IO_PAGE_ALIGN(size);
order = get_order(size);
if (order >= 10)
return NULL;
@@ -278,7 +278,7 @@
iommu = sdev->bus->iommu;
spin_lock_irqsave(&iommu->lock, flags);
- iopte = alloc_consistent_cluster(iommu, size >> PAGE_SHIFT);
+ iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
if (iopte == NULL) {
spin_unlock_irqrestore(&iommu->lock, flags);
free_pages(first_page, order);
@@ -286,15 +286,15 @@
}
/* Ok, we're committed at this point. */
- *dvma_addr = MAP_BASE + ((iopte - iommu->page_table) << PAGE_SHIFT);
+ *dvma_addr = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
ret = (void *) first_page;
- npages = size >> PAGE_SHIFT;
+ npages = size >> IO_PAGE_SHIFT;
while (npages--) {
*iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
(__pa(first_page) & IOPTE_PAGE));
- first_page += PAGE_SIZE;
+ first_page += IO_PAGE_SIZE;
}
- iommu_flush(iommu, *dvma_addr, size >> PAGE_SHIFT);
+ iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
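After this hunk the consistent-memory path is expressed entirely in IO pages: the DVMA handle is the iopte's index scaled by IO_PAGE_SHIFT from MAP_BASE, and the fill loop advances one IO page per iopte. A worked example with an assumed allocation:

    /* Worked example (values assumed): a 32K consistent buffer
     * landing at page-table index 16. */
    unsigned long npages = (32UL * 1024UL) >> IO_PAGE_SHIFT;   /* 4 */
    u32 dvma = MAP_BASE + (16UL << IO_PAGE_SHIFT);    /* 0xc0020000 */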
@@ -308,7 +308,7 @@
if (size <= 0 || sdev == NULL || cpu == NULL)
return;
- npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = sdev->bus->iommu;
spin_lock_irq(&iommu->lock);
@@ -333,25 +333,25 @@
BUG();
pbase = (unsigned long) ptr;
- offset = (u32) (pbase & ~PAGE_MASK);
- size = (PAGE_ALIGN(pbase + size) - (pbase & PAGE_MASK));
- pbase = (unsigned long) __pa(pbase & PAGE_MASK);
+ offset = (u32) (pbase & ~IO_PAGE_MASK);
+ size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
+ pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);
spin_lock_irqsave(&iommu->lock, flags);
- npages = size >> PAGE_SHIFT;
+ npages = size >> IO_PAGE_SHIFT;
iopte = alloc_streaming_cluster(iommu, npages);
if (iopte == NULL)
goto bad;
- dma_base = MAP_BASE + ((iopte - iommu->page_table) << PAGE_SHIFT);
- npages = size >> PAGE_SHIFT;
+ dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
+ npages = size >> IO_PAGE_SHIFT;
iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
if (dir != SBUS_DMA_TODEVICE)
iopte_bits |= IOPTE_WRITE;
while (npages--) {
*iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
- pbase += PAGE_SIZE;
+ pbase += IO_PAGE_SIZE;
}
- npages = size >> PAGE_SHIFT;
+ npages = size >> IO_PAGE_SHIFT;
spin_unlock_irqrestore(&iommu->lock, flags);
return (dma_base | offset);
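sbus_map_single() keeps the sub-page offset to hand back to the caller and rounds the mapped region out to whole IO pages. A worked example with an assumed buffer address shows why the rounding can add a page:

    /* Worked example (addresses assumed): 0x100 bytes starting at
     * kernel virtual 0x2ff80, with 8K IO pages. */
    unsigned long pbase = 0x2ff80UL, len = 0x100UL;
    u32 offset = (u32)(pbase & ~IO_PAGE_MASK);          /* 0x1f80 */
    unsigned long size =
            IO_PAGE_ALIGN(pbase + len) - (pbase & IO_PAGE_MASK);
    /* size == 0x4000 (two IO pages): the buffer crosses the 0x30000
     * page boundary, so npages = size >> IO_PAGE_SHIFT == 2 */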
@@ -365,14 +365,14 @@
void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
{
struct sbus_iommu *iommu = sdev->bus->iommu;
- u32 dma_base = dma_addr & PAGE_MASK;
+ u32 dma_base = dma_addr & IO_PAGE_MASK;
unsigned long flags;
- size = (PAGE_ALIGN(dma_addr + size) - dma_base);
+ size = (IO_PAGE_ALIGN(dma_addr + size) - dma_base);
spin_lock_irqsave(&iommu->lock, flags);
- free_streaming_cluster(iommu, dma_base, size >> PAGE_SHIFT);
- strbuf_flush(iommu, dma_base, size >> PAGE_SHIFT);
+ free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
+ strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -385,9 +385,9 @@
unsigned long pteval = ~0UL;
u32 dma_npages;
- dma_npages = ((dma_sg->dvma_address & (PAGE_SIZE - 1UL)) +
+ dma_npages = ((dma_sg->dvma_address & (IO_PAGE_SIZE - 1UL)) +
dma_sg->dvma_length +
- ((u32)(PAGE_SIZE - 1UL))) >> PAGE_SHIFT;
+ ((u32)(IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
do {
unsigned long offset;
signed int len;
@@ -402,15 +402,15 @@
tmp = (unsigned long) __pa(sg->address);
len = sg->length;
- if (((tmp ^ pteval) >> PAGE_SHIFT) != 0UL) {
- pteval = tmp & PAGE_MASK;
- offset = tmp & (PAGE_SIZE - 1UL);
+ if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
+ pteval = tmp & IO_PAGE_MASK;
+ offset = tmp & (IO_PAGE_SIZE - 1UL);
break;
}
- if (((tmp ^ (tmp + len - 1UL)) >> PAGE_SHIFT) != 0UL) {
- pteval = (tmp + PAGE_SIZE) & PAGE_MASK;
+ if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
+ pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
offset = 0UL;
- len -= (PAGE_SIZE - (tmp & (PAGE_SIZE - 1UL)));
+ len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
break;
}
sg++;
@@ -419,8 +419,8 @@
pteval = ((pteval & IOPTE_PAGE) | iopte_bits);
while (len > 0) {
*iopte++ = __iopte(pteval);
- pteval += PAGE_SIZE;
- len -= (PAGE_SIZE - offset);
+ pteval += IO_PAGE_SIZE;
+ len -= (IO_PAGE_SIZE - offset);
offset = 0;
dma_npages--;
}
@@ -432,14 +432,14 @@
* adjusting pteval along the way. Stop when we
* detect a page crossing event.
*/
- while ((pteval << (64 - PAGE_SHIFT)) != 0UL &&
+ while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
pteval == __pa(sg->address) &&
((pteval ^
- (__pa(sg->address) + sg->length - 1UL)) >> PAGE_SHIFT) == 0UL) {
+ (__pa(sg->address) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
pteval += sg->length;
sg++;
}
- if ((pteval << (64 - PAGE_SHIFT)) == 0UL)
+ if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
pteval = ~0UL;
} while (dma_npages != 0);
dma_sg++;
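The left-shift in the loop condition is an alignment test: shifting pteval left by 64 - IO_PAGE_SHIFT keeps only its low IO_PAGE_SHIFT bits, so coalescing continues while pteval sits mid-page, and the ~0UL sentinel is restored once it lands exactly on an IO-page boundary. A small sketch of the test with assumed values:

    /* Sketch of the alignment test, assuming IO_PAGE_SHIFT == 13:
     * shifting by 64 - 13 == 51 discards all but the low 13 bits. */
    unsigned long pteval = 0x2ff80UL;
    int mid_page = ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL);  /* 1 */
    pteval = 0x2e000UL;
    mid_page = ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL);      /* 0 */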
@@ -472,7 +472,7 @@
iopte = alloc_streaming_cluster(iommu, npages);
if (iopte == NULL)
goto bad;
- dma_base = MAP_BASE + ((iopte - iommu->page_table) << PAGE_SHIFT);
+ dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
/* Normalize DVMA addresses. */
sgtmp = sg;
@@ -516,18 +516,18 @@
return;
}
- dvma_base = sg[0].dvma_address & PAGE_MASK;
+ dvma_base = sg[0].dvma_address & IO_PAGE_MASK;
for (i = 0; i < nents; i++) {
if (sg[i].dvma_length == 0)
break;
}
i--;
- size = PAGE_ALIGN(sg[i].dvma_address + sg[i].dvma_length) - dvma_base;
+ size = IO_PAGE_ALIGN(sg[i].dvma_address + sg[i].dvma_length) - dvma_base;
iommu = sdev->bus->iommu;
spin_lock_irqsave(&iommu->lock, flags);
- free_streaming_cluster(iommu, dvma_base, size >> PAGE_SHIFT);
- strbuf_flush(iommu, dvma_base, size >> PAGE_SHIFT);
+ free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+ strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
spin_unlock_irqrestore(&iommu->lock, flags);
}
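sbus_unmap_sg() rebuilds the total streaming range from the IO-page-aligned base of the first entry to the IO-page-aligned end of the last entry with a nonzero length. A worked example with assumed DVMA values:

    /* Worked example (values assumed): two scatterlist entries at
     * DVMA 0xc0040100 (length 0x1000) and 0xc0041100 (length 0xf00). */
    u32 dvma_base = (u32)(0xc0040100UL & IO_PAGE_MASK);  /* 0xc0040000 */
    unsigned long size =
            IO_PAGE_ALIGN(0xc0041100UL + 0xf00UL) - dvma_base;
    /* size == 0x2000: a single 8K IO page spans the whole list */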
@@ -536,10 +536,10 @@
struct sbus_iommu *iommu = sdev->bus->iommu;
unsigned long flags;
- size = (PAGE_ALIGN(base + size) - (base & PAGE_MASK));
+ size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
spin_lock_irqsave(&iommu->lock, flags);
- strbuf_flush(iommu, base & PAGE_MASK, size >> PAGE_SHIFT);
+ strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -550,16 +550,16 @@
u32 base;
int i;
- base = sg[0].dvma_address & PAGE_MASK;
+ base = sg[0].dvma_address & IO_PAGE_MASK;
for (i = 0; i < nents; i++) {
if (sg[i].dvma_length == 0)
break;
}
i--;
- size = PAGE_ALIGN(sg[i].dvma_address + sg[i].dvma_length) - base;
+ size = IO_PAGE_ALIGN(sg[i].dvma_address + sg[i].dvma_length) - base;
spin_lock_irqsave(&iommu->lock, flags);
- strbuf_flush(iommu, base, size >> PAGE_SHIFT);
+ strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -1123,14 +1123,14 @@
* table (128K ioptes * 8 bytes per iopte). This is
* page order 7 on UltraSparc.
*/
- tsb_base = __get_free_pages(GFP_ATOMIC, 7);
+ tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE));
if (tsb_base == 0UL) {
prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
prom_halt();
}
iommu->page_table = (iopte_t *) tsb_base;
- memset(iommu->page_table, 0, (PAGE_SIZE << 7));
+ memset(iommu->page_table, 0, IO_TSB_SIZE);
upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);
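The final hunk replaces the hard-coded order 7 with get_order(IO_TSB_SIZE), so the TSB allocation survives a change in kernel page size. Assuming IO_TSB_SIZE is the 1MB named in the comment (128K ioptes * 8 bytes each), the numbers line up:

    /* Worked example, assuming IO_TSB_SIZE == 1MB: with 8K kernel
     * pages (PAGE_SHIFT == 13), 1MB is 128 pages == order 7, the
     * old constant; with 64K pages it would be order 4 instead. */
    unsigned long order = get_order(1UL << 20);  /* 7 when PAGE_SHIFT == 13 */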