patch-2.4.23 linux-2.4.23/include/asm-ppc/io.h
- Lines: 122
- Date: 2003-11-28 10:26:21.000000000 -0800
- Orig file: linux-2.4.22/include/asm-ppc/io.h
- Orig date: 2003-08-25 04:44:44.000000000 -0700
diff -urN linux-2.4.22/include/asm-ppc/io.h linux-2.4.23/include/asm-ppc/io.h
@@ -3,7 +3,9 @@
#define _PPC_IO_H
#include <linux/config.h>
+#include <linux/mm.h>
#include <linux/types.h>
+#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/byteorder.h>
@@ -40,6 +42,7 @@
extern unsigned long isa_io_base;
extern unsigned long isa_mem_base;
extern unsigned long pci_dram_offset;
+
#define readb(addr) in_8((volatile u8 *)(addr))
#define writeb(b,addr) out_8((volatile u8 *)(addr), (b))
#if defined(CONFIG_APUS)
@@ -195,14 +198,15 @@
* Map in an area of physical address space, for accessing
* I/O devices etc.
*/
-extern void *__ioremap(unsigned long address, unsigned long size,
+extern void *__ioremap(phys_addr_t address, unsigned long size,
unsigned long flags);
-extern void *ioremap(unsigned long address, unsigned long size);
+extern void *ioremap(phys_addr_t address, unsigned long size);
+extern void *ioremap64(unsigned long long address, unsigned long size);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
extern void iounmap(void *addr);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));
-extern void io_block_mapping(unsigned long virt, unsigned long phys,
+extern void io_block_mapping(unsigned long virt, phys_addr_t phys,
unsigned int size, int flags);
/*
@@ -223,10 +227,7 @@
*/
extern inline unsigned long virt_to_bus(volatile void * address)
{
-#if defined(CONFIG_APUS) || defined(CONFIG_8xx) || defined(CONFIG_4xx)
- /* I think everyone will be using this version if we start allowing
- * uncached pages in alternate virtual spaces. -- Dan
- */
+#ifdef CONFIG_APUS
return (iopa((unsigned long) address) + PCI_DRAM_OFFSET);
#else
if (address == (void *)0)
@@ -237,7 +238,7 @@
extern inline void * bus_to_virt(unsigned long address)
{
-#if defined(CONFIG_APUS) || defined(CONFIG_8xx) || defined(CONFIG_40x)
+#ifdef CONFIG_APUS
return (void*) mm_ptov (address - PCI_DRAM_OFFSET);
#else
if (address == 0)
@@ -252,7 +253,7 @@
*/
extern inline unsigned long virt_to_phys(volatile void * address)
{
-#if defined(CONFIG_APUS) || defined(CONFIG_8xx) || defined(CONFIG_40x)
+#ifdef CONFIG_APUS
return iopa ((unsigned long) address);
#else
return (unsigned long) address - KERNELBASE;
@@ -261,7 +262,7 @@
extern inline void * phys_to_virt(unsigned long address)
{
-#if defined(CONFIG_APUS) || defined(CONFIG_8xx) || defined(CONFIG_40x)
+#ifdef CONFIG_APUS
return (void*) mm_ptov (address);
#else
return (void *) (address + KERNELBASE);
@@ -401,11 +402,43 @@
return 0;
}
-/* Nothing to do */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+
+/*
+ * DMA-consistent mapping functions for PowerPCs that don't support
+ * cache snooping. These allocate/free a region of uncached mapped
+ * memory space for use with DMA devices. Alternatively, you could
+ * allocate the space "normally" and use the cache management functions
+ * to ensure it is consistent.
+ */
+extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
+extern void consistent_free(void *vaddr);
+extern void consistent_sync(void *vaddr, size_t size, int rw);
+extern void consistent_sync_page(struct page *page, unsigned long offset,
+ size_t size, int rw);
+
+#define dma_cache_inv(_start,_size) \
+ invalidate_dcache_range(_start, (_start + _size))
+#define dma_cache_wback(_start,_size) \
+ clean_dcache_range(_start, (_start + _size))
+#define dma_cache_wback_inv(_start,_size) \
+ flush_dcache_range(_start, (_start + _size))
+
+#else /* ! CONFIG_NOT_COHERENT_CACHE */
+
+/*
+ * Cache coherent cores.
+ */
#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)
-#endif
+#define consistent_alloc(gfp, size, handle) NULL
+#define consistent_free(addr, size) do { } while (0)
+#define consistent_sync(addr, size, rw) do { } while (0)
+#define consistent_sync_page(pg, off, sz, rw) do { } while (0)
+
+#endif /* CONFIG_NOT_COHERENT_CACHE */
+#endif /* _PPC_IO_H */
#endif /* __KERNEL__ */
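
The hunk at @@ -195,14 +198,15 @@ widens the physical-address arguments of __ioremap(), ioremap() and io_block_mapping() to phys_addr_t and adds ioremap64() for parts whose physical addresses exceed 32 bits. A minimal usage sketch of the new ioremap64() prototype follows; the register block address, size and the mydev_* names are hypothetical, not taken from the patch.

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/io.h>

/* Hypothetical device register block sitting above 4GB on a platform
 * with >32-bit physical addressing; values are illustrative only. */
#define MYDEV_REG_PHYS	0x100000000ULL
#define MYDEV_REG_SIZE	0x1000

static volatile u8 *mydev_regs;

static int mydev_map(void)
{
	/* ioremap64() takes a 64-bit physical address even on 32-bit PPC. */
	mydev_regs = (volatile u8 *)ioremap64(MYDEV_REG_PHYS, MYDEV_REG_SIZE);
	if (mydev_regs == NULL)
		return -ENOMEM;
	return 0;
}

static void mydev_unmap(void)
{
	iounmap((void *)mydev_regs);
}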
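
The final hunk introduces consistent_alloc()/consistent_free()/consistent_sync() for PowerPC cores without cache snooping (CONFIG_NOT_COHERENT_CACHE), with no-op stubs on coherent cores. Below is a minimal sketch of a driver allocating an uncached DMA buffer through this interface on a non-coherent configuration; the buffer name, size and the use of GFP_KERNEL are assumptions for illustration, not part of the patch.

#include <linux/errno.h>
#include <linux/mm.h>		/* GFP_KERNEL */
#include <linux/string.h>	/* memset */
#include <linux/types.h>
#include <asm/io.h>

#define MYDEV_RING_BYTES	4096	/* example size, not from the patch */

static void *mydev_ring;		/* CPU-visible (uncached) virtual address */
static dma_addr_t mydev_ring_dma;	/* bus address programmed into the device */

static int mydev_ring_alloc(void)
{
	/* On a non-coherent core this returns an uncached mapping and fills
	 * in the bus address; on coherent cores the stub macro is NULL. */
	mydev_ring = consistent_alloc(GFP_KERNEL, MYDEV_RING_BYTES,
				      &mydev_ring_dma);
	if (mydev_ring == NULL)
		return -ENOMEM;
	memset(mydev_ring, 0, MYDEV_RING_BYTES);
	/* hand mydev_ring_dma to the device here */
	return 0;
}

static void mydev_ring_free(void)
{
	consistent_free(mydev_ring);
}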