patch-2.4.6 linux/drivers/char/agp/agpgart_be.c
- Lines: 673
- Date: Mon Jul 2 15:27:56 2001
- Orig file: v2.4.5/linux/drivers/char/agp/agpgart_be.c
- Orig date: Tue Feb 13 14:13:43 2001
diff -u --recursive --new-file v2.4.5/linux/drivers/char/agp/agpgart_be.c linux/drivers/char/agp/agpgart_be.c
@@ -67,7 +67,7 @@
{
#if defined(__i386__)
asm volatile ("wbinvd":::"memory");
-#elif defined(__alpha__) || defined(__ia64__)
+#elif defined(__alpha__) || defined(__ia64__) || defined(__sparc__)
/* ??? I wonder if we'll really need to flush caches, or if the
core logic can manage to keep the system coherent. The ARM
speaks only of using `cflush' to get things in memory in
@@ -2189,6 +2189,621 @@
#endif /* CONFIG_AGP_ALI */
+#ifdef CONFIG_AGP_SWORKS
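+/*
+ * Each GATT page is tracked by two addresses: "real" is the cached
+ * kernel-virtual address used for allocation and freeing, while
+ * "remapped" is an uncached ioremap of the same page used for writing
+ * the entries that the chipset reads.
+ */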
+typedef struct _serverworks_page_map {
+ unsigned long *real;
+ unsigned long *remapped;
+} serverworks_page_map;
+
+static struct _serverworks_private {
+ struct pci_dev *svrwrks_dev; /* device one */
+ volatile u8 *registers;
+ serverworks_page_map **gatt_pages;
+ int num_tables;
+ serverworks_page_map scratch_dir;
+
+ int gart_addr_ofs;
+ int mm_addr_ofs;
+} serverworks_private;
+
+static int serverworks_create_page_map(serverworks_page_map *page_map)
+{
+ int i;
+
+ page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+ if (page_map->real == NULL) {
+ return -ENOMEM;
+ }
+ set_bit(PG_reserved, &virt_to_page(page_map->real)->flags);
+ CACHE_FLUSH();
+ page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
+ PAGE_SIZE);
+ if (page_map->remapped == NULL) {
+ clear_bit(PG_reserved,
+ &virt_to_page(page_map->real)->flags);
+ free_page((unsigned long) page_map->real);
+ page_map->real = NULL;
+ return -ENOMEM;
+ }
+ CACHE_FLUSH();
+
+ for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+ page_map->remapped[i] = agp_bridge.scratch_page;
+ }
+
+ return 0;
+}
+
+static void serverworks_free_page_map(serverworks_page_map *page_map)
+{
+ iounmap(page_map->remapped);
+ clear_bit(PG_reserved,
+ &virt_to_page(page_map->real)->flags);
+ free_page((unsigned long) page_map->real);
+}
+
+static void serverworks_free_gatt_pages(void)
+{
+ int i;
+ serverworks_page_map **tables;
+ serverworks_page_map *entry;
+
+ tables = serverworks_private.gatt_pages;
+ for(i = 0; i < serverworks_private.num_tables; i++) {
+ entry = tables[i];
+ if (entry != NULL) {
+ if (entry->real != NULL) {
+ serverworks_free_page_map(entry);
+ }
+ kfree(entry);
+ }
+ }
+ kfree(tables);
+}
+
+static int serverworks_create_gatt_pages(int nr_tables)
+{
+ serverworks_page_map **tables;
+ serverworks_page_map *entry;
+ int retval = 0;
+ int i;
+
+ tables = kmalloc((nr_tables + 1) * sizeof(serverworks_page_map *),
+ GFP_KERNEL);
+ if (tables == NULL) {
+ return -ENOMEM;
+ }
+ memset(tables, 0, sizeof(serverworks_page_map *) * (nr_tables + 1));
+ for (i = 0; i < nr_tables; i++) {
+ entry = kmalloc(sizeof(serverworks_page_map), GFP_KERNEL);
+ if (entry == NULL) {
+ retval = -ENOMEM;
+ break;
+ }
+ memset(entry, 0, sizeof(serverworks_page_map));
+ tables[i] = entry;
+ retval = serverworks_create_page_map(entry);
+ if (retval != 0) break;
+ }
+ serverworks_private.num_tables = nr_tables;
+ serverworks_private.gatt_pages = tables;
+
+ if (retval != 0) serverworks_free_gatt_pages();
+
+ return retval;
+}
+
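+/*
+ * The ServerWorks GART is a two-level table: a 4 KB page directory
+ * whose entries point at 4 KB GATT pages.  The macros below split an
+ * aperture address into a page-directory index (bits above bit 21,
+ * relative to the aperture base) and an offset into the selected
+ * GATT page (bits 21:12).
+ */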
+#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
+ GET_PAGE_DIR_IDX(addr)]->remapped)
+
+#ifndef GET_PAGE_DIR_OFF
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#endif
+
+#ifndef GET_PAGE_DIR_IDX
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+ GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
+#endif
+
+#ifndef GET_GATT_OFF
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#endif
+
+static int serverworks_create_gatt_table(void)
+{
+ aper_size_info_lvl2 *value;
+ serverworks_page_map page_dir;
+ int retval;
+ u32 temp;
+ int i;
+
+ value = A_SIZE_LVL2(agp_bridge.current_size);
+ retval = serverworks_create_page_map(&page_dir);
+ if (retval != 0) {
+ return retval;
+ }
+ retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
+ if (retval != 0) {
+ serverworks_free_page_map(&page_dir);
+ return retval;
+ }
+ /* Create a fake scratch directory */
+ for(i = 0; i < 1024; i++) {
+ serverworks_private.scratch_dir.remapped[i] = (unsigned long) agp_bridge.scratch_page;
+ page_dir.remapped[i] =
+ virt_to_bus(serverworks_private.scratch_dir.real);
+ page_dir.remapped[i] |= 0x00000001;
+ }
+
+ retval = serverworks_create_gatt_pages(value->num_entries / 1024);
+ if (retval != 0) {
+ serverworks_free_page_map(&page_dir);
+ return retval;
+ }
+
+ agp_bridge.gatt_table_real = page_dir.real;
+ agp_bridge.gatt_table = page_dir.remapped;
+ agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real);
+
+ /* Get the address for the gart region.
+ * This is a bus address even on the alpha, because it is
+ * used to program the AGP master, not the CPU.
+ */
+
+ pci_read_config_dword(agp_bridge.dev,
+ serverworks_private.gart_addr_ofs,
+ &temp);
+ agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+ /* Fill the real page directory: each entry gets the bus address
+ * of a GATT page with bit 0 set as the valid bit. */
+
+ for(i = 0; i < value->num_entries / 1024; i++) {
+ page_dir.remapped[i] =
+ virt_to_bus(serverworks_private.gatt_pages[i]->real);
+ page_dir.remapped[i] |= 0x00000001;
+ }
+
+ return 0;
+}
+
+static int serverworks_free_gatt_table(void)
+{
+ serverworks_page_map page_dir;
+
+ page_dir.real = agp_bridge.gatt_table_real;
+ page_dir.remapped = agp_bridge.gatt_table;
+
+ serverworks_free_gatt_pages();
+ serverworks_free_page_map(&page_dir);
+ return 0;
+}
+
+static int serverworks_fetch_size(void)
+{
+ int i;
+ u32 temp;
+ u32 temp2;
+ aper_size_info_lvl2 *values;
+
+ values = A_SIZE_LVL2(agp_bridge.aperture_sizes);
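+ /* Probe the aperture size: save the BAR, write 0xfe000000, read
+ * back which bits the chipset keeps, then restore the BAR.  The
+ * masked result selects an entry in serverworks_sizes[]. */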
+ pci_read_config_dword(agp_bridge.dev,
+ serverworks_private.gart_addr_ofs,
+ &temp);
+ pci_write_config_dword(agp_bridge.dev,
+ serverworks_private.gart_addr_ofs,
+ 0xfe000000);
+ pci_read_config_dword(agp_bridge.dev,
+ serverworks_private.gart_addr_ofs,
+ &temp2);
+ pci_write_config_dword(agp_bridge.dev,
+ serverworks_private.gart_addr_ofs,
+ temp);
+ temp2 &= SVWRKS_SIZE_MASK;
+
+ for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+ if (temp2 == values[i].size_value) {
+ agp_bridge.previous_size =
+ agp_bridge.current_size = (void *) (values + i);
+
+ agp_bridge.aperture_size_idx = i;
+ return values[i].size;
+ }
+ }
+
+ return 0;
+}
+
+static int serverworks_configure(void)
+{
+ aper_size_info_lvl2 *current_size;
+ u32 temp;
+ u8 enable_reg;
+ u8 cap_ptr;
+ u32 cap_id;
+ u16 cap_reg;
+
+ current_size = A_SIZE_LVL2(agp_bridge.current_size);
+
+ /* Get the memory mapped registers */
+ pci_read_config_dword(agp_bridge.dev,
+ serverworks_private.mm_addr_ofs,
+ &temp);
+ temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+ serverworks_private.registers = (volatile u8 *) ioremap(temp, 4096);
+
+ OUTREG8(serverworks_private.registers, SVWRKS_GART_CACHE, 0x0a);
+
+ OUTREG32(serverworks_private.registers, SVWRKS_GATTBASE,
+ agp_bridge.gatt_bus_addr);
+
+ cap_reg = INREG16(serverworks_private.registers, SVWRKS_COMMAND);
+ cap_reg &= ~0x0007;
+ cap_reg |= 0x4;
+ OUTREG16(serverworks_private.registers, SVWRKS_COMMAND, cap_reg);
+
+ pci_read_config_byte(serverworks_private.svrwrks_dev,
+ SVWRKS_AGP_ENABLE, &enable_reg);
+ enable_reg |= 0x1; /* Agp Enable bit */
+ pci_write_config_byte(serverworks_private.svrwrks_dev,
+ SVWRKS_AGP_ENABLE, enable_reg);
+ agp_bridge.tlb_flush(NULL);
+
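+ /* Walk the PCI capability list (pointer at config offset 0x34)
+ * until we find the AGP capability (ID 0x02) or hit the end. */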
+ pci_read_config_byte(serverworks_private.svrwrks_dev, 0x34, &cap_ptr);
+ if (cap_ptr != 0x00) {
+ do {
+ pci_read_config_dword(serverworks_private.svrwrks_dev,
+ cap_ptr, &cap_id);
+
+ if ((cap_id & 0xff) != 0x02)
+ cap_ptr = (cap_id >> 8) & 0xff;
+ }
+ while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+ }
+ agp_bridge.capndx = cap_ptr;
+
+ /* Fill in the mode register */
+ pci_read_config_dword(serverworks_private.svrwrks_dev,
+ agp_bridge.capndx + 4,
+ &agp_bridge.mode);
+
+ pci_read_config_byte(agp_bridge.dev,
+ SVWRKS_CACHING,
+ &enable_reg);
+ enable_reg &= ~0x3;
+ pci_write_config_byte(agp_bridge.dev,
+ SVWRKS_CACHING,
+ enable_reg);
+
+ pci_read_config_byte(agp_bridge.dev,
+ SVWRKS_FEATURE,
+ &enable_reg);
+ enable_reg |= (1<<6);
+ pci_write_config_byte(agp_bridge.dev,
+ SVWRKS_FEATURE,
+ enable_reg);
+
+ return 0;
+}
+
+static void serverworks_cleanup(void)
+{
+ iounmap((void *) serverworks_private.registers);
+}
+
+/*
+ * This routine could be implemented by taking the addresses written
+ * to the GATT and flushing them individually.  However, it currently
+ * just flushes the whole table, which is probably more efficient,
+ * since agp_memory blocks can span a large number of entries.
+ */
+
+static void serverworks_tlbflush(agp_memory * temp)
+{
+ unsigned long end;
+
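+ /* Kick the posted-write-buffer flush, then the directory/TLB
+ * flush; the chipset clears each flag when it has finished.
+ * Complain if either flush takes more than three seconds. */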
+ OUTREG8(serverworks_private.registers, SVWRKS_POSTFLUSH, 0x01);
+ end = jiffies + 3*HZ;
+ while(INREG8(serverworks_private.registers,
+ SVWRKS_POSTFLUSH) == 0x01) {
+ if((signed)(end - jiffies) <= 0) {
+ printk(KERN_ERR "Posted write buffer flush took more"
+ "then 3 seconds\n");
+ }
+ }
+ OUTREG32(serverworks_private.registers, SVWRKS_DIRFLUSH, 0x00000001);
+ end = jiffies + 3*HZ;
+ while(INREG32(serverworks_private.registers,
+ SVWRKS_DIRFLUSH) == 0x00000001) {
+ if((signed)(end - jiffies) <= 0) {
+ printk(KERN_ERR "TLB flush took more"
+ "then 3 seconds\n");
+ }
+ }
+}
+
+static unsigned long serverworks_mask_memory(unsigned long addr, int type)
+{
+ /* Only type 0 is supported by the serverworks chipsets */
+
+ return addr | agp_bridge.masks[0].mask;
+}
+
+static int serverworks_insert_memory(agp_memory * mem,
+ off_t pg_start, int type)
+{
+ int i, j, num_entries;
+ unsigned long *cur_gatt;
+ unsigned long addr;
+
+ num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+ if ((pg_start + mem->page_count) > num_entries) {
+ return -EINVAL;
+ }
+
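+ /* Make sure every target GATT entry is free before writing any. */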
+ j = pg_start;
+ while (j < (pg_start + mem->page_count)) {
+ addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)])) {
+ return -EBUSY;
+ }
+ j++;
+ }
+
+ if (mem->is_flushed == FALSE) {
+ CACHE_FLUSH();
+ mem->is_flushed = TRUE;
+ }
+
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i];
+ }
+ agp_bridge.tlb_flush(mem);
+ return 0;
+}
+
+static int serverworks_remove_memory(agp_memory * mem, off_t pg_start,
+ int type)
+{
+ int i;
+ unsigned long *cur_gatt;
+ unsigned long addr;
+
+ if (type != 0 || mem->type != 0) {
+ return -EINVAL;
+ }
+
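+ /* Point the affected GATT entries back at the scratch page,
+ * flushing the TLB before and after the update. */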
+ CACHE_FLUSH();
+ agp_bridge.tlb_flush(mem);
+
+ for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+ cur_gatt = SVRWRKS_GET_GATT(addr);
+ cur_gatt[GET_GATT_OFF(addr)] =
+ (unsigned long) agp_bridge.scratch_page;
+ }
+
+ agp_bridge.tlb_flush(mem);
+ return 0;
+}
+
+static gatt_mask serverworks_masks[] =
+{
+ {0x00000001, 0}
+};
+
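+/* {aperture size in MB, number of GATT entries, size value matched
+ * against the masked aperture BAR read-back in serverworks_fetch_size()} */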
+static aper_size_info_lvl2 serverworks_sizes[7] =
+{
+ {2048, 524288, 0x80000000},
+ {1024, 262144, 0xc0000000},
+ {512, 131072, 0xe0000000},
+ {256, 65536, 0xf0000000},
+ {128, 32768, 0xf8000000},
+ {64, 16384, 0xfc000000},
+ {32, 8192, 0xfe000000}
+};
+
+static void serverworks_agp_enable(u32 mode)
+{
+ struct pci_dev *device = NULL;
+ u32 command, scratch, cap_id;
+ u8 cap_ptr;
+
+ pci_read_config_dword(serverworks_private.svrwrks_dev,
+ agp_bridge.capndx + 4,
+ &command);
+
+ /*
+ * PASS1: go through all devices that claim to be
+ * AGP devices and collect their data.
+ */
+
+ while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
+ device)) != NULL) {
+ pci_read_config_dword(device, 0x04, &scratch);
+
+ if (!(scratch & 0x00100000))
+ continue;
+
+ pci_read_config_byte(device, 0x34, &cap_ptr);
+
+ if (cap_ptr != 0x00) {
+ do {
+ pci_read_config_dword(device,
+ cap_ptr, &cap_id);
+
+ if ((cap_id & 0xff) != 0x02)
+ cap_ptr = (cap_id >> 8) & 0xff;
+ }
+ while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+ }
+ if (cap_ptr != 0x00) {
+ /*
+ * OK, here we have an AGP device.  Disable settings that are
+ * not supported by every device, and clamp the request queue
+ * depth to the minimum.
+ */
+
+ pci_read_config_dword(device, cap_ptr + 4, &scratch);
+
+ /* adjust RQ depth: take the minimum of the requested mode,
+ * the target, and this master (bits 31:24) */
+ command =
+ ((command & ~0xff000000) |
+ min((mode & 0xff000000),
+ min((command & 0xff000000),
+ (scratch & 0xff000000))));
+
+ /* disable SBA if it's not supported */
+ if (!((command & 0x00000200) &&
+ (scratch & 0x00000200) &&
+ (mode & 0x00000200)))
+ command &= ~0x00000200;
+
+ /* disable FW */
+ command &= ~0x00000010;
+
+ command &= ~0x00000008;
+
+ if (!((command & 4) &&
+ (scratch & 4) &&
+ (mode & 4)))
+ command &= ~0x00000004;
+
+ if (!((command & 2) &&
+ (scratch & 2) &&
+ (mode & 2)))
+ command &= ~0x00000002;
+
+ if (!((command & 1) &&
+ (scratch & 1) &&
+ (mode & 1)))
+ command &= ~0x00000001;
+ }
+ }
+ /*
+ * PASS2: Figure out the 4X/2X/1X setting and enable the
+ * target (our motherboard chipset).
+ */
+
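+ /* Each branch below keeps the highest surviving rate bit and
+ * clears the lower ones, so exactly one of 4X/2X/1X remains. */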
+ if (command & 4) {
+ command &= ~3; /* 4X */
+ }
+ if (command & 2) {
+ command &= ~5; /* 2X */
+ }
+ if (command & 1) {
+ command &= ~6; /* 1X */
+ }
+ command |= 0x00000100;
+
+ pci_write_config_dword(serverworks_private.svrwrks_dev,
+ agp_bridge.capndx + 8,
+ command);
+
+ /*
+ * PASS3: Go through all AGP devices and update the
+ * command registers.
+ */
+
+ while ((device = pci_find_class(PCI_CLASS_DISPLAY_VGA << 8,
+ device)) != NULL) {
+ pci_read_config_dword(device, 0x04, &scratch);
+
+ if (!(scratch & 0x00100000))
+ continue;
+
+ pci_read_config_byte(device, 0x34, &cap_ptr);
+
+ if (cap_ptr != 0x00) {
+ do {
+ pci_read_config_dword(device,
+ cap_ptr, &cap_id);
+
+ if ((cap_id & 0xff) != 0x02)
+ cap_ptr = (cap_id >> 8) & 0xff;
+ }
+ while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00));
+ }
+ if (cap_ptr != 0x00)
+ pci_write_config_dword(device, cap_ptr + 8, command);
+ }
+}
+
+static int __init serverworks_setup (struct pci_dev *pdev)
+{
+ u32 temp;
+ u32 temp2;
+
+ serverworks_private.svrwrks_dev = pdev;
+
+ agp_bridge.masks = serverworks_masks;
+ agp_bridge.num_of_masks = 1;
+ agp_bridge.aperture_sizes = (void *) serverworks_sizes;
+ agp_bridge.size_type = LVL2_APER_SIZE;
+ agp_bridge.num_aperture_sizes = 7;
+ agp_bridge.dev_private_data = (void *) &serverworks_private;
+ agp_bridge.needs_scratch_page = TRUE;
+ agp_bridge.configure = serverworks_configure;
+ agp_bridge.fetch_size = serverworks_fetch_size;
+ agp_bridge.cleanup = serverworks_cleanup;
+ agp_bridge.tlb_flush = serverworks_tlbflush;
+ agp_bridge.mask_memory = serverworks_mask_memory;
+ agp_bridge.agp_enable = serverworks_agp_enable;
+ agp_bridge.cache_flush = global_cache_flush;
+ agp_bridge.create_gatt_table = serverworks_create_gatt_table;
+ agp_bridge.free_gatt_table = serverworks_free_gatt_table;
+ agp_bridge.insert_memory = serverworks_insert_memory;
+ agp_bridge.remove_memory = serverworks_remove_memory;
+ agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+ agp_bridge.free_by_type = agp_generic_free_by_type;
+ agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+ agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+
+ pci_read_config_dword(agp_bridge.dev,
+ SVWRKS_APSIZE,
+ &temp);
+
+ serverworks_private.gart_addr_ofs = 0x10;
+
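+ /* The aperture base lives in BAR0 (0x10).  If it is a 64-bit BAR
+ * it also occupies 0x14, so the MMIO BAR follows at 0x18; otherwise
+ * the MMIO BAR is at 0x14. */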
+ if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ pci_read_config_dword(agp_bridge.dev,
+ SVWRKS_APSIZE + 4,
+ &temp2);
+ if(temp2 != 0) {
+ printk("Detected 64 bit aperture address, but top "
+ "bits are not zero. Disabling agp\n");
+ return -ENODEV;
+ }
+ serverworks_private.mm_addr_ofs = 0x18;
+ } else {
+ serverworks_private.mm_addr_ofs = 0x14;
+ }
+
+ pci_read_config_dword(agp_bridge.dev,
+ serverworks_private.mm_addr_ofs,
+ &temp);
+ if(temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ pci_read_config_dword(agp_bridge.dev,
+ serverworks_private.mm_addr_ofs + 4,
+ &temp2);
+ if(temp2 != 0) {
+ printk("Detected 64 bit MMIO address, but top "
+ "bits are not zero. Disabling agp\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_AGP_SWORKS */
+
/* per-chipset initialization data.
* note -- all chipsets for a single vendor MUST be grouped together
@@ -2585,6 +3200,41 @@
}
}
#endif /* CONFIG_AGP_I810 */
+
+#ifdef CONFIG_AGP_SWORKS
+ /* The AGP registers live on function 1 of the bridge, so we hard-code device 0, function 1 here */
+ if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS) {
+ struct pci_dev *bridge_dev;
+
+ bridge_dev = pci_find_slot ((unsigned int)dev->bus->number,
+ PCI_DEVFN(0, 1));
+ if(bridge_dev == NULL) {
+ printk(KERN_INFO PFX "Detected a ServerWorks "
+ "chipset, but could not find the secondary "
+ "device.\n");
+ return -ENODEV;
+ }
+
+ switch (dev->device) {
+ case PCI_DEVICE_ID_SERVERWORKS_HE:
+ agp_bridge.type = SVWRKS_HE;
+ return serverworks_setup(bridge_dev);
+
+ case PCI_DEVICE_ID_SERVERWORKS_LE:
+ case 0x0007:
+ agp_bridge.type = SVWRKS_LE;
+ return serverworks_setup(bridge_dev);
+
+ default:
+ if(agp_try_unsupported) {
+ agp_bridge.type = SVWRKS_GENERIC;
+ return serverworks_setup(bridge_dev);
+ }
+ break;
+ }
+ }
+
+#endif /* CONFIG_AGP_SWORKS */
/* find capndx */
pci_read_config_dword(dev, 0x04, &scratch);