patch-2.4.23 linux-2.4.23/arch/ia64/mm/init.c
Lines: 450
Date: 2003-11-28 10:26:19.000000000 -0800
Orig file: linux-2.4.22/arch/ia64/mm/init.c
Orig date: 2003-08-25 04:44:39.000000000 -0700
diff -urN linux-2.4.22/arch/ia64/mm/init.c linux-2.4.23/arch/ia64/mm/init.c
@@ -16,12 +16,14 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/efi.h>
+#include <linux/mmzone.h>
#include <asm/bitops.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
+#include <asm/numa.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/system.h>
@@ -34,11 +36,14 @@
extern char _stext, _etext, _edata, __init_begin, __init_end;
extern void ia64_tlb_init (void);
+extern int filter_rsvd_memory (unsigned long, unsigned long, void *);
+/* Note - may be changed by platform_setup */
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
#define LARGE_GAP 0x40000000 /* Use virtual mem map if a hole is > than this */
-static unsigned long totalram_pages;
+static unsigned long totalram_pages, reserved_pages;
+struct page *zero_page_memmap_ptr; /* map entry for zero page */
unsigned long vmalloc_end = VMALLOC_END_INIT;
@@ -63,6 +68,16 @@
return freed;
}
+inline void
+ia64_set_rbs_bot (void)
+{
+ unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;
+
+ if (stack_size > MAX_USER_STACK_SIZE)
+ stack_size = MAX_USER_STACK_SIZE;
+ current->thread.rbs_bot = STACK_TOP - stack_size;
+}
+
/*
* This performs some platform-dependent address space initialization.
* On IA-64, we want to setup the VM area for the register backing
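The new ia64_set_rbs_bot() sizes the register backing store from the
process's RLIMIT_STACK maximum instead of the old fixed IA64_RBS_BOT,
clamping oversized limits. A minimal userspace sketch of the same
arithmetic, with STACK_TOP and MAX_USER_STACK_SIZE as assumed placeholder
values rather than the kernel's real ones:

#include <stdio.h>

#define STACK_TOP           0xa000000000000000UL  /* assumed for illustration */
#define MAX_USER_STACK_SIZE (1UL << 34)           /* assumed for illustration */

int main(void)
{
	unsigned long rlim_max = 1UL << 36;          /* sample RLIMIT_STACK max */
	unsigned long stack_size = rlim_max & -16;   /* align down to 16 bytes */

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;    /* clamp oversized rlimits */
	printf("rbs_bot = %#lx\n", STACK_TOP - stack_size);
	return 0;
}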
@@ -74,6 +89,8 @@
{
struct vm_area_struct *vma;
+ ia64_set_rbs_bot();
+
/*
* If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
* the problem. When the process attempts to write to the register backing store
@@ -82,7 +99,7 @@
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (vma) {
vma->vm_mm = current->mm;
- vma->vm_start = IA64_RBS_BOT;
+ vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = PAGE_COPY;
vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
@@ -110,10 +127,11 @@
void
free_initmem (void)
{
- unsigned long addr;
+ unsigned long addr, eaddr;
- addr = (unsigned long) &__init_begin;
- for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) {
+ addr = (unsigned long) ia64_imva(&__init_begin);
+ eaddr = (unsigned long) ia64_imva(&__init_end);
+ for (; addr < eaddr; addr += PAGE_SIZE) {
clear_bit(PG_reserved, &virt_to_page((void *)addr)->flags);
set_page_count(virt_to_page((void *)addr), 1);
free_page(addr);
@@ -189,58 +207,39 @@
void
show_mem(void)
{
- int i, total = 0, reserved = 0;
- int shared = 0, cached = 0;
+ int i, reserved;
+ int shared, cached;
+ pg_data_t *pgdat;
+ char *tchar = (numnodes > 1) ? "\t" : "";
printk("Mem-info:\n");
show_free_areas();
-#ifdef CONFIG_DISCONTIGMEM
- {
- pg_data_t *pgdat = pgdat_list;
-
- printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- do {
- printk("Node ID: %d\n", pgdat->node_id);
- for(i = 0; i < pgdat->node_size; i++) {
- if (PageReserved(pgdat->node_mem_map+i))
- reserved++;
- else if (PageSwapCache(pgdat->node_mem_map+i))
- cached++;
- else if (page_count(pgdat->node_mem_map + i))
- shared += page_count(pgdat->node_mem_map + i) - 1;
- }
- printk("\t%d pages of RAM\n", pgdat->node_size);
- printk("\t%d reserved pages\n", reserved);
- printk("\t%d pages shared\n", shared);
- printk("\t%d pages swap cached\n", cached);
- pgdat = pgdat->node_next;
- } while (pgdat);
- printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
- show_buffers();
- printk("%d free buffer pages\n", nr_free_buffer_pages());
- }
-#else /* !CONFIG_DISCONTIGMEM */
printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- while (i-- > 0) {
- if (!VALID_PAGE(mem_map + i))
- continue;
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (page_count(mem_map + i))
- shared += page_count(mem_map + i) - 1;
+ for_each_pgdat(pgdat) {
+ reserved=0;
+ cached=0;
+ shared=0;
+ if (numnodes > 1)
+ printk("Node ID: %d\n", pgdat->node_id);
+ for(i = 0; i < pgdat->node_size; i++) {
+ if (!VALID_PAGE(pgdat->node_mem_map+i))
+ continue;
+ if (PageReserved(pgdat->node_mem_map+i))
+ reserved++;
+ else if (PageSwapCache(pgdat->node_mem_map+i))
+ cached++;
+ else if (page_count(pgdat->node_mem_map + i))
+ shared += page_count(pgdat->node_mem_map + i) - 1;
+ }
+ printk("%s%ld pages of RAM\n", tchar, pgdat->node_size);
+ printk("%s%d reserved pages\n", tchar, reserved);
+ printk("%s%d pages shared\n", tchar, shared);
+ printk("%s%d pages swap cached\n", tchar, cached);
}
- printk("%d pages of RAM\n", total);
- printk("%d reserved pages\n", reserved);
- printk("%d pages shared\n", shared);
- printk("%d pages swap cached\n", cached);
- printk("%ld pages in page table cache\n", pgtable_cache_size);
+ printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
show_buffers();
-#endif /* !CONFIG_DISCONTIGMEM */
+ printk("%d free buffer pages\n", nr_free_buffer_pages());
}
/*
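show_mem() previously carried two bodies, one under CONFIG_DISCONTIGMEM and
one for contiguous memory; the rewrite folds them into a single
for_each_pgdat() loop that resets its counters for every node and indents
the per-node figures only on multi-node boxes. A hedged userspace sketch of
that accounting pattern, with node_sketch standing in for pg_data_t:

#include <stdio.h>

struct node_sketch {
	int id;
	long nr_pages;
	struct node_sketch *next;
};

int main(void)
{
	struct node_sketch n1 = { 1, 2048, NULL };
	struct node_sketch n0 = { 0, 4096, &n1 };
	int numnodes = 2;
	char *tchar = (numnodes > 1) ? "\t" : "";

	for (struct node_sketch *p = &n0; p; p = p->next) {
		long reserved = 0, shared = 0, cached = 0;

		/* the real loop classifies each valid page here */
		if (numnodes > 1)
			printf("Node ID: %d\n", p->id);
		printf("%s%ld pages of RAM\n", tchar, p->nr_pages);
		printf("%s%ld reserved pages\n", tchar, reserved);
		printf("%s%ld pages shared\n", tchar, shared);
		printf("%s%ld pages swap cached\n", tchar, cached);
	}
	return 0;
}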
@@ -360,8 +359,10 @@
static int
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
- unsigned long address, start_page, end_page;
+ unsigned long address, start_page, end_page, next_blk_page;
+ unsigned long blk_start;
struct page *map_start, *map_end;
+ int node=0;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
@@ -374,18 +375,35 @@
start_page = (unsigned long) map_start & PAGE_MASK;
end_page = PAGE_ALIGN((unsigned long) map_end);
+ /* force the first iteration to get node id */
+ blk_start = start;
+ next_blk_page = 0;
+
for (address = start_page; address < end_page; address += PAGE_SIZE) {
+
+ /* if we went across a node boundary, get new nid */
+ if (address >= next_blk_page) {
+ struct page *map_next_blk;
+
+ node = paddr_to_nid(__pa(blk_start));
+
+ /* get end addr of this memblk as next blk_start */
+ blk_start = (unsigned long) __va(min(end, memblk_endpaddr(__pa(blk_start))));
+ map_next_blk = vmem_map + MAP_NR_DENSE(blk_start);
+ next_blk_page = PAGE_ALIGN((unsigned long) map_next_blk);
+ }
+
pgd = pgd_offset_k(address);
if (pgd_none(*pgd))
- pgd_populate(&init_mm, pgd, alloc_bootmem_pages(PAGE_SIZE));
+ pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
pmd = pmd_offset(pgd, address);
if (pmd_none(*pmd))
- pmd_populate(&init_mm, pmd, alloc_bootmem_pages(PAGE_SIZE));
+ pmd_populate(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
pte = pte_offset(pmd, address);
if (pte_none(*pte))
- set_pte(pte, mk_pte_phys(__pa(alloc_bootmem_pages(PAGE_SIZE)),
+ set_pte(pte, mk_pte_phys(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)),
PAGE_KERNEL));
}
return 0;
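create_mem_map_page_table() now walks the pages backing the virtual mem_map
and refreshes the node id whenever it crosses into the pages that describe
the next memory block, so each page-table page can be allocated from the
node whose memory it maps. A simplified, self-contained sketch of that
block-advance logic; the block table, nid_of() and blk_end() are
illustrative stand-ins for paddr_to_nid() and memblk_endpaddr(), and the
sizes are assumptions:

#include <stdio.h>

#define SKETCH_PAGE_SIZE (1UL << 14)   /* assumed 16KB pages */
#define SIZEOF_PAGE      64UL          /* assumed sizeof(struct page) */

struct blk { unsigned long start, end; int nid; };

static struct blk blks[] = {           /* illustrative two-node layout */
	{ 0x00000000UL, 0x40000000UL, 0 },
	{ 0x40000000UL, 0x80000000UL, 1 },
};

static int nid_of(unsigned long pa)
{
	for (int i = 0; i < 2; i++)
		if (pa >= blks[i].start && pa < blks[i].end)
			return blks[i].nid;
	return 0;
}

static unsigned long blk_end(unsigned long pa)
{
	return pa < blks[0].end ? blks[0].end : blks[1].end;
}

/* byte offset into mem_map of the entry describing physical address pa */
static unsigned long map_offset(unsigned long pa)
{
	return (pa / SKETCH_PAGE_SIZE) * SIZEOF_PAGE;
}

int main(void)
{
	unsigned long blk_start = blks[0].start;
	unsigned long next_blk_off = 0;    /* force a lookup on the first pass */
	unsigned long map_end = map_offset(blks[1].end);
	int node = 0;

	for (unsigned long off = 0; off < map_end; off += SKETCH_PAGE_SIZE) {
		if (off >= next_blk_off) {     /* crossed a block boundary */
			node = nid_of(blk_start);
			blk_start = blk_end(blk_start);
			next_blk_off = map_offset(blk_start);
			printf("map offset %#lx onward: allocate from node %d\n",
			       off, node);
		}
		/* pgd/pmd/pte pages for this range come from `node` */
	}
	return 0;
}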
@@ -399,6 +417,14 @@
int highmem;
};
+struct memmap_count_callback_data {
+ int node;
+ unsigned long num_physpages;
+ unsigned long num_dma_physpages;
+ unsigned long min_pfn;
+ unsigned long max_pfn;
+} cdata;
+
static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
@@ -454,17 +480,7 @@
efi_memmap_walk(virtual_memmap_init, &args);
}
- return page_to_phys(end);
-}
-
-static int
-count_dma_pages (u64 start, u64 end, void *arg)
-{
- unsigned long *count = arg;
-
- if (end <= MAX_DMA_ADDRESS)
- *count += (end - start) >> PAGE_SHIFT;
- return 0;
+ return page_to_phys(end-1) + PAGE_SIZE;
}
int
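The return-value fix above deserves a note: `end` points one entry past the
last valid mem_map page, and with a virtually mapped mem_map, translating
that out-of-range entry may touch an unmapped page-table slot. Translating
the last valid entry and adding PAGE_SIZE yields the same end address
safely. A toy demonstration with an array-backed translation, all sizes
illustrative:

#include <stdio.h>

#define SKETCH_PAGE_SIZE 16384UL

struct page_sketch { unsigned long flags; };

static struct page_sketch mem_map_sketch[8];      /* valid entries 0..7 */

/* only defined for pointers into mem_map_sketch[0..7] */
static unsigned long page_to_phys_sketch(struct page_sketch *pg)
{
	return (unsigned long)(pg - mem_map_sketch) * SKETCH_PAGE_SIZE;
}

int main(void)
{
	struct page_sketch *end = mem_map_sketch + 8; /* one past the last page */

	/* translating `end` itself would consult a non-existent entry in the
	 * kernel's virtually mapped mem_map; the safe form is: */
	printf("end phys = %#lx\n",
	       page_to_phys_sketch(end - 1) + SKETCH_PAGE_SIZE);
	return 0;
}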
@@ -475,16 +491,27 @@
return __get_user(byte, (char *) page) == 0;
}
+#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1))
+#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
+#define ORDERROUNDDOWN(n) ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
static int
-count_pages (u64 start, u64 end, void *arg)
+count_pages (u64 start, u64 end, int node)
{
- unsigned long *count = arg;
-
- *count += (end - start) >> PAGE_SHIFT;
+ start = __pa(start);
+ end = __pa(end);
+ if (node == cdata.node) {
+ cdata.num_physpages += (end - start) >> PAGE_SHIFT;
+ if (start <= __pa(MAX_DMA_ADDRESS))
+ cdata.num_dma_physpages += (min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
+ start = GRANULEROUNDDOWN(__pa(start));
+ start = ORDERROUNDDOWN(start);
+ end = GRANULEROUNDUP(__pa(end));
+ cdata.max_pfn = max(cdata.max_pfn, end >> PAGE_SHIFT);
+ cdata.min_pfn = min(cdata.min_pfn, start >> PAGE_SHIFT);
+ }
return 0;
}
-#ifndef CONFIG_DISCONTIGMEM
static int
find_largest_hole(u64 start, u64 end, void *arg)
{
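count_pages() is reworked into a per-node callback: besides counting a
node's total and DMA-capable pages, it tracks the node's spanning pfn
range, widened to granule and MAX_ORDER boundaries so the node's mem_map
covers whole buddy-allocator blocks. A worked example of the three rounding
macros, under assumed sizes (the real granule, page size and MAX_ORDER are
configuration-dependent):

#include <stdio.h>

#define IA64_GRANULE_SIZE (1UL << 28)   /* assumed: 256MB granule */
#define PAGE_SIZE         (1UL << 14)   /* assumed: 16KB pages */
#define MAX_ORDER         10            /* assumed */

#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1))
#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
#define ORDERROUNDDOWN(n) ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))

int main(void)
{
	unsigned long start = 0x12345678UL, end = 0x23456789UL;

	start = GRANULEROUNDDOWN(start);   /* -> 0x10000000 */
	start = ORDERROUNDDOWN(start);     /* already 16MB-aligned */
	end = GRANULEROUNDUP(end);         /* -> 0x30000000 */
	printf("min_pfn %#lx  max_pfn %#lx\n",
	       start >> 14, end >> 14);
	return 0;
}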
@@ -498,7 +525,6 @@
last_end = end;
return 0;
}
-#endif
/*
* Set up the page tables.
@@ -509,73 +535,76 @@
unsigned long max_dma;
unsigned long zones_size[MAX_NR_ZONES];
unsigned long zholes_size[MAX_NR_ZONES];
-#ifndef CONFIG_DISCONTIGMEM
unsigned long max_gap;
-#endif
+ int node;
/* initialize mem_map[] */
- memset(zones_size, 0, sizeof(zones_size));
- memset(zholes_size, 0, sizeof(zholes_size));
-
- num_physpages = 0;
- efi_memmap_walk(count_pages, &num_physpages);
-
- num_dma_physpages = 0;
- efi_memmap_walk(count_dma_pages, &num_dma_physpages);
-
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-
- if (max_low_pfn < max_dma) {
- zones_size[ZONE_DMA] = max_low_pfn;
- zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
- } else {
- zones_size[ZONE_DMA] = max_dma;
- zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
- if (num_physpages > num_dma_physpages) {
- zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
- zholes_size[ZONE_NORMAL] = (max_low_pfn - max_dma)
- - (num_physpages - num_dma_physpages);
- }
- }
-
-#ifdef CONFIG_DISCONTIGMEM
- free_area_init_node(0, NULL, NULL, zones_size, 0, zholes_size);
-#else
max_gap = 0;
efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
- if (max_gap < LARGE_GAP) {
- vmem_map = (struct page *)0;
- free_area_init_node(0, NULL, NULL, zones_size, 0, zholes_size);
+ for (node=0; node < numnodes; node++) {
+ memset(zones_size, 0, sizeof(zones_size));
+ memset(zholes_size, 0, sizeof(zholes_size));
+ memset(&cdata, 0, sizeof(cdata));
+
+ cdata.node = node;
+ cdata.min_pfn = ~0;
+
+ efi_memmap_walk(filter_rsvd_memory, count_pages);
+ num_dma_physpages += cdata.num_dma_physpages;
+ num_physpages += cdata.num_physpages;
+
+ if (cdata.min_pfn >= max_dma) {
+ zones_size[ZONE_NORMAL] = cdata.max_pfn - cdata.min_pfn;
+ zholes_size[ZONE_NORMAL] = cdata.max_pfn - cdata.min_pfn - cdata.num_physpages;
+ } else if (cdata.max_pfn < max_dma) {
+ zones_size[ZONE_DMA] = cdata.max_pfn - cdata.min_pfn;
+ zholes_size[ZONE_DMA] = cdata.max_pfn - cdata.min_pfn - cdata.num_dma_physpages;
+ } else {
+ zones_size[ZONE_DMA] = max_dma - cdata.min_pfn;
+ zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - cdata.num_dma_physpages;
+ zones_size[ZONE_NORMAL] = cdata.max_pfn - max_dma;
+ zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] - (cdata.num_physpages - cdata.num_dma_physpages);
+ }
+
+ if (numnodes == 1 && max_gap < LARGE_GAP) {
+ vmem_map = (struct page *)0;
+ zones_size[ZONE_DMA] += cdata.min_pfn;
+ zholes_size[ZONE_DMA] += cdata.min_pfn;
+ free_area_init_core(0, NODE_DATA(node), &mem_map, zones_size, 0, zholes_size, NULL);
+ } else {
+
+ /* allocate virtual mem_map */
+
+ if (node == 0) {
+ unsigned long map_size;
+ map_size = PAGE_ALIGN(max_low_pfn*sizeof(struct page));
+ vmalloc_end -= map_size;
+ mem_map = vmem_map = (struct page *) vmalloc_end;
+ efi_memmap_walk(create_mem_map_page_table, 0);
+ printk(KERN_INFO "Virtual mem_map starts at 0x%p\n", mem_map);
+ }
+
+ free_area_init_node(node, NODE_DATA(node), vmem_map+cdata.min_pfn, zones_size,
+ cdata.min_pfn<<PAGE_SHIFT, zholes_size);
+ }
}
- else {
- unsigned long map_size;
- /* allocate virtual mem_map */
-
- map_size = PAGE_ALIGN(max_low_pfn*sizeof(struct page));
- vmalloc_end -= map_size;
- vmem_map = (struct page *) vmalloc_end;
- efi_memmap_walk(create_mem_map_page_table, 0);
-
- free_area_init_node(0, NULL, vmem_map, zones_size, 0, zholes_size);
- printk(KERN_INFO "Virtual mem_map starts at 0x%p\n", mem_map);
- }
-#endif
+ zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
unsigned long num_reserved = 0;
- unsigned long *count = arg;
struct page *pg;
for (pg = virt_to_page((void *)start); pg < virt_to_page((void *)end); ++pg)
if (PageReserved(pg))
++num_reserved;
- *count += num_reserved;
+ reserved_pages += num_reserved;
return 0;
}
@@ -583,8 +612,10 @@
mem_init (void)
{
extern char __start_gate_section[];
- long reserved_pages, codesize, datasize, initsize;
+ long codesize, datasize, initsize;
unsigned long num_pgt_pages;
+ pg_data_t *pgdat;
+
#ifdef CONFIG_PCI
/*
@@ -601,10 +632,11 @@
max_mapnr = max_low_pfn;
high_memory = __va(max_low_pfn * PAGE_SIZE);
- totalram_pages += free_all_bootmem();
+ for_each_pgdat(pgdat)
+ totalram_pages += free_all_bootmem_node(pgdat);
reserved_pages = 0;
- efi_memmap_walk(count_reserved_pages, &reserved_pages);
+ efi_memmap_walk(filter_rsvd_memory, count_reserved_pages);
codesize = (unsigned long) &_etext - (unsigned long) &_stext;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
@@ -629,7 +661,7 @@
pgt_cache_water[1] = num_pgt_pages;
/* install the gate page in the global page table: */
- put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);
+ put_gate_page(virt_to_page(ia64_imva(__start_gate_section)), GATE_ADDR);
#ifdef CONFIG_IA32_SUPPORT
ia32_gdt_init();