patch-2.4.10 linux/mm/numa.c
Next file: linux/mm/oom_kill.c
Previous file: linux/mm/mremap.c
Back to the patch index
Back to the overall index
- Lines: 29
- Date: Mon Sep 17 16:15:02 2001
- Orig file: v2.4.9/linux/mm/numa.c
- Orig date: Tue Jul 3 17:08:22 2001
diff -u --recursive --new-file v2.4.9/linux/mm/numa.c linux/mm/numa.c
@@ -31,7 +31,7 @@
#endif /* !CONFIG_DISCONTIGMEM */
-struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order)
+struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order)
{
#ifdef CONFIG_NUMA
return __alloc_pages(gfp_mask, order, NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
@@ -82,8 +82,8 @@
memset(pgdat->valid_addr_bitmap, 0, size);
}
-static struct page * alloc_pages_pgdat(pg_data_t *pgdat, int gfp_mask,
- unsigned long order)
+static struct page * alloc_pages_pgdat(pg_data_t *pgdat, unsigned int gfp_mask,
+ unsigned int order)
{
return __alloc_pages(gfp_mask, order, pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK));
}
@@ -92,7 +92,7 @@
* This can be refined. Currently, tries to do round robin, instead
* should do concentratic circle search, starting from current node.
*/
-struct page * _alloc_pages(unsigned int gfp_mask, unsigned long order)
+struct page * _alloc_pages(unsigned int gfp_mask, unsigned int order)
{
struct page *ret = 0;
pg_data_t *start, *temp;
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)