patch-2.4.20 linux-2.4.20/mm/memory.c
- Lines: 35
- Date: Thu Nov 28 15:53:15 2002
- Orig file: linux-2.4.19/mm/memory.c
- Orig date: Fri Aug 2 17:39:46 2002
diff -urN linux-2.4.19/mm/memory.c linux-2.4.20/mm/memory.c
@@ -146,7 +146,6 @@
 void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
 {
 	pgd_t * page_dir = mm->pgd;
-	unsigned long last = first + nr;
 
 	spin_lock(&mm->page_table_lock);
 	page_dir += first;
@@ -156,8 +155,6 @@
 	} while (--nr);
 	spin_unlock(&mm->page_table_lock);
 
-	flush_tlb_pgtables(mm, first * PGDIR_SIZE, last * PGDIR_SIZE);
-
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 }
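
With these two hunks applied, clear_page_tables() in 2.4.20 no longer computes "last" and no longer calls flush_tlb_pgtables(); the rest of the function is unchanged. For orientation, the resulting function reads roughly as follows. This is a reconstruction from the context lines above; the do/while body falls between the two hunks and is not part of this excerpt, so the free_one_pgd() call shown there is an assumption taken from the surrounding 2.4 source.

/* Sketch of clear_page_tables() with the patch applied (reconstructed;
 * the loop body is assumed, not shown in the hunks above). */
void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
{
	pgd_t * page_dir = mm->pgd;

	spin_lock(&mm->page_table_lock);
	page_dir += first;
	do {
		free_one_pgd(page_dir);		/* assumed from surrounding source */
		page_dir++;
	} while (--nr);
	spin_unlock(&mm->page_table_lock);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}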
@@ -589,6 +586,8 @@
  * occurs, the number of bytes read into memory may be less than the
  * size of the kiobuf, so we have to stop marking pages dirty once the
  * requested byte count has been reached.
+ *
+ * Must be called from process context - set_page_dirty() takes VFS locks.
  */
 
 void mark_dirty_kiobuf(struct kiobuf *iobuf, int bytes)
@@ -606,7 +605,7 @@
 		page = iobuf->maplist[index];
 
 		if (!PageReserved(page))
-			SetPageDirty(page);
+			set_page_dirty(page);
 
 		remaining -= (PAGE_SIZE - offset);
 		offset = 0;