patch-2.1.106 linux/mm/mlock.c
- Lines: 84
- Date: Tue Jun 9 11:10:20 1998
- Orig file: v2.1.105/linux/mm/mlock.c
- Orig date: Fri May 8 23:14:57 1998
diff -u --recursive --new-file v2.1.105/linux/mm/mlock.c linux/mm/mlock.c
@@ -127,13 +127,25 @@
 			pages = -pages;
 		vma->vm_mm->locked_vm += pages;
 
-		if (newflags & VM_LOCKED)
+#if 0
+/*
+ * This is horribly broken. See the comment on the same
+ * brokenness in mm/mmap.c (essentially, this doesn't
+ * work anyway for PROT_NONE and writable pages, and now
+ * that we properly get the mmap semaphore it would just
+ * lock up on us).
+ *
+ * Fix the same way.
+ */
+		if (newflags & VM_LOCKED) {
 			while (start < end) {
 				int c;
 				get_user(c,(int *) start);
 				__asm__ __volatile__("": :"r" (c));
 				start += PAGE_SIZE;
 			}
+		}
+#endif
 	}
 	return retval;
 }
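The loop disabled above pre-faulted the locked range by reading each page through get_user(). As the new comment notes, that is doubly broken: the read fault fails outright on a PROT_NONE mapping, and on a writable private mapping it only installs a read-only page, so the range is still not genuinely resident for writing; worse, now that these syscalls hold the mmap semaphore, the fault path taken by get_user() would try to take it again and deadlock. The "fix the same way" the comment points at is to drive the fault handler directly; later kernels do this with a make_pages_present() helper in mm/memory.c. A minimal sketch of that technique, assuming the handle_mm_fault(tsk, vma, address, write_access) interface of this era (the names and exact signature here are assumptions, not part of this patch):

	#include <linux/mm.h>

	/*
	 * Sketch: fault every page in the range in by hand instead of
	 * going through get_user().  Asking for a write fault when the
	 * vma is writable resolves copy-on-write up front, and calling
	 * handle_mm_fault() directly avoids re-entering the user-copy
	 * fault path while mmap_sem is held.
	 */
	static void make_pages_present(unsigned long addr, unsigned long end)
	{
		struct vm_area_struct * vma;
		int write;

		vma = find_vma(current->mm, addr);
		write = (vma->vm_flags & VM_WRITE) != 0;
		while (addr < end) {
			handle_mm_fault(current, vma, addr, write);
			addr += PAGE_SIZE;
		}
	}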
@@ -192,6 +204,7 @@
 	unsigned long lock_limit;
 	int error = -ENOMEM;
 
+	down(&current->mm->mmap_sem);
 	lock_kernel();
 	len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
 	start &= PAGE_MASK;
@@ -214,6 +227,7 @@
 	error = do_mlock(start, len, 1);
 out:
 	unlock_kernel();
+	up(&current->mm->mmap_sem);
 	return error;
 }
 
@@ -221,11 +235,13 @@
 {
 	int ret;
 
+	down(&current->mm->mmap_sem);
 	lock_kernel();
 	len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
 	start &= PAGE_MASK;
 	ret = do_mlock(start, len, 0);
 	unlock_kernel();
+	up(&current->mm->mmap_sem);
 	return ret;
 }
 
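Both sys_mlock() and sys_munlock() normalize their arguments with the same arithmetic: start & ~PAGE_MASK is the offset of start within its page, and adding that offset plus ~PAGE_MASK (i.e. PAGE_SIZE - 1) to len before masking with PAGE_MASK rounds the length up so the range covers every page it touches. A standalone worked example of the rounding, assuming a 4096-byte page (a userspace demo, not kernel code):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long start = 0x1234;	/* 0x234 bytes into its page */
		unsigned long len = 0x100;	/* stays within that one page */

		/* offset-in-page plus PAGE_SIZE - 1 forces a round-up */
		len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
		start &= PAGE_MASK;

		/* prints "start=0x1000 len=0x1000": exactly one page */
		printf("start=%#lx len=%#lx\n", start, len);
		return 0;
	}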
@@ -263,6 +279,7 @@
 	unsigned long lock_limit;
 	int ret = -EINVAL;
 
+	down(&current->mm->mmap_sem);
 	lock_kernel();
 	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
 		goto out;
@@ -282,6 +299,7 @@
 	ret = do_mlockall(flags);
 out:
 	unlock_kernel();
+	up(&current->mm->mmap_sem);
 	return ret;
 }
 
@@ -289,8 +307,10 @@
 {
 	int ret;
 
+	down(&current->mm->mmap_sem);
 	lock_kernel();
 	ret = do_mlockall(0);
 	unlock_kernel();
+	up(&current->mm->mmap_sem);
 	return ret;
 }
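Taken together, the additions give all four syscalls in this file (sys_mlock, sys_munlock, sys_mlockall, sys_munlockall) the same locking discipline: mm->mmap_sem is acquired before lock_kernel() and released after unlock_kernel(), so the two locks strictly nest in one global order. A sketch of the pattern each syscall now follows (do_work() is a hypothetical stand-in for the real body):

	#include <linux/sched.h>
	#include <linux/smp_lock.h>

	asmlinkage int sys_mlock_style(void)
	{
		int error;

		down(&current->mm->mmap_sem);	/* outer: serialize VM changes */
		lock_kernel();			/* inner: big kernel lock */
		error = do_work();		/* hypothetical syscall body */
		unlock_kernel();
		up(&current->mm->mmap_sem);	/* release in reverse order */
		return error;
	}

Taking the two locks in the same order on every path is what makes the nesting safe: if one syscall took the kernel lock first and another took mmap_sem first, the two could deadlock against each other.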