patch-2.0.32 linux/fs/locks.c
- Lines: 368
- Date: Fri Nov 7 15:35:04 1997
- Orig file: v2.0.31/linux/fs/locks.c
- Orig date: Mon Sep 1 11:20:42 1997
diff -u --recursive --new-file v2.0.31/linux/fs/locks.c linux/fs/locks.c
@@ -130,8 +130,9 @@
struct task_struct *blocked_task);
static void posix_remove_locks(struct file_lock **before, struct task_struct *task);
static void flock_remove_locks(struct file_lock **before, struct file *filp);
-
-static struct file_lock *locks_alloc_lock(struct file_lock *fl);
+static struct file_lock *locks_empty_lock(void);
+static struct file_lock *locks_init_lock(struct file_lock *,
+ struct file_lock *);
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl);
static void locks_delete_lock(struct file_lock **thisfl_p, unsigned int wait);
static char *lock_get_status(struct file_lock *fl, int id, char *pfx);
@@ -142,6 +143,15 @@
static struct file_lock *file_lock_table = NULL;
+/* Allocate a new lock, and initialize its fields from fl.
+ * The lock is not inserted into any lists until locks_insert_lock() or
+ * locks_insert_block() are called.
+ */
+static inline struct file_lock *locks_alloc_lock(struct file_lock *fl)
+{
+ return locks_init_lock(locks_empty_lock(), fl);
+}
+
/* Free lock not inserted in any queue.
*/
static inline void locks_free_lock(struct file_lock *fl)
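The new inline above composes the two helpers this patch introduces near the end of the file: locks_empty_lock() does the (possibly sleeping) allocation, while locks_init_lock() only copies the caller's fields and never blocks. A minimal sketch of the intended calling pattern (error handling abbreviated):

	/* Allocate before scanning the inode's lock list, so the scan itself
	 * can no longer fail on memory.
	 */
	struct file_lock *new_fl = locks_empty_lock();	/* may sleep (GFP_KERNEL) */
	if (new_fl == NULL)
		return (-ENOLCK);
	locks_init_lock(new_fl, caller);	/* copy owner, file, type, start, end */
	/* ... scan i_flock, then locks_insert_lock(before, new_fl) ... */
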
@@ -281,6 +291,7 @@
if (!filp->f_inode || !posix_make_lock(filp, &file_lock, &flock))
return (-EINVAL);
+ flock.l_type = F_UNLCK;
for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!(fl->fl_flags & FL_POSIX))
break;
@@ -291,12 +302,10 @@
fl->fl_end - fl->fl_start + 1;
flock.l_whence = 0;
flock.l_type = fl->fl_type;
- memcpy_tofs(l, &flock, sizeof(flock));
- return (0);
+ break;
}
}
- flock.l_type = F_UNLCK; /* no conflict found */
memcpy_tofs(l, &flock, sizeof(flock));
return (0);
}
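After these two hunks fcntl_getlk() has a single copy-out path: l_type starts out as F_UNLCK and is only overwritten when the scan finds a conflicting POSIX lock, at which point the loop breaks to the common memcpy_tofs(). A sketch of the resulting flow (the conflict test itself sits outside the visible context; posix_locks_conflict() is assumed):

	flock.l_type = F_UNLCK;			/* assume no conflict */
	for (fl = filp->f_inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_POSIX))
			break;
		if (posix_locks_conflict(&file_lock, fl)) {
			/* report the first conflicting lock's range and type */
			flock.l_whence = 0;
			flock.l_type = fl->fl_type;
			break;
		}
	}
	memcpy_tofs(l, &flock, sizeof(flock));	/* one exit for both cases */
	return (0);
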
@@ -327,6 +336,11 @@
if (!(inode = filp->f_inode))
return (-EINVAL);
+ /*
+ * This might block, so we do it before checking the inode.
+ */
+ memcpy_fromfs(&flock, l, sizeof(flock));
+
/* Don't allow mandatory locks on files that may be memory mapped
* and shared.
*/
@@ -341,7 +355,6 @@
} while (vma != inode->i_mmap);
}
- memcpy_fromfs(&flock, l, sizeof(flock));
if (!posix_make_lock(filp, &file_lock, &flock))
return (-EINVAL);
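The point of these two hunks is ordering: memcpy_fromfs() may fault in the user's page and sleep, so the copy is done up front rather than after the inode checks. A sketch of the reordered entry path in fcntl_setlk() (the mandatory-lock test on shared mappings is abbreviated):

	if (!(inode = filp->f_inode))
		return (-EINVAL);
	memcpy_fromfs(&flock, l, sizeof(flock));	/* may block on a page fault */
	/* ... reject mandatory locks on shared, writable mappings ... */
	if (!posix_make_lock(filp, &file_lock, &flock))
		return (-EINVAL);
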
@@ -708,19 +721,38 @@
unsigned int wait)
{
struct file_lock *fl;
- struct file_lock *new_fl;
+ struct file_lock *new_fl = NULL;
struct file_lock **before;
- int change = 0;
+ int error;
+ int change;
+ int unlock = (caller->fl_type == F_UNLCK);
+
+ /*
+ * If we need a new lock, get it in advance to avoid races.
+ */
+ if (!unlock) {
+ error = -ENOLCK;
+ new_fl = locks_alloc_lock(caller);
+ if (!new_fl)
+ goto out;
+ }
+
+
+ error = 0;
+search:
+ change = 0;
before = &filp->f_inode->i_flock;
- if ((fl = *before) && (fl->fl_flags & FL_POSIX))
- return (-EBUSY);
+ if ((fl = *before) && (fl->fl_flags & FL_POSIX)) {
+ error = -EBUSY;
+ goto out;
+ }
while ((fl = *before) != NULL) {
if (caller->fl_file == fl->fl_file) {
if (caller->fl_type == fl->fl_type)
- return (0);
+ goto out;
change = 1;
break;
}
@@ -729,50 +761,48 @@
/* change means that we are changing the type of an existing lock, or
* or else unlocking it.
*/
- if (change)
- locks_delete_lock(before, caller->fl_type != F_UNLCK);
- if (caller->fl_type == F_UNLCK)
- return (0);
- if ((new_fl = locks_alloc_lock(caller)) == NULL)
- return (-ENOLCK);
-repeat:
- if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_POSIX)) {
- locks_free_lock(new_fl);
- return (-EBUSY);
+ if (change) {
+ /* N.B. What if the wait argument is false? */
+ locks_delete_lock(before, !unlock);
+ /*
+ * If we waited, another lock may have been added ...
+ */
+ if (!unlock)
+ goto search;
}
+ if (unlock)
+ goto out;
+
+repeat:
+ /* Check signals each time we start */
+ error = -ERESTARTSYS;
+ if (current->signal & ~current->blocked)
+ goto out;
+ error = -EBUSY;
+ if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_POSIX))
+ goto out;
while (fl != NULL) {
if (!flock_locks_conflict(new_fl, fl)) {
fl = fl->fl_next;
continue;
}
- if (!wait) {
- locks_free_lock(new_fl);
- return (-EAGAIN);
- }
- if (current->signal & ~current->blocked) {
- /* Note: new_fl is not in any queue at this
- * point, so we must use locks_free_lock()
- * instead of locks_delete_lock()
- * Dmitry Gorodchanin 09/02/96.
- */
- locks_free_lock(new_fl);
- return (-ERESTARTSYS);
- }
+ error = -EAGAIN;
+ if (!wait)
+ goto out;
locks_insert_block(fl, new_fl);
interruptible_sleep_on(&new_fl->fl_wait);
locks_delete_block(fl, new_fl);
- if (current->signal & ~current->blocked) {
- /* Awakened by a signal. Free the new
- * lock and return an error.
- */
- locks_free_lock(new_fl);
- return (-ERESTARTSYS);
- }
goto repeat;
}
locks_insert_lock(&filp->f_inode->i_flock, new_fl);
- return (0);
+ new_fl = NULL;
+ error = 0;
+
+out:
+ if (new_fl)
+ locks_free_lock(new_fl);
+ return (error);
}
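In the rewritten flock-style routine the allocation is hoisted to the top and every exit funnels through the single out: label, so new_fl is freed exactly once if it was never inserted. A condensed skeleton of the new shape (the search and sleep loops are elided):

static int flock_lock_file(struct file *filp, struct file_lock *caller,
			   unsigned int wait)
{
	struct file_lock *new_fl = NULL;
	int error;
	int unlock = (caller->fl_type == F_UNLCK);

	if (!unlock) {
		error = -ENOLCK;
		new_fl = locks_alloc_lock(caller);	/* may sleep; done before any list walk */
		if (!new_fl)
			goto out;
	}
	error = 0;
	/* search: delete any existing lock we own, re-search if that waited */
	/* repeat: check signals, fail on POSIX locks, sleep on FL_FLOCK conflicts */
	locks_insert_lock(&filp->f_inode->i_flock, new_fl);
	new_fl = NULL;				/* now owned by the inode's list */
	error = 0;
out:
	if (new_fl)
		locks_free_lock(new_fl);	/* never made it onto any queue */
	return (error);
}
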
/* Add a POSIX style lock to a file.
@@ -791,44 +821,62 @@
unsigned int wait)
{
struct file_lock *fl;
- struct file_lock *new_fl;
+ struct file_lock *new_fl, *new_fl2;
struct file_lock *left = NULL;
struct file_lock *right = NULL;
struct file_lock **before;
+ int error;
int added = 0;
+ /*
+ * We may need two file_lock structures for this operation,
+ * so we get them in advance to avoid races.
+ */
+ new_fl = locks_empty_lock();
+ new_fl2 = locks_empty_lock();
+ error = -ENOLCK; /* "no luck" */
+ if (!(new_fl && new_fl2))
+ goto out;
+
if (caller->fl_type != F_UNLCK) {
repeat:
+ error = -EBUSY;
if ((fl = filp->f_inode->i_flock) && (fl->fl_flags & FL_FLOCK))
- return (-EBUSY);
+ goto out;
while (fl != NULL) {
if (!posix_locks_conflict(caller, fl)) {
fl = fl->fl_next;
continue;
}
+ error = -EAGAIN;
if (!wait)
- return (-EAGAIN);
- if (current->signal & ~current->blocked)
- return (-ERESTARTSYS);
+ goto out;
+ error = -EDEADLK;
if (posix_locks_deadlock(caller->fl_owner, fl->fl_owner))
- return (-EDEADLK);
+ goto out;
+ error = -ERESTARTSYS;
+ if (current->signal & ~current->blocked)
+ goto out;
locks_insert_block(fl, caller);
interruptible_sleep_on(&caller->fl_wait);
locks_delete_block(fl, caller);
- if (current->signal & ~current->blocked)
- return (-ERESTARTSYS);
goto repeat;
}
}
- /* Find the first old lock with the same owner as the new lock.
+ /*
+ * We've allocated the new locks in advance, so there are no
+ * errors possible (and no blocking operations) from here on.
+ *
+ * Find the first old lock with the same owner as the new lock.
*/
before = &filp->f_inode->i_flock;
+ error = -EBUSY;
if ((*before != NULL) && ((*before)->fl_flags & FL_FLOCK))
- return (-EBUSY);
+ goto out;
/* First skip locks owned by other processes.
*/
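posix_lock_file() may have to insert one lock and split another, so both file_lock structures are taken up front; once past the blocking and conflict phase there is nothing left that can fail, which is what the "no errors possible" comment above relies on. A condensed sketch of the allocation/cleanup bracket (the merge and split logic in between is elided):

	struct file_lock *new_fl, *new_fl2;
	int error;

	new_fl  = locks_empty_lock();	/* for the caller's new lock */
	new_fl2 = locks_empty_lock();	/* spare, in case an old lock must be split */
	error = -ENOLCK;
	if (!(new_fl && new_fl2))
		goto out;
	/* ... conflict checks, sleeping, merging and splitting; whoever
	 * inserts a pre-allocated lock sets its pointer to NULL ... */
	error = 0;
out:
	if (new_fl)
		kfree(new_fl);		/* never initialized or queued, so plain kfree() */
	if (new_fl2)
		kfree(new_fl2);
	return error;
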
@@ -916,25 +964,23 @@
before = &fl->fl_next;
}
+ error = 0;
if (!added) {
if (caller->fl_type == F_UNLCK)
- return (0);
- if ((new_fl = locks_alloc_lock(caller)) == NULL)
- return (-ENOLCK);
+ goto out;
+ locks_init_lock(new_fl, caller);
locks_insert_lock(before, new_fl);
+ new_fl = NULL;
}
if (right) {
if (left == right) {
- /* The new lock breaks the old one in two pieces, so we
- * have to allocate one more lock (in this case, even
- * F_UNLCK may fail!).
+ /* The new lock breaks the old one in two pieces,
+ * so we have to use the second new lock (in this
+ * case, even F_UNLCK may fail!).
*/
- if ((left = locks_alloc_lock(right)) == NULL) {
- if (!added)
- locks_delete_lock(before, 0);
- return (-ENOLCK);
- }
+ left = locks_init_lock(new_fl2, right);
locks_insert_lock(before, left);
+ new_fl2 = NULL;
}
right->fl_start = caller->fl_end + 1;
locks_wake_up_blocks(right, 0);
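The left == right branch above is the one place where a range operation produces more locks than it consumes, which is why the second pre-allocated structure exists. A small worked example (numbers chosen for illustration):

	/* An existing lock covers 0-99 and the caller locks (or unlocks) 40-59.
	 * The old lock must become two pieces, so even F_UNLCK needs new_fl2:
	 *
	 *	before:   [0 ........................... 99]
	 *	caller:               [40 ..... 59]
	 *	after:    [0 .. 39]                 [60 .. 99]
	 *
	 * new_fl2 is initialized as a copy of the old lock and inserted as the
	 * left piece (fl_end becomes caller->fl_start - 1 = 39), while the old
	 * lock keeps the right piece (fl_start becomes caller->fl_end + 1 = 60).
	 */
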
@@ -943,32 +989,44 @@
left->fl_end = caller->fl_start - 1;
locks_wake_up_blocks(left, 0);
}
- return (0);
+out:
+ /*
+ * Free any unused locks. (They haven't
+ * ever been used, so we use kfree().)
+ */
+ if (new_fl)
+ kfree(new_fl);
+ if (new_fl2)
+ kfree(new_fl2);
+ return error;
}
-/* Allocate new lock.
- * Initialize its fields from fl. The lock is not inserted into any
- * lists until locks_insert_lock() or locks_insert_block() are called.
+/*
+ * Allocate an empty lock structure. We can use GFP_KERNEL now that
+ * all allocations are done in advance.
*/
-static struct file_lock *locks_alloc_lock(struct file_lock *fl)
+static struct file_lock *locks_empty_lock(void)
{
- struct file_lock *tmp;
-
- /* Okay, let's make a new file_lock structure... */
- if ((tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock),
- GFP_ATOMIC)) == NULL)
- return (tmp);
-
- memset(tmp, 0, sizeof(*tmp));
-
- tmp->fl_flags = fl->fl_flags;
- tmp->fl_owner = fl->fl_owner;
- tmp->fl_file = fl->fl_file;
- tmp->fl_type = fl->fl_type;
- tmp->fl_start = fl->fl_start;
- tmp->fl_end = fl->fl_end;
+ return ((struct file_lock *) kmalloc(sizeof(struct file_lock),
+ GFP_KERNEL));
+}
- return (tmp);
+/*
+ * Initialize a new lock from an existing file_lock structure.
+ */
+static struct file_lock *locks_init_lock(struct file_lock *new,
+ struct file_lock *fl)
+{
+ if (new) {
+ memset(new, 0, sizeof(*new));
+ new->fl_owner = fl->fl_owner;
+ new->fl_file = fl->fl_file;
+ new->fl_flags = fl->fl_flags;
+ new->fl_type = fl->fl_type;
+ new->fl_start = fl->fl_start;
+ new->fl_end = fl->fl_end;
+ }
+ return new;
}
/* Insert file lock fl into an inode's lock list at the position indicated