patch-2.4.10 linux/mm/swap_state.c
- Lines: 255
- Date: Sat Sep 22 20:36:50 2001
- Orig file: v2.4.9/linux/mm/swap_state.c
- Orig date: Wed Jul 25 17:10:26 2001
diff -u --recursive --new-file v2.4.9/linux/mm/swap_state.c linux/mm/swap_state.c
@@ -23,17 +23,11 @@
  */
 static int swap_writepage(struct page *page)
 {
-        /* One for the page cache, one for this user, one for page->buffers */
-        if (page_count(page) > 2 + !!page->buffers)
-                goto in_use;
-        if (swap_count(page) > 1)
-                goto in_use;
-
-        /* We could remove it here, but page_launder will do it anyway */
-        UnlockPage(page);
-        return 0;
-
-in_use:
+        if (exclusive_swap_page(page)) {
+                delete_from_swap_cache(page);
+                UnlockPage(page);
+                return 0;
+        }
         rw_swap_page(WRITE, page);
         return 0;
 }
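
The rewritten swap_writepage() replaces its open-coded copy of the exclusivity test with a call to exclusive_swap_page(), the same helper that free_page_and_swap_cache() already uses (third hunk below). A minimal sketch of the test being encapsulated, reconstructed from the lines removed above; the real helper lives in mm/swapfile.c (the next file in this patch) and is the authoritative version:

static int exclusive_swap_page_sketch(struct page *page)
{
        /* one reference for the page cache, one for this caller,
         * one for page->buffers if buffers are attached */
        if (page_count(page) > 2 + !!page->buffers)
                return 0;
        if (swap_count(page) > 1)       /* swap entry shared with other ptes */
                return 0;
        return 1;                       /* page and entry are exclusively ours */
}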
@@ -75,75 +69,57 @@
 #endif
         if (!PageLocked(page))
                 BUG();
-        if (PageTestandSetSwapCache(page))
-                BUG();
         if (page->mapping)
                 BUG();
-        flags = page->flags & ~((1 << PG_error) | (1 << PG_arch_1));
+
+        /* clear PG_dirty so a subsequent set_page_dirty takes effect */
+        flags = page->flags & ~(1 << PG_error | 1 << PG_dirty | 1 << PG_arch_1 | 1 << PG_referenced);
         page->flags = flags | (1 << PG_uptodate);
-        page->age = PAGE_AGE_START;
         add_to_page_cache_locked(page, &swapper_space, entry.val);
 }
 
-static inline void remove_from_swap_cache(struct page *page)
-{
-        struct address_space *mapping = page->mapping;
-
-        if (mapping != &swapper_space)
-                BUG();
-        if (!PageSwapCache(page) || !PageLocked(page))
-                PAGE_BUG(page);
-
-        PageClearSwapCache(page);
-        ClearPageDirty(page);
-        __remove_inode_page(page);
-}
-
 /*
  * This must be called only on pages that have
  * been verified to be in the swap cache.
  */
 void __delete_from_swap_cache(struct page *page)
 {
-        swp_entry_t entry;
-
-        entry.val = page->index;
-
 #ifdef SWAP_CACHE_INFO
         swap_cache_del_total++;
 #endif
-        remove_from_swap_cache(page);
-        swap_free(entry);
+        if (!PageLocked(page))
+                BUG();
+        if (!PageSwapCache(page))
+                BUG();
+
+        ClearPageDirty(page);
+        __remove_inode_page(page);
 }
 
 /*
- * This will never put the page into the free list, the caller has
- * a reference on the page.
+ * This must be called only on pages that have
+ * been verified to be in the swap cache and locked.
+ * It will never put the page into the free list,
+ * the caller has a reference on the page.
  */
-void delete_from_swap_cache_nolock(struct page *page)
+void delete_from_swap_cache(struct page *page)
 {
+        swp_entry_t entry;
+
         if (!PageLocked(page))
                 BUG();
 
         if (block_flushpage(page, 0))
                 lru_cache_del(page);
 
+        entry.val = page->index;
+
         spin_lock(&pagecache_lock);
-        ClearPageDirty(page);
         __delete_from_swap_cache(page);
         spin_unlock(&pagecache_lock);
-        page_cache_release(page);
-}
 
-/*
- * This must be called only on pages that have
- * been verified to be in the swap cache and locked.
- */
-void delete_from_swap_cache(struct page *page)
-{
-        lock_page(page);
-        delete_from_swap_cache_nolock(page);
-        UnlockPage(page);
+        swap_free(entry);
+        page_cache_release(page);
 }
 
 /*
@@ -163,20 +139,18 @@
          */
         if (PageSwapCache(page) && !TryLockPage(page)) {
                 if (exclusive_swap_page(page))
-                        delete_from_swap_cache_nolock(page);
+                        delete_from_swap_cache(page);
                 UnlockPage(page);
         }
         page_cache_release(page);
 }
 
-
 /*
  * Lookup a swap entry in the swap cache. A found page will be returned
  * unlocked and with its refcount incremented - we rely on the kernel
  * lock getting page table operations atomic even if we drop the page
  * lock before returning.
  */
-
 struct page * lookup_swap_cache(swp_entry_t entry)
 {
         struct page *found;
@@ -184,72 +158,80 @@
 #ifdef SWAP_CACHE_INFO
         swap_cache_find_total++;
 #endif
-        while (1) {
-                /*
-                 * Right now the pagecache is 32-bit only. But it's a 32 bit index. =)
-                 */
-                found = find_get_swapcache_page(&swapper_space, entry.val);
-                if (!found)
-                        return 0;
-                if (!PageSwapCache(found))
-                        BUG();
-                if (found->mapping != &swapper_space)
-                        BUG();
+        found = find_get_page(&swapper_space, entry.val);
+        /*
+         * Unsafe to assert PageSwapCache and mapping on page found:
+         * if SMP nothing prevents swapoff from deleting this page from
+         * the swap cache at this moment. find_lock_page would prevent
+         * that, but no need to change: we _have_ got the right page.
+         */
 #ifdef SWAP_CACHE_INFO
+        if (found)
                 swap_cache_find_success++;
 #endif
-                return found;
-        }
+        return found;
 }
 
 /*
  * Locate a page of swap in physical memory, reserving swap cache space
- * and reading the disk if it is not already cached. If wait==0, we are
- * only doing readahead, so don't worry if the page is already locked.
- *
+ * and reading the disk if it is not already cached.
  * A failure return means that either the page allocation failed or that
  * the swap entry is no longer in use.
  */
-
 struct page * read_swap_cache_async(swp_entry_t entry)
 {
-        struct page *found_page = 0, *new_page;
+        struct page *found_page, *new_page;
+        struct page **hash;
 
         /*
-         * Make sure the swap entry is still in use.
+         * Look for the page in the swap cache. Since we normally call
+         * this only after lookup_swap_cache() failed, re-calling that
+         * would confuse the statistics: use __find_get_page() directly.
          */
-        if (!swap_duplicate(entry))     /* Account for the swap cache */
-                goto out;
-        /*
-         * Look for the page in the swap cache.
-         */
-        found_page = lookup_swap_cache(entry);
+        hash = page_hash(&swapper_space, entry.val);
+        found_page = __find_get_page(&swapper_space, entry.val, hash);
         if (found_page)
-                goto out_free_swap;
+                goto out;
 
         new_page = alloc_page(GFP_HIGHUSER);
         if (!new_page)
-                goto out_free_swap;     /* Out of memory */
+                goto out;               /* Out of memory */
+        if (TryLockPage(new_page))
+                BUG();
 
         /*
          * Check the swap cache again, in case we stalled above.
+         * swap_list_lock is guarding against races between this check
+         * and where the new page is added to the swap cache below.
+         * It is also guarding against race where try_to_swap_out
+         * allocates entry with get_swap_page then adds to cache.
          */
-        found_page = lookup_swap_cache(entry);
+        swap_list_lock();
+        found_page = __find_get_page(&swapper_space, entry.val, hash);
         if (found_page)
                 goto out_free_page;
+
+        /*
+         * Make sure the swap entry is still in use. It could have gone
+         * since caller dropped page_table_lock, while allocating page above,
+         * or while allocating page in prior call via swapin_readahead.
+         */
+        if (!swap_duplicate(entry))     /* Account for the swap cache */
+                goto out_free_page;
+
         /*
          * Add it to the swap cache and read its contents.
         */
-        if (TryLockPage(new_page))
-                BUG();
         add_to_swap_cache(new_page, entry);
+        swap_list_unlock();
+
         rw_swap_page(READ, new_page);
         return new_page;
 
 out_free_page:
+        swap_list_unlock();
+        UnlockPage(new_page);
         page_cache_release(new_page);
-out_free_swap:
-        swap_free(entry);
 out:
         return found_page;
 }
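
A gloss on one of the comments added above: the new add_to_swap_cache() comment ("clear PG_dirty so a subsequent set_page_dirty takes effect") relies on set_page_dirty() doing its bookkeeping only on the clear-to-set transition. A rough sketch of the 2.4-era shape, not a verbatim copy:

static inline void set_page_dirty(struct page *page)
{
        /* only the 0 -> 1 transition files the page on its mapping's
         * dirty list, so a stale PG_dirty inherited from the page's
         * previous life would make a later call a silent no-op */
        if (!test_and_set_bit(PG_dirty, &page->flags))
                __set_page_dirty(page);
}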
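The ordering inside the consolidated delete_from_swap_cache() also matters; the same sequence as in the second hunk, restated as an annotated sketch:

void delete_from_swap_cache(struct page *page)  /* annotated sketch */
{
        swp_entry_t entry;

        if (!PageLocked(page))
                BUG();                  /* contract: caller holds the page lock */

        if (block_flushpage(page, 0))   /* discard any I/O buffers first ... */
                lru_cache_del(page);    /* ... and drop the page from the LRU */

        entry.val = page->index;        /* the swap cache keeps the swap entry
                                         * in page->index: save it before the
                                         * page is unhashed */
        spin_lock(&pagecache_lock);
        __delete_from_swap_cache(page);
        spin_unlock(&pagecache_lock);

        swap_free(entry);               /* drop the swap cache's entry reference,
                                         * outside the spinlock */
        page_cache_release(page);       /* drop the swap cache's page reference;
                                         * this never frees the page, since the
                                         * caller still holds its own */
}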
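Finally, the restructured read_swap_cache_async() is an instance of the classic check-allocate-recheck pattern: look up first, allocate with no spinlock held (alloc_page may block), then re-check under the lock that also covers the insertion. A self-contained userspace sketch of the same pattern, with a pthread mutex standing in for swap_list_lock; every name in it is illustrative, none of this is kernel API:

#include <pthread.h>
#include <stdlib.h>

struct entry { int key; struct entry *next; };

static struct entry *cache;             /* plays the swap cache */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *cache_find(int key)        /* call with cache_lock held */
{
        struct entry *e;

        for (e = cache; e; e = e->next)
                if (e->key == key)
                        return e;
        return NULL;
}

struct entry *lookup_or_insert(int key)
{
        struct entry *found, *new;

        pthread_mutex_lock(&cache_lock);
        found = cache_find(key);
        pthread_mutex_unlock(&cache_lock);
        if (found)
                return found;

        new = malloc(sizeof(*new));     /* may stall: done without the lock */
        if (!new)
                return NULL;
        new->key = key;

        pthread_mutex_lock(&cache_lock);
        found = cache_find(key);        /* re-check: another thread may have
                                         * inserted while we were allocating */
        if (found) {
                pthread_mutex_unlock(&cache_lock);
                free(new);              /* lost the race: discard ours */
                return found;
        }
        new->next = cache;              /* won the race: publish under the
                                         * same lock as the re-check */
        cache = new;
        pthread_mutex_unlock(&cache_lock);
        return new;
}

In the kernel version the swap_duplicate() revalidation sits under that same swap_list_lock, for the same reason: the allocation in the middle may have slept, so both the cache and the swap entry must be re-verified before the new page is published.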