
[PATCH] splice: LRU fixups

Nick says that the current construct isn't safe. This goes back to the
original, but sets PIPE_BUF_FLAG_LRU on user pages as well, since they all
seem to be on the LRU in the first place.

Signed-off-by: Jens Axboe <axboe@suse.de>
Author: Jens Axboe <axboe@suse.de>
Date:   2006-05-03 10:35:26 +02:00
Commit: 1432873af7 (parent bfc4ee39fd)

2 changed files with 14 additions and 24 deletions
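
In short: the steal hooks now record, via PIPE_BUF_FLAG_LRU, that the stolen
page is already on the LRU (page cache pages normally are, and gifted user
pages appear to be as well), and pipe_to_file() only calls lru_cache_add()
when the flag is absent, instead of open-coding SetPageLRU() and
add_page_to_inactive_list() under zone->lru_lock. Below is a minimal sketch
of the consumer-side fast path after this patch; move_stolen_page() is a
hypothetical helper named only for illustration, with error handling reduced
to unlocking the page and returning an error so the caller can fall back to
the copy path:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>

/* Illustration only, not a function added by this patch. */
static int move_stolen_page(struct pipe_inode_info *info,
			    struct pipe_buffer *buf,
			    struct address_space *mapping,
			    pgoff_t index, gfp_t gfp_mask)
{
	struct page *page;

	/* steal() sets PIPE_BUF_FLAG_LRU if the page is already on the LRU */
	if (buf->ops->steal(info, buf))
		return -EAGAIN;		/* caller falls back to copying */

	page = buf->page;
	if (add_to_page_cache(page, mapping, index, gfp_mask)) {
		unlock_page(page);	/* steal() returned it locked */
		return -EEXIST;		/* caller falls back to copying */
	}

	page_cache_get(page);
	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add(page);	/* not on the LRU yet, add it */

	return 0;
}

The removed construct manipulated LRU state by hand under zone->lru_lock;
the replacement leaves LRU insertion entirely to lru_cache_add().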

fs/splice.c

@@ -78,6 +78,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
 		return 1;
 	}
 
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return 0;
 }
@@ -85,6 +86,7 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
 					struct pipe_buffer *buf)
 {
 	page_cache_release(buf->page);
+	buf->flags &= ~PIPE_BUF_FLAG_LRU;
 }
 
 static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
@@ -141,6 +143,7 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
 	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
 		return 1;
 
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return generic_pipe_buf_steal(pipe, buf);
 }
@@ -566,37 +569,23 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 	 */
 	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
 		/*
-		 * If steal succeeds, buf->page is now pruned from the vm
-		 * side (page cache) and we can reuse it. The page will also
-		 * be locked on successful return.
+		 * If steal succeeds, buf->page is now pruned from the
+		 * pagecache and we can reuse it. The page will also be
+		 * locked on successful return.
 		 */
 		if (buf->ops->steal(info, buf))
 			goto find_page;
 
 		page = buf->page;
-		page_cache_get(page);
-
-		/*
-		 * page must be on the LRU for adding to the pagecache.
-		 * Check this without grabbing the zone lock, if it isn't
-		 * the do grab the zone lock, recheck, and add if necessary.
-		 */
-		if (!PageLRU(page)) {
-			struct zone *zone = page_zone(page);
-
-			spin_lock_irq(&zone->lru_lock);
-			if (!PageLRU(page)) {
-				SetPageLRU(page);
-				add_page_to_inactive_list(zone, page);
-			}
-			spin_unlock_irq(&zone->lru_lock);
-		}
-
 		if (add_to_page_cache(page, mapping, index, gfp_mask)) {
-			page_cache_release(page);
 			unlock_page(page);
 			goto find_page;
 		}
+
+		page_cache_get(page);
+
+		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+			lru_cache_add(page);
 	} else {
 find_page:
 		page = find_lock_page(mapping, index);

include/linux/pipe_fs_i.h

@@ -5,8 +5,9 @@
 
 #define PIPE_BUFFERS (16)
 
-#define PIPE_BUF_FLAG_ATOMIC	0x01	/* was atomically mapped */
-#define PIPE_BUF_FLAG_GIFT	0x02	/* page is a gift */
+#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
+#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
+#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */
 
 struct pipe_buffer {
 	struct page *page;
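
Note that PIPE_BUF_FLAG_LRU takes over the 0x01 bit, shifting
PIPE_BUF_FLAG_ATOMIC and PIPE_BUF_FLAG_GIFT to 0x02 and 0x04, so the three
flags remain independent bits that can be combined on one buffer. A purely
illustrative check (assuming a struct pipe_buffer *buf in scope):

/* e.g. a gifted user page that is already on the LRU carries both bits,
 * so after a successful steal the consumer skips lru_cache_add(). */
if ((buf->flags & (PIPE_BUF_FLAG_GIFT | PIPE_BUF_FLAG_LRU)) ==
		(PIPE_BUF_FLAG_GIFT | PIPE_BUF_FLAG_LRU)) {
	/* stealable, and no LRU insertion needed afterwards */
}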