f2fs: convert f2fs_vm_page_mkwrite() to use folio
[ Upstream commit aec5755951b74e3bbb5ddee39ac142a788547854 ]

Convert to use folio, so that we can get rid of 'page->index' to
prepare for removal of the 'index' field in struct page [1].

[1] https://lore.kernel.org/all/Zp8fgUSIBGQ1TN0D@casper.infradead.org/

Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Stable-dep-of: ba8dac350faf ("f2fs: fix to zero post-eof page")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Committed by: Greg Kroah-Hartman
Parent: 7ac8a61e55
Commit: d1ccd98edd
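For readers following the conversion, the pattern is the same one used across recent folio conversions: resolve the faulting page to its folio once with page_folio(), then replace the page-based helpers (lock_page(), page_offset(), PageUptodate(), set_page_dirty(), ...) with their folio equivalents and use folio->index instead of page->index. The sketch below is a minimal, generic illustration of that pattern in a ->page_mkwrite() handler; demo_page_mkwrite() and its simplified error handling are hypothetical and are not the f2fs code in the diff that follows.

/*
 * Minimal sketch (not the f2fs code below): the generic page -> folio
 * conversion pattern in a ->page_mkwrite() handler.  demo_page_mkwrite()
 * and its simplified error handling are hypothetical.
 */
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t demo_page_mkwrite(struct vm_fault *vmf)
{
        /* Resolve the folio once; folio->index replaces page->index. */
        struct folio *folio = page_folio(vmf->page);
        struct inode *inode = file_inode(vmf->vma->vm_file);
        loff_t size = i_size_read(inode);

        folio_lock(folio);                      /* was lock_page(page) */

        /* Folio truncated, remapped, or not up to date: retry the fault. */
        if (unlikely(folio->mapping != inode->i_mapping ||
                     folio_pos(folio) > size ||
                     !folio_test_uptodate(folio))) {
                folio_unlock(folio);            /* was unlock_page(page) */
                return VM_FAULT_NOPAGE;
        }

        /* Zero the tail of a folio that straddles EOF before dirtying it. */
        if (folio_pos(folio) + folio_size(folio) > size)
                folio_zero_segment(folio, offset_in_folio(folio, size),
                                   folio_size(folio));

        folio_mark_dirty(folio);                /* was set_page_dirty(page) */

        /* Return with the folio still locked, as ->page_mkwrite() expects. */
        return VM_FAULT_LOCKED;
}

A real handler additionally has to take filesystem-specific locks and wait for writeback, as the f2fs diff below does with filemap_invalidate_lock_shared() and f2fs_wait_on_page_writeback().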
fs/f2fs/file.c

@@ -53,7 +53,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
 
 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 {
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct dnode_of_data dn;
@@ -85,7 +85,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 	if (f2fs_compressed_file(inode)) {
-		int ret = f2fs_is_compressed_cluster(inode, page->index);
+		int ret = f2fs_is_compressed_cluster(inode, folio->index);
 
 		if (ret < 0) {
 			err = ret;
@@ -105,11 +105,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 
 	file_update_time(vmf->vma->vm_file);
 	filemap_invalidate_lock_shared(inode->i_mapping);
-	lock_page(page);
-	if (unlikely(page->mapping != inode->i_mapping ||
-			page_offset(page) > i_size_read(inode) ||
-			!PageUptodate(page))) {
-		unlock_page(page);
+	folio_lock(folio);
+	if (unlikely(folio->mapping != inode->i_mapping ||
+			folio_pos(folio) > i_size_read(inode) ||
+			!folio_test_uptodate(folio))) {
+		folio_unlock(folio);
 		err = -EFAULT;
 		goto out_sem;
 	}
@@ -117,9 +117,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	if (need_alloc) {
 		/* block allocation */
-		err = f2fs_get_block_locked(&dn, page->index);
+		err = f2fs_get_block_locked(&dn, folio->index);
 	} else {
-		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
+		err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
 		f2fs_put_dnode(&dn);
 		if (f2fs_is_pinned_file(inode) &&
 		    !__is_valid_data_blkaddr(dn.data_blkaddr))
@@ -127,11 +127,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 	}
 
 	if (err) {
-		unlock_page(page);
+		folio_unlock(folio);
 		goto out_sem;
 	}
 
-	f2fs_wait_on_page_writeback(page, DATA, false, true);
+	f2fs_wait_on_page_writeback(folio_page(folio, 0), DATA, false, true);
 
 	/* wait for GCed page writeback via META_MAPPING */
 	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
@@ -139,18 +139,18 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 	/*
 	 * check to see if the page is mapped already (no holes)
 	 */
-	if (PageMappedToDisk(page))
+	if (folio_test_mappedtodisk(folio))
 		goto out_sem;
 
 	/* page is wholly or partially inside EOF */
-	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
+	if (((loff_t)(folio->index + 1) << PAGE_SHIFT) >
 						i_size_read(inode)) {
 		loff_t offset;
 
 		offset = i_size_read(inode) & ~PAGE_MASK;
-		zero_user_segment(page, offset, PAGE_SIZE);
+		folio_zero_segment(folio, offset, folio_size(folio));
 	}
-	set_page_dirty(page);
+	folio_mark_dirty(folio);
 
 	f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
 	f2fs_update_time(sbi, REQ_TIME);
@@ -162,7 +162,7 @@ out_sem:
 out:
 	ret = vmf_fs_error(err);
 
-	trace_f2fs_vm_page_mkwrite(inode, page->index, vmf->vma->vm_flags, ret);
+	trace_f2fs_vm_page_mkwrite(inode, folio->index, vmf->vma->vm_flags, ret);
 	return ret;
 }
 