@@ -824,12 +824,11 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 
 out_unlock:
 	__iomap_put_folio(iter, pos, 0, folio);
-	iomap_write_failed(iter->inode, pos, len);
 
 	return status;
 }
 
-static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
+static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 		size_t copied, struct folio *folio)
 {
 	flush_dcache_folio(folio);
@@ -846,14 +845,14 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 	 * redo the whole thing.
 	 */
 	if (unlikely(copied < len && !folio_test_uptodate(folio)))
-		return 0;
+		return false;
 	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
 	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
 	filemap_dirty_folio(inode->i_mapping, folio);
-	return copied;
+	return true;
 }
 
-static size_t iomap_write_end_inline(const struct iomap_iter *iter,
+static void iomap_write_end_inline(const struct iomap_iter *iter,
 		struct folio *folio, loff_t pos, size_t copied)
 {
 	const struct iomap *iomap = &iter->iomap;
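
With this change __iomap_write_end() no longer reports a byte count; it either commits everything the caller copied or rejects the write outright. A minimal, runnable userspace model of that all-or-nothing contract (the names page_uptodate and buf_commit are invented for the demo, not kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static char page[16];		/* stand-in for a pagecache folio */
static bool page_uptodate;	/* stand-in for the uptodate flag */

/* Commit @copied of @len requested bytes at @off: all or nothing. */
static bool buf_commit(size_t off, size_t len, size_t copied,
		       const char *src)
{
	/*
	 * A short copy into a folio that isn't uptodate must be
	 * rejected: the uncopied tail would expose stale data.
	 */
	if (copied < len && !page_uptodate)
		return false;
	memcpy(page + off, src, copied);
	page_uptodate = true;
	return true;
}

int main(void)
{
	size_t copied = 4;	/* pretend only 4 of 8 bytes copied */
	size_t written = buf_commit(0, 8, copied, "data") ? copied : 0;

	printf("written=%zu\n", written);	/* 0: commit rejected */
	return 0;
}

As in the kernel code, the caller derives the byte count itself: copied on success, 0 on rejection.
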
@@ -868,59 +867,51 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
 	kunmap_local(addr);
 
 	mark_inode_dirty(iter->inode);
-	return copied;
 }
 
-/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
-static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
+/*
+ * Returns true if all copied bytes have been written to the pagecache,
+ * otherwise returns false.
+ */
+static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 		size_t copied, struct folio *folio)
 {
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
-	loff_t old_size = iter->inode->i_size;
-	size_t ret;
 
 	if (srcmap->type == IOMAP_INLINE) {
-		ret = iomap_write_end_inline(iter, folio, pos, copied);
-	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
-		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
-				copied, &folio->page, NULL);
-	} else {
-		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
+		iomap_write_end_inline(iter, folio, pos, copied);
+		return true;
 	}
 
-	/*
-	 * Update the in-memory inode size after copying the data into the page
-	 * cache.  It's up to the file system to write the updated size to disk,
-	 * preferably after I/O completion so that no stale data is exposed.
-	 */
-	if (pos + ret > old_size) {
-		i_size_write(iter->inode, pos + ret);
-		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
+	if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
+		size_t bh_written;
+
+		bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
+					len, copied, &folio->page, NULL);
+		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
+		return bh_written == copied;
 	}
-	__iomap_put_folio(iter, pos, ret, folio);
 
-	if (old_size < pos)
-		pagecache_isize_extended(iter->inode, old_size, pos);
-	if (ret < len)
-		iomap_write_failed(iter->inode, pos + ret, len - ret);
-	return ret;
+	return __iomap_write_end(iter->inode, pos, len, copied, folio);
 }
 
 static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 {
 	loff_t length = iomap_length(iter);
 	size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
 	loff_t pos = iter->pos;
-	ssize_t written = 0;
+	ssize_t total_written = 0;
 	long status = 0;
 	struct address_space *mapping = iter->inode->i_mapping;
 	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
 
 	do {
 		struct folio *folio;
+		loff_t old_size;
 		size_t offset;		/* Offset into folio */
 		size_t bytes;		/* Bytes to write to folio */
 		size_t copied;		/* Bytes copied from user */
+		size_t written;		/* Bytes that have been written */
 
 		bytes = iov_iter_count(i);
 retry:
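
After this hunk, iomap_write_end() is a pure commit check: it no longer updates i_size, drops the folio, or calls iomap_write_failed(). Every caller therefore has to pair iomap_write_begin() with an explicit __iomap_put_folio() and handle failure itself. A condensed sketch of the resulting caller shape (kernel context, not compilable on its own; mirrors the unshare/zero hunks below):

	bool ret;

	status = iomap_write_begin(iter, pos, bytes, &folio);
	if (status)
		return status;		/* nothing to put: begin failed */

	/* ... modify the folio contents ... */

	ret = iomap_write_end(iter, pos, bytes, bytes, folio);
	__iomap_put_folio(iter, pos, bytes, folio);	/* always release */
	if (!ret)
		return -EIO;
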
@@ -950,8 +941,10 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 		}
 
 		status = iomap_write_begin(iter, pos, bytes, &folio);
-		if (unlikely(status))
+		if (unlikely(status)) {
+			iomap_write_failed(iter->inode, pos, bytes);
 			break;
+		}
 		if (iter->iomap.flags & IOMAP_F_STALE)
 			break;
 
@@ -963,37 +956,55 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 			flush_dcache_folio(folio);
 
 		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
-		status = iomap_write_end(iter, pos, bytes, copied, folio);
+		written = iomap_write_end(iter, pos, bytes, copied, folio) ?
+			  copied : 0;
+
+		/*
+		 * Update the in-memory inode size after copying the data into
+		 * the page cache.  It's up to the file system to write the
+		 * updated size to disk, preferably after I/O completion so that
+		 * no stale data is exposed.  Only once that's done can we
+		 * unlock and release the folio.
+		 */
+		old_size = iter->inode->i_size;
+		if (pos + written > old_size) {
+			i_size_write(iter->inode, pos + written);
+			iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
+		}
+		__iomap_put_folio(iter, pos, written, folio);
 
-		if (unlikely(copied != status))
-			iov_iter_revert(i, copied - status);
+		if (old_size < pos)
+			pagecache_isize_extended(iter->inode, old_size, pos);
 
 		cond_resched();
-		if (unlikely(status == 0)) {
+		if (unlikely(written == 0)) {
 			/*
 			 * A short copy made iomap_write_end() reject the
 			 * thing entirely.  Might be memory poisoning
 			 * halfway through, might be a race with munmap,
 			 * might be severe memory pressure.
 			 */
+			iomap_write_failed(iter->inode, pos, bytes);
+			iov_iter_revert(i, copied);
+
 			if (chunk > PAGE_SIZE)
 				chunk /= 2;
 			if (copied) {
 				bytes = copied;
 				goto retry;
 			}
 		} else {
-			pos += status;
-			written += status;
-			length -= status;
+			pos += written;
+			total_written += written;
+			length -= written;
 		}
 	} while (iov_iter_count(i) && length);
 
 	if (status == -EAGAIN) {
-		iov_iter_revert(i, written);
+		iov_iter_revert(i, total_written);
 		return -EAGAIN;
 	}
-	return written ? written : status;
+	return total_written ? total_written : status;
 }
 
 ssize_t
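
The rewritten loop keeps the long-standing fallback for short atomic copies: if nothing could be committed, halve the copy chunk and retry with only the bytes that did transfer. A self-contained, runnable userspace model of that backoff (the fault-injecting flaky_copy() is invented for the demo):

#include <stdio.h>
#include <string.h>

#define CHUNK_START 4096

/* Copy that "faults" on its first call, then behaves. */
static size_t flaky_copy(char *dst, const char *src, size_t n)
{
	static int calls;

	if (calls++ == 0)
		return 0;	/* simulated failed copy */
	memcpy(dst, src, n);
	return n;
}

int main(void)
{
	const char src[] = "hello, iomap";
	char dst[sizeof(src)] = "";
	size_t chunk = CHUNK_START, total = 0, len = sizeof(src) - 1;

	while (total < len) {
		size_t bytes = len - total;
		size_t copied;

		if (bytes > chunk)
			bytes = chunk;
		copied = flaky_copy(dst + total, src + total, bytes);
		if (copied == 0) {
			if (chunk > 1)
				chunk /= 2;	/* back off and retry */
			continue;
		}
		total += copied;
	}
	printf("copied %zu bytes: %s\n", total, dst);
	return 0;
}

The kernel version additionally reverts the iov_iter and trims the pagecache via iomap_write_failed() before retrying, as the hunk above shows.
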
@@ -1322,6 +1333,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 		int status;
 		size_t offset;
 		size_t bytes = min_t(u64, SIZE_MAX, length);
+		bool ret;
 
 		status = iomap_write_begin(iter, pos, bytes, &folio);
 		if (unlikely(status))
@@ -1333,8 +1345,9 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 		if (bytes > folio_size(folio) - offset)
 			bytes = folio_size(folio) - offset;
 
-		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
-		if (WARN_ON_ONCE(bytes == 0))
+		ret = iomap_write_end(iter, pos, bytes, bytes, folio);
+		__iomap_put_folio(iter, pos, bytes, folio);
+		if (WARN_ON_ONCE(!ret))
 			return -EIO;
 
 		cond_resched();
@@ -1383,6 +1396,7 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
 		int status;
 		size_t offset;
 		size_t bytes = min_t(u64, SIZE_MAX, length);
+		bool ret;
 
 		status = iomap_write_begin(iter, pos, bytes, &folio);
 		if (status)
@@ -1397,8 +1411,9 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
 		folio_zero_range(folio, offset, bytes);
 		folio_mark_accessed(folio);
 
-		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
-		if (WARN_ON_ONCE(bytes == 0))
+		ret = iomap_write_end(iter, pos, bytes, bytes, folio);
+		__iomap_put_folio(iter, pos, bytes, folio);
+		if (WARN_ON_ONCE(!ret))
 			return -EIO;
 
 		pos += bytes;
@@ -1958,18 +1973,13 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	return error;
 }
 
-static int iomap_do_writepage(struct folio *folio,
-		struct writeback_control *wbc, void *data)
-{
-	return iomap_writepage_map(data, wbc, folio);
-}
-
 int
 iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
 		struct iomap_writepage_ctx *wpc,
 		const struct iomap_writeback_ops *ops)
 {
-	int ret;
+	struct folio *folio = NULL;
+	int error;
 
 	/*
 	 * Writeback from reclaim context should never happen except in the case
@@ -1980,8 +1990,9 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
 		return -EIO;
 
 	wpc->ops = ops;
-	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
-	return iomap_submit_ioend(wpc, ret);
+	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
+		error = iomap_writepage_map(wpc, wbc, folio);
+	return iomap_submit_ioend(wpc, error);
 }
 EXPORT_SYMBOL_GPL(iomap_writepages);
 
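The last hunk retires the write_cache_pages() callback indirection in favour of the writeback_iter() loop, so iomap_writepage_map() can be called directly. The same pattern applies to any writepages implementation; a sketch of the generic shape (kernel context, not compilable standalone; my_writepages() and my_write_folio() are hypothetical stand-ins):

static int my_writepages(struct address_space *mapping,
			 struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error = 0;

	/*
	 * writeback_iter() locks and returns each dirty folio in turn,
	 * consumes the error from the previous iteration, and cleans up
	 * when the walk stops early or runs out of work.
	 */
	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		error = my_write_folio(folio, wbc);
	return error;
}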