1 /* 2 * linux/fs/ext4/inode.c 3 * 4 * Copyright (C) 1992, 1993, 1994, 1995 5 * Remy Card (card@masi.ibp.fr) 6 * Laboratoire MASI - Institut Blaise Pascal 7 * Universite Pierre et Marie Curie (Paris VI) 8 * 9 * from 10 * 11 * linux/fs/minix/inode.c 12 * 13 * Copyright (C) 1991, 1992 Linus Torvalds 14 * 15 * Goal-directed block allocation by Stephen Tweedie 16 * (sct@redhat.com), 1993, 1998 17 * Big-endian to little-endian byte-swapping/bitmaps by 18 * David S. Miller (davem@caip.rutgers.edu), 1995 19 * 64-bit file support on 64-bit platforms by Jakub Jelinek 20 * (jj@sunsite.ms.mff.cuni.cz) 21 * 22 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000 23 */ 24 25 #include <linux/module.h> 26 #include <linux/fs.h> 27 #include <linux/time.h> 28 #include <linux/jbd2.h> 29 #include <linux/highuid.h> 30 #include <linux/pagemap.h> 31 #include <linux/quotaops.h> 32 #include <linux/string.h> 33 #include <linux/buffer_head.h> 34 #include <linux/writeback.h> 35 #include <linux/pagevec.h> 36 #include <linux/mpage.h> 37 #include <linux/namei.h> 38 #include <linux/uio.h> 39 #include <linux/bio.h> 40 #include <linux/workqueue.h> 41 #include <linux/kernel.h> 42 #include <linux/printk.h> 43 #include <linux/slab.h> 44 #include <linux/ratelimit.h> 45 46 #include "ext4_jbd2.h" 47 #include "xattr.h" 48 #include "acl.h" 49 #include "ext4_extents.h" 50 51 #include <trace/events/ext4.h> 52 53 #define MPAGE_DA_EXTENT_TAIL 0x01 54 55 static inline int ext4_begin_ordered_truncate(struct inode *inode, 56 loff_t new_size) 57 { 58 trace_ext4_begin_ordered_truncate(inode, new_size); 59 /* 60 * If jinode is zero, then we never opened the file for 61 * writing, so there's no need to call 62 * jbd2_journal_begin_ordered_truncate() since there's no 63 * outstanding writes we need to flush. 64 */ 65 if (!EXT4_I(inode)->jinode) 66 return 0; 67 return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode), 68 EXT4_I(inode)->jinode, 69 new_size); 70 } 71 72 static void ext4_invalidatepage(struct page *page, unsigned long offset); 73 static int noalloc_get_block_write(struct inode *inode, sector_t iblock, 74 struct buffer_head *bh_result, int create); 75 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode); 76 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate); 77 static int __ext4_journalled_writepage(struct page *page, unsigned int len); 78 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh); 79 80 /* 81 * Test whether an inode is a fast symlink. 82 */ 83 static int ext4_inode_is_fast_symlink(struct inode *inode) 84 { 85 int ea_blocks = EXT4_I(inode)->i_file_acl ? 86 (inode->i_sb->s_blocksize >> 9) : 0; 87 88 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); 89 } 90 91 /* 92 * Work out how many blocks we need to proceed with the next chunk of a 93 * truncate transaction. 94 */ 95 static unsigned long blocks_for_truncate(struct inode *inode) 96 { 97 ext4_lblk_t needed; 98 99 needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9); 100 101 /* Give ourselves just enough room to cope with inodes in which 102 * i_blocks is corrupt: we've seen disk corruptions in the past 103 * which resulted in random data in an inode which looked enough 104 * like a regular file for ext4 to try to delete it. Things 105 * will go a bit crazy if that happens, but at least we should 106 * try not to panic the whole kernel. */ 107 if (needed < 2) 108 needed = 2; 109 110 /* But we need to bound the transaction so we don't overflow the 111 * journal. */ 112 if (needed > EXT4_MAX_TRANS_DATA) 113 needed = EXT4_MAX_TRANS_DATA; 114 115 return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed; 116 } 117 118 /* 119 * Truncate transactions can be complex and absolutely huge. So we need to 120 * be able to restart the transaction at a conventient checkpoint to make 121 * sure we don't overflow the journal. 122 * 123 * start_transaction gets us a new handle for a truncate transaction, 124 * and extend_transaction tries to extend the existing one a bit. If 125 * extend fails, we need to propagate the failure up and restart the 126 * transaction in the top-level truncate loop. --sct 127 */ 128 static handle_t *start_transaction(struct inode *inode) 129 { 130 handle_t *result; 131 132 result = ext4_journal_start(inode, blocks_for_truncate(inode)); 133 if (!IS_ERR(result)) 134 return result; 135 136 ext4_std_error(inode->i_sb, PTR_ERR(result)); 137 return result; 138 } 139 140 /* 141 * Try to extend this transaction for the purposes of truncation. 142 * 143 * Returns 0 if we managed to create more room. If we can't create more 144 * room, and the transaction must be restarted we return 1. 145 */ 146 static int try_to_extend_transaction(handle_t *handle, struct inode *inode) 147 { 148 if (!ext4_handle_valid(handle)) 149 return 0; 150 if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1)) 151 return 0; 152 if (!ext4_journal_extend(handle, blocks_for_truncate(inode))) 153 return 0; 154 return 1; 155 } 156 157 /* 158 * Restart the transaction associated with *handle. This does a commit, 159 * so before we call here everything must be consistently dirtied against 160 * this transaction. 161 */ 162 int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode, 163 int nblocks) 164 { 165 int ret; 166 167 /* 168 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this 169 * moment, get_block can be called only for blocks inside i_size since 170 * page cache has been already dropped and writes are blocked by 171 * i_mutex. So we can safely drop the i_data_sem here. 172 */ 173 BUG_ON(EXT4_JOURNAL(inode) == NULL); 174 jbd_debug(2, "restarting handle %p\n", handle); 175 up_write(&EXT4_I(inode)->i_data_sem); 176 ret = ext4_journal_restart(handle, nblocks); 177 down_write(&EXT4_I(inode)->i_data_sem); 178 ext4_discard_preallocations(inode); 179 180 return ret; 181 } 182 183 /* 184 * Called at the last iput() if i_nlink is zero. 185 */ 186 void ext4_evict_inode(struct inode *inode) 187 { 188 handle_t *handle; 189 int err; 190 191 trace_ext4_evict_inode(inode); 192 if (inode->i_nlink) { 193 truncate_inode_pages(&inode->i_data, 0); 194 goto no_delete; 195 } 196 197 if (!is_bad_inode(inode)) 198 dquot_initialize(inode); 199 200 if (ext4_should_order_data(inode)) 201 ext4_begin_ordered_truncate(inode, 0); 202 truncate_inode_pages(&inode->i_data, 0); 203 204 if (is_bad_inode(inode)) 205 goto no_delete; 206 207 handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3); 208 if (IS_ERR(handle)) { 209 ext4_std_error(inode->i_sb, PTR_ERR(handle)); 210 /* 211 * If we're going to skip the normal cleanup, we still need to 212 * make sure that the in-core orphan linked list is properly 213 * cleaned up. 214 */ 215 ext4_orphan_del(NULL, inode); 216 goto no_delete; 217 } 218 219 if (IS_SYNC(inode)) 220 ext4_handle_sync(handle); 221 inode->i_size = 0; 222 err = ext4_mark_inode_dirty(handle, inode); 223 if (err) { 224 ext4_warning(inode->i_sb, 225 "couldn't mark inode dirty (err %d)", err); 226 goto stop_handle; 227 } 228 if (inode->i_blocks) 229 ext4_truncate(inode); 230 231 /* 232 * ext4_ext_truncate() doesn't reserve any slop when it 233 * restarts journal transactions; therefore there may not be 234 * enough credits left in the handle to remove the inode from 235 * the orphan list and set the dtime field. 236 */ 237 if (!ext4_handle_has_enough_credits(handle, 3)) { 238 err = ext4_journal_extend(handle, 3); 239 if (err > 0) 240 err = ext4_journal_restart(handle, 3); 241 if (err != 0) { 242 ext4_warning(inode->i_sb, 243 "couldn't extend journal (err %d)", err); 244 stop_handle: 245 ext4_journal_stop(handle); 246 ext4_orphan_del(NULL, inode); 247 goto no_delete; 248 } 249 } 250 251 /* 252 * Kill off the orphan record which ext4_truncate created. 253 * AKPM: I think this can be inside the above `if'. 254 * Note that ext4_orphan_del() has to be able to cope with the 255 * deletion of a non-existent orphan - this is because we don't 256 * know if ext4_truncate() actually created an orphan record. 257 * (Well, we could do this if we need to, but heck - it works) 258 */ 259 ext4_orphan_del(handle, inode); 260 EXT4_I(inode)->i_dtime = get_seconds(); 261 262 /* 263 * One subtle ordering requirement: if anything has gone wrong 264 * (transaction abort, IO errors, whatever), then we can still 265 * do these next steps (the fs will already have been marked as 266 * having errors), but we can't free the inode if the mark_dirty 267 * fails. 268 */ 269 if (ext4_mark_inode_dirty(handle, inode)) 270 /* If that failed, just do the required in-core inode clear. */ 271 ext4_clear_inode(inode); 272 else 273 ext4_free_inode(handle, inode); 274 ext4_journal_stop(handle); 275 return; 276 no_delete: 277 ext4_clear_inode(inode); /* We must guarantee clearing of inode... */ 278 } 279 280 typedef struct { 281 __le32 *p; 282 __le32 key; 283 struct buffer_head *bh; 284 } Indirect; 285 286 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) 287 { 288 p->key = *(p->p = v); 289 p->bh = bh; 290 } 291 292 /** 293 * ext4_block_to_path - parse the block number into array of offsets 294 * @inode: inode in question (we are only interested in its superblock) 295 * @i_block: block number to be parsed 296 * @offsets: array to store the offsets in 297 * @boundary: set this non-zero if the referred-to block is likely to be 298 * followed (on disk) by an indirect block. 299 * 300 * To store the locations of file's data ext4 uses a data structure common 301 * for UNIX filesystems - tree of pointers anchored in the inode, with 302 * data blocks at leaves and indirect blocks in intermediate nodes. 303 * This function translates the block number into path in that tree - 304 * return value is the path length and @offsets[n] is the offset of 305 * pointer to (n+1)th node in the nth one. If @block is out of range 306 * (negative or too large) warning is printed and zero returned. 307 * 308 * Note: function doesn't find node addresses, so no IO is needed. All 309 * we need to know is the capacity of indirect blocks (taken from the 310 * inode->i_sb). 311 */ 312 313 /* 314 * Portability note: the last comparison (check that we fit into triple 315 * indirect block) is spelled differently, because otherwise on an 316 * architecture with 32-bit longs and 8Kb pages we might get into trouble 317 * if our filesystem had 8Kb blocks. We might use long long, but that would 318 * kill us on x86. Oh, well, at least the sign propagation does not matter - 319 * i_block would have to be negative in the very beginning, so we would not 320 * get there at all. 321 */ 322 323 static int ext4_block_to_path(struct inode *inode, 324 ext4_lblk_t i_block, 325 ext4_lblk_t offsets[4], int *boundary) 326 { 327 int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb); 328 int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb); 329 const long direct_blocks = EXT4_NDIR_BLOCKS, 330 indirect_blocks = ptrs, 331 double_blocks = (1 << (ptrs_bits * 2)); 332 int n = 0; 333 int final = 0; 334 335 if (i_block < direct_blocks) { 336 offsets[n++] = i_block; 337 final = direct_blocks; 338 } else if ((i_block -= direct_blocks) < indirect_blocks) { 339 offsets[n++] = EXT4_IND_BLOCK; 340 offsets[n++] = i_block; 341 final = ptrs; 342 } else if ((i_block -= indirect_blocks) < double_blocks) { 343 offsets[n++] = EXT4_DIND_BLOCK; 344 offsets[n++] = i_block >> ptrs_bits; 345 offsets[n++] = i_block & (ptrs - 1); 346 final = ptrs; 347 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { 348 offsets[n++] = EXT4_TIND_BLOCK; 349 offsets[n++] = i_block >> (ptrs_bits * 2); 350 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); 351 offsets[n++] = i_block & (ptrs - 1); 352 final = ptrs; 353 } else { 354 ext4_warning(inode->i_sb, "block %lu > max in inode %lu", 355 i_block + direct_blocks + 356 indirect_blocks + double_blocks, inode->i_ino); 357 } 358 if (boundary) 359 *boundary = final - 1 - (i_block & (ptrs - 1)); 360 return n; 361 } 362 363 static int __ext4_check_blockref(const char *function, unsigned int line, 364 struct inode *inode, 365 __le32 *p, unsigned int max) 366 { 367 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; 368 __le32 *bref = p; 369 unsigned int blk; 370 371 while (bref < p+max) { 372 blk = le32_to_cpu(*bref++); 373 if (blk && 374 unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), 375 blk, 1))) { 376 es->s_last_error_block = cpu_to_le64(blk); 377 ext4_error_inode(inode, function, line, blk, 378 "invalid block"); 379 return -EIO; 380 } 381 } 382 return 0; 383 } 384 385 386 #define ext4_check_indirect_blockref(inode, bh) \ 387 __ext4_check_blockref(__func__, __LINE__, inode, \ 388 (__le32 *)(bh)->b_data, \ 389 EXT4_ADDR_PER_BLOCK((inode)->i_sb)) 390 391 #define ext4_check_inode_blockref(inode) \ 392 __ext4_check_blockref(__func__, __LINE__, inode, \ 393 EXT4_I(inode)->i_data, \ 394 EXT4_NDIR_BLOCKS) 395 396 /** 397 * ext4_get_branch - read the chain of indirect blocks leading to data 398 * @inode: inode in question 399 * @depth: depth of the chain (1 - direct pointer, etc.) 400 * @offsets: offsets of pointers in inode/indirect blocks 401 * @chain: place to store the result 402 * @err: here we store the error value 403 * 404 * Function fills the array of triples <key, p, bh> and returns %NULL 405 * if everything went OK or the pointer to the last filled triple 406 * (incomplete one) otherwise. Upon the return chain[i].key contains 407 * the number of (i+1)-th block in the chain (as it is stored in memory, 408 * i.e. little-endian 32-bit), chain[i].p contains the address of that 409 * number (it points into struct inode for i==0 and into the bh->b_data 410 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect 411 * block for i>0 and NULL for i==0. In other words, it holds the block 412 * numbers of the chain, addresses they were taken from (and where we can 413 * verify that chain did not change) and buffer_heads hosting these 414 * numbers. 415 * 416 * Function stops when it stumbles upon zero pointer (absent block) 417 * (pointer to last triple returned, *@err == 0) 418 * or when it gets an IO error reading an indirect block 419 * (ditto, *@err == -EIO) 420 * or when it reads all @depth-1 indirect blocks successfully and finds 421 * the whole chain, all way to the data (returns %NULL, *err == 0). 422 * 423 * Need to be called with 424 * down_read(&EXT4_I(inode)->i_data_sem) 425 */ 426 static Indirect *ext4_get_branch(struct inode *inode, int depth, 427 ext4_lblk_t *offsets, 428 Indirect chain[4], int *err) 429 { 430 struct super_block *sb = inode->i_sb; 431 Indirect *p = chain; 432 struct buffer_head *bh; 433 434 *err = 0; 435 /* i_data is not going away, no lock needed */ 436 add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets); 437 if (!p->key) 438 goto no_block; 439 while (--depth) { 440 bh = sb_getblk(sb, le32_to_cpu(p->key)); 441 if (unlikely(!bh)) 442 goto failure; 443 444 if (!bh_uptodate_or_lock(bh)) { 445 if (bh_submit_read(bh) < 0) { 446 put_bh(bh); 447 goto failure; 448 } 449 /* validate block references */ 450 if (ext4_check_indirect_blockref(inode, bh)) { 451 put_bh(bh); 452 goto failure; 453 } 454 } 455 456 add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets); 457 /* Reader: end */ 458 if (!p->key) 459 goto no_block; 460 } 461 return NULL; 462 463 failure: 464 *err = -EIO; 465 no_block: 466 return p; 467 } 468 469 /** 470 * ext4_find_near - find a place for allocation with sufficient locality 471 * @inode: owner 472 * @ind: descriptor of indirect block. 473 * 474 * This function returns the preferred place for block allocation. 475 * It is used when heuristic for sequential allocation fails. 476 * Rules are: 477 * + if there is a block to the left of our position - allocate near it. 478 * + if pointer will live in indirect block - allocate near that block. 479 * + if pointer will live in inode - allocate in the same 480 * cylinder group. 481 * 482 * In the latter case we colour the starting block by the callers PID to 483 * prevent it from clashing with concurrent allocations for a different inode 484 * in the same block group. The PID is used here so that functionally related 485 * files will be close-by on-disk. 486 * 487 * Caller must make sure that @ind is valid and will stay that way. 488 */ 489 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) 490 { 491 struct ext4_inode_info *ei = EXT4_I(inode); 492 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data; 493 __le32 *p; 494 ext4_fsblk_t bg_start; 495 ext4_fsblk_t last_block; 496 ext4_grpblk_t colour; 497 ext4_group_t block_group; 498 int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); 499 500 /* Try to find previous block */ 501 for (p = ind->p - 1; p >= start; p--) { 502 if (*p) 503 return le32_to_cpu(*p); 504 } 505 506 /* No such thing, so let's try location of indirect block */ 507 if (ind->bh) 508 return ind->bh->b_blocknr; 509 510 /* 511 * It is going to be referred to from the inode itself? OK, just put it 512 * into the same cylinder group then. 513 */ 514 block_group = ei->i_block_group; 515 if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { 516 block_group &= ~(flex_size-1); 517 if (S_ISREG(inode->i_mode)) 518 block_group++; 519 } 520 bg_start = ext4_group_first_block_no(inode->i_sb, block_group); 521 last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; 522 523 /* 524 * If we are doing delayed allocation, we don't need take 525 * colour into account. 526 */ 527 if (test_opt(inode->i_sb, DELALLOC)) 528 return bg_start; 529 530 if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) 531 colour = (current->pid % 16) * 532 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); 533 else 534 colour = (current->pid % 16) * ((last_block - bg_start) / 16); 535 return bg_start + colour; 536 } 537 538 /** 539 * ext4_find_goal - find a preferred place for allocation. 540 * @inode: owner 541 * @block: block we want 542 * @partial: pointer to the last triple within a chain 543 * 544 * Normally this function find the preferred place for block allocation, 545 * returns it. 546 * Because this is only used for non-extent files, we limit the block nr 547 * to 32 bits. 548 */ 549 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block, 550 Indirect *partial) 551 { 552 ext4_fsblk_t goal; 553 554 /* 555 * XXX need to get goal block from mballoc's data structures 556 */ 557 558 goal = ext4_find_near(inode, partial); 559 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; 560 return goal; 561 } 562 563 /** 564 * ext4_blks_to_allocate - Look up the block map and count the number 565 * of direct blocks need to be allocated for the given branch. 566 * 567 * @branch: chain of indirect blocks 568 * @k: number of blocks need for indirect blocks 569 * @blks: number of data blocks to be mapped. 570 * @blocks_to_boundary: the offset in the indirect block 571 * 572 * return the total number of blocks to be allocate, including the 573 * direct and indirect blocks. 574 */ 575 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks, 576 int blocks_to_boundary) 577 { 578 unsigned int count = 0; 579 580 /* 581 * Simple case, [t,d]Indirect block(s) has not allocated yet 582 * then it's clear blocks on that path have not allocated 583 */ 584 if (k > 0) { 585 /* right now we don't handle cross boundary allocation */ 586 if (blks < blocks_to_boundary + 1) 587 count += blks; 588 else 589 count += blocks_to_boundary + 1; 590 return count; 591 } 592 593 count++; 594 while (count < blks && count <= blocks_to_boundary && 595 le32_to_cpu(*(branch[0].p + count)) == 0) { 596 count++; 597 } 598 return count; 599 } 600 601 /** 602 * ext4_alloc_blocks: multiple allocate blocks needed for a branch 603 * @handle: handle for this transaction 604 * @inode: inode which needs allocated blocks 605 * @iblock: the logical block to start allocated at 606 * @goal: preferred physical block of allocation 607 * @indirect_blks: the number of blocks need to allocate for indirect 608 * blocks 609 * @blks: number of desired blocks 610 * @new_blocks: on return it will store the new block numbers for 611 * the indirect blocks(if needed) and the first direct block, 612 * @err: on return it will store the error code 613 * 614 * This function will return the number of blocks allocated as 615 * requested by the passed-in parameters. 616 */ 617 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, 618 ext4_lblk_t iblock, ext4_fsblk_t goal, 619 int indirect_blks, int blks, 620 ext4_fsblk_t new_blocks[4], int *err) 621 { 622 struct ext4_allocation_request ar; 623 int target, i; 624 unsigned long count = 0, blk_allocated = 0; 625 int index = 0; 626 ext4_fsblk_t current_block = 0; 627 int ret = 0; 628 629 /* 630 * Here we try to allocate the requested multiple blocks at once, 631 * on a best-effort basis. 632 * To build a branch, we should allocate blocks for 633 * the indirect blocks(if not allocated yet), and at least 634 * the first direct block of this branch. That's the 635 * minimum number of blocks need to allocate(required) 636 */ 637 /* first we try to allocate the indirect blocks */ 638 target = indirect_blks; 639 while (target > 0) { 640 count = target; 641 /* allocating blocks for indirect blocks and direct blocks */ 642 current_block = ext4_new_meta_blocks(handle, inode, goal, 643 0, &count, err); 644 if (*err) 645 goto failed_out; 646 647 if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) { 648 EXT4_ERROR_INODE(inode, 649 "current_block %llu + count %lu > %d!", 650 current_block, count, 651 EXT4_MAX_BLOCK_FILE_PHYS); 652 *err = -EIO; 653 goto failed_out; 654 } 655 656 target -= count; 657 /* allocate blocks for indirect blocks */ 658 while (index < indirect_blks && count) { 659 new_blocks[index++] = current_block++; 660 count--; 661 } 662 if (count > 0) { 663 /* 664 * save the new block number 665 * for the first direct block 666 */ 667 new_blocks[index] = current_block; 668 printk(KERN_INFO "%s returned more blocks than " 669 "requested\n", __func__); 670 WARN_ON(1); 671 break; 672 } 673 } 674 675 target = blks - count ; 676 blk_allocated = count; 677 if (!target) 678 goto allocated; 679 /* Now allocate data blocks */ 680 memset(&ar, 0, sizeof(ar)); 681 ar.inode = inode; 682 ar.goal = goal; 683 ar.len = target; 684 ar.logical = iblock; 685 if (S_ISREG(inode->i_mode)) 686 /* enable in-core preallocation only for regular files */ 687 ar.flags = EXT4_MB_HINT_DATA; 688 689 current_block = ext4_mb_new_blocks(handle, &ar, err); 690 if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) { 691 EXT4_ERROR_INODE(inode, 692 "current_block %llu + ar.len %d > %d!", 693 current_block, ar.len, 694 EXT4_MAX_BLOCK_FILE_PHYS); 695 *err = -EIO; 696 goto failed_out; 697 } 698 699 if (*err && (target == blks)) { 700 /* 701 * if the allocation failed and we didn't allocate 702 * any blocks before 703 */ 704 goto failed_out; 705 } 706 if (!*err) { 707 if (target == blks) { 708 /* 709 * save the new block number 710 * for the first direct block 711 */ 712 new_blocks[index] = current_block; 713 } 714 blk_allocated += ar.len; 715 } 716 allocated: 717 /* total number of blocks allocated for direct blocks */ 718 ret = blk_allocated; 719 *err = 0; 720 return ret; 721 failed_out: 722 for (i = 0; i < index; i++) 723 ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0); 724 return ret; 725 } 726 727 /** 728 * ext4_alloc_branch - allocate and set up a chain of blocks. 729 * @handle: handle for this transaction 730 * @inode: owner 731 * @indirect_blks: number of allocated indirect blocks 732 * @blks: number of allocated direct blocks 733 * @goal: preferred place for allocation 734 * @offsets: offsets (in the blocks) to store the pointers to next. 735 * @branch: place to store the chain in. 736 * 737 * This function allocates blocks, zeroes out all but the last one, 738 * links them into chain and (if we are synchronous) writes them to disk. 739 * In other words, it prepares a branch that can be spliced onto the 740 * inode. It stores the information about that chain in the branch[], in 741 * the same format as ext4_get_branch() would do. We are calling it after 742 * we had read the existing part of chain and partial points to the last 743 * triple of that (one with zero ->key). Upon the exit we have the same 744 * picture as after the successful ext4_get_block(), except that in one 745 * place chain is disconnected - *branch->p is still zero (we did not 746 * set the last link), but branch->key contains the number that should 747 * be placed into *branch->p to fill that gap. 748 * 749 * If allocation fails we free all blocks we've allocated (and forget 750 * their buffer_heads) and return the error value the from failed 751 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain 752 * as described above and return 0. 753 */ 754 static int ext4_alloc_branch(handle_t *handle, struct inode *inode, 755 ext4_lblk_t iblock, int indirect_blks, 756 int *blks, ext4_fsblk_t goal, 757 ext4_lblk_t *offsets, Indirect *branch) 758 { 759 int blocksize = inode->i_sb->s_blocksize; 760 int i, n = 0; 761 int err = 0; 762 struct buffer_head *bh; 763 int num; 764 ext4_fsblk_t new_blocks[4]; 765 ext4_fsblk_t current_block; 766 767 num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks, 768 *blks, new_blocks, &err); 769 if (err) 770 return err; 771 772 branch[0].key = cpu_to_le32(new_blocks[0]); 773 /* 774 * metadata blocks and data blocks are allocated. 775 */ 776 for (n = 1; n <= indirect_blks; n++) { 777 /* 778 * Get buffer_head for parent block, zero it out 779 * and set the pointer to new one, then send 780 * parent to disk. 781 */ 782 bh = sb_getblk(inode->i_sb, new_blocks[n-1]); 783 if (unlikely(!bh)) { 784 err = -EIO; 785 goto failed; 786 } 787 788 branch[n].bh = bh; 789 lock_buffer(bh); 790 BUFFER_TRACE(bh, "call get_create_access"); 791 err = ext4_journal_get_create_access(handle, bh); 792 if (err) { 793 /* Don't brelse(bh) here; it's done in 794 * ext4_journal_forget() below */ 795 unlock_buffer(bh); 796 goto failed; 797 } 798 799 memset(bh->b_data, 0, blocksize); 800 branch[n].p = (__le32 *) bh->b_data + offsets[n]; 801 branch[n].key = cpu_to_le32(new_blocks[n]); 802 *branch[n].p = branch[n].key; 803 if (n == indirect_blks) { 804 current_block = new_blocks[n]; 805 /* 806 * End of chain, update the last new metablock of 807 * the chain to point to the new allocated 808 * data blocks numbers 809 */ 810 for (i = 1; i < num; i++) 811 *(branch[n].p + i) = cpu_to_le32(++current_block); 812 } 813 BUFFER_TRACE(bh, "marking uptodate"); 814 set_buffer_uptodate(bh); 815 unlock_buffer(bh); 816 817 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 818 err = ext4_handle_dirty_metadata(handle, inode, bh); 819 if (err) 820 goto failed; 821 } 822 *blks = num; 823 return err; 824 failed: 825 /* Allocation failed, free what we already allocated */ 826 ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0); 827 for (i = 1; i <= n ; i++) { 828 /* 829 * branch[i].bh is newly allocated, so there is no 830 * need to revoke the block, which is why we don't 831 * need to set EXT4_FREE_BLOCKS_METADATA. 832 */ 833 ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 834 EXT4_FREE_BLOCKS_FORGET); 835 } 836 for (i = n+1; i < indirect_blks; i++) 837 ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0); 838 839 ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0); 840 841 return err; 842 } 843 844 /** 845 * ext4_splice_branch - splice the allocated branch onto inode. 846 * @handle: handle for this transaction 847 * @inode: owner 848 * @block: (logical) number of block we are adding 849 * @chain: chain of indirect blocks (with a missing link - see 850 * ext4_alloc_branch) 851 * @where: location of missing link 852 * @num: number of indirect blocks we are adding 853 * @blks: number of direct blocks we are adding 854 * 855 * This function fills the missing link and does all housekeeping needed in 856 * inode (->i_blocks, etc.). In case of success we end up with the full 857 * chain to new block and return 0. 858 */ 859 static int ext4_splice_branch(handle_t *handle, struct inode *inode, 860 ext4_lblk_t block, Indirect *where, int num, 861 int blks) 862 { 863 int i; 864 int err = 0; 865 ext4_fsblk_t current_block; 866 867 /* 868 * If we're splicing into a [td]indirect block (as opposed to the 869 * inode) then we need to get write access to the [td]indirect block 870 * before the splice. 871 */ 872 if (where->bh) { 873 BUFFER_TRACE(where->bh, "get_write_access"); 874 err = ext4_journal_get_write_access(handle, where->bh); 875 if (err) 876 goto err_out; 877 } 878 /* That's it */ 879 880 *where->p = where->key; 881 882 /* 883 * Update the host buffer_head or inode to point to more just allocated 884 * direct blocks blocks 885 */ 886 if (num == 0 && blks > 1) { 887 current_block = le32_to_cpu(where->key) + 1; 888 for (i = 1; i < blks; i++) 889 *(where->p + i) = cpu_to_le32(current_block++); 890 } 891 892 /* We are done with atomic stuff, now do the rest of housekeeping */ 893 /* had we spliced it onto indirect block? */ 894 if (where->bh) { 895 /* 896 * If we spliced it onto an indirect block, we haven't 897 * altered the inode. Note however that if it is being spliced 898 * onto an indirect block at the very end of the file (the 899 * file is growing) then we *will* alter the inode to reflect 900 * the new i_size. But that is not done here - it is done in 901 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode. 902 */ 903 jbd_debug(5, "splicing indirect only\n"); 904 BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata"); 905 err = ext4_handle_dirty_metadata(handle, inode, where->bh); 906 if (err) 907 goto err_out; 908 } else { 909 /* 910 * OK, we spliced it into the inode itself on a direct block. 911 */ 912 ext4_mark_inode_dirty(handle, inode); 913 jbd_debug(5, "splicing direct\n"); 914 } 915 return err; 916 917 err_out: 918 for (i = 1; i <= num; i++) { 919 /* 920 * branch[i].bh is newly allocated, so there is no 921 * need to revoke the block, which is why we don't 922 * need to set EXT4_FREE_BLOCKS_METADATA. 923 */ 924 ext4_free_blocks(handle, inode, where[i].bh, 0, 1, 925 EXT4_FREE_BLOCKS_FORGET); 926 } 927 ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key), 928 blks, 0); 929 930 return err; 931 } 932 933 /* 934 * The ext4_ind_map_blocks() function handles non-extents inodes 935 * (i.e., using the traditional indirect/double-indirect i_blocks 936 * scheme) for ext4_map_blocks(). 937 * 938 * Allocation strategy is simple: if we have to allocate something, we will 939 * have to go the whole way to leaf. So let's do it before attaching anything 940 * to tree, set linkage between the newborn blocks, write them if sync is 941 * required, recheck the path, free and repeat if check fails, otherwise 942 * set the last missing link (that will protect us from any truncate-generated 943 * removals - all blocks on the path are immune now) and possibly force the 944 * write on the parent block. 945 * That has a nice additional property: no special recovery from the failed 946 * allocations is needed - we simply release blocks and do not touch anything 947 * reachable from inode. 948 * 949 * `handle' can be NULL if create == 0. 950 * 951 * return > 0, # of blocks mapped or allocated. 952 * return = 0, if plain lookup failed. 953 * return < 0, error case. 954 * 955 * The ext4_ind_get_blocks() function should be called with 956 * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem 957 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or 958 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system 959 * blocks. 960 */ 961 static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, 962 struct ext4_map_blocks *map, 963 int flags) 964 { 965 int err = -EIO; 966 ext4_lblk_t offsets[4]; 967 Indirect chain[4]; 968 Indirect *partial; 969 ext4_fsblk_t goal; 970 int indirect_blks; 971 int blocks_to_boundary = 0; 972 int depth; 973 int count = 0; 974 ext4_fsblk_t first_block = 0; 975 976 trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 977 J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))); 978 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); 979 depth = ext4_block_to_path(inode, map->m_lblk, offsets, 980 &blocks_to_boundary); 981 982 if (depth == 0) 983 goto out; 984 985 partial = ext4_get_branch(inode, depth, offsets, chain, &err); 986 987 /* Simplest case - block found, no allocation needed */ 988 if (!partial) { 989 first_block = le32_to_cpu(chain[depth - 1].key); 990 count++; 991 /*map more blocks*/ 992 while (count < map->m_len && count <= blocks_to_boundary) { 993 ext4_fsblk_t blk; 994 995 blk = le32_to_cpu(*(chain[depth-1].p + count)); 996 997 if (blk == first_block + count) 998 count++; 999 else 1000 break; 1001 } 1002 goto got_it; 1003 } 1004 1005 /* Next simple case - plain lookup or failed read of indirect block */ 1006 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO) 1007 goto cleanup; 1008 1009 /* 1010 * Okay, we need to do block allocation. 1011 */ 1012 goal = ext4_find_goal(inode, map->m_lblk, partial); 1013 1014 /* the number of blocks need to allocate for [d,t]indirect blocks */ 1015 indirect_blks = (chain + depth) - partial - 1; 1016 1017 /* 1018 * Next look up the indirect map to count the totoal number of 1019 * direct blocks to allocate for this branch. 1020 */ 1021 count = ext4_blks_to_allocate(partial, indirect_blks, 1022 map->m_len, blocks_to_boundary); 1023 /* 1024 * Block out ext4_truncate while we alter the tree 1025 */ 1026 err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks, 1027 &count, goal, 1028 offsets + (partial - chain), partial); 1029 1030 /* 1031 * The ext4_splice_branch call will free and forget any buffers 1032 * on the new chain if there is a failure, but that risks using 1033 * up transaction credits, especially for bitmaps where the 1034 * credits cannot be returned. Can we handle this somehow? We 1035 * may need to return -EAGAIN upwards in the worst case. --sct 1036 */ 1037 if (!err) 1038 err = ext4_splice_branch(handle, inode, map->m_lblk, 1039 partial, indirect_blks, count); 1040 if (err) 1041 goto cleanup; 1042 1043 map->m_flags |= EXT4_MAP_NEW; 1044 1045 ext4_update_inode_fsync_trans(handle, inode, 1); 1046 got_it: 1047 map->m_flags |= EXT4_MAP_MAPPED; 1048 map->m_pblk = le32_to_cpu(chain[depth-1].key); 1049 map->m_len = count; 1050 if (count > blocks_to_boundary) 1051 map->m_flags |= EXT4_MAP_BOUNDARY; 1052 err = count; 1053 /* Clean up and exit */ 1054 partial = chain + depth - 1; /* the whole chain */ 1055 cleanup: 1056 while (partial > chain) { 1057 BUFFER_TRACE(partial->bh, "call brelse"); 1058 brelse(partial->bh); 1059 partial--; 1060 } 1061 out: 1062 trace_ext4_ind_map_blocks_exit(inode, map->m_lblk, 1063 map->m_pblk, map->m_len, err); 1064 return err; 1065 } 1066 1067 #ifdef CONFIG_QUOTA 1068 qsize_t *ext4_get_reserved_space(struct inode *inode) 1069 { 1070 return &EXT4_I(inode)->i_reserved_quota; 1071 } 1072 #endif 1073 1074 /* 1075 * Calculate the number of metadata blocks need to reserve 1076 * to allocate a new block at @lblocks for non extent file based file 1077 */ 1078 static int ext4_indirect_calc_metadata_amount(struct inode *inode, 1079 sector_t lblock) 1080 { 1081 struct ext4_inode_info *ei = EXT4_I(inode); 1082 sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1); 1083 int blk_bits; 1084 1085 if (lblock < EXT4_NDIR_BLOCKS) 1086 return 0; 1087 1088 lblock -= EXT4_NDIR_BLOCKS; 1089 1090 if (ei->i_da_metadata_calc_len && 1091 (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) { 1092 ei->i_da_metadata_calc_len++; 1093 return 0; 1094 } 1095 ei->i_da_metadata_calc_last_lblock = lblock & dind_mask; 1096 ei->i_da_metadata_calc_len = 1; 1097 blk_bits = order_base_2(lblock); 1098 return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1; 1099 } 1100 1101 /* 1102 * Calculate the number of metadata blocks need to reserve 1103 * to allocate a block located at @lblock 1104 */ 1105 static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) 1106 { 1107 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 1108 return ext4_ext_calc_metadata_amount(inode, lblock); 1109 1110 return ext4_indirect_calc_metadata_amount(inode, lblock); 1111 } 1112 1113 /* 1114 * Called with i_data_sem down, which is important since we can call 1115 * ext4_discard_preallocations() from here. 1116 */ 1117 void ext4_da_update_reserve_space(struct inode *inode, 1118 int used, int quota_claim) 1119 { 1120 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1121 struct ext4_inode_info *ei = EXT4_I(inode); 1122 1123 spin_lock(&ei->i_block_reservation_lock); 1124 trace_ext4_da_update_reserve_space(inode, used); 1125 if (unlikely(used > ei->i_reserved_data_blocks)) { 1126 ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d " 1127 "with only %d reserved data blocks\n", 1128 __func__, inode->i_ino, used, 1129 ei->i_reserved_data_blocks); 1130 WARN_ON(1); 1131 used = ei->i_reserved_data_blocks; 1132 } 1133 1134 /* Update per-inode reservations */ 1135 ei->i_reserved_data_blocks -= used; 1136 ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks; 1137 percpu_counter_sub(&sbi->s_dirtyblocks_counter, 1138 used + ei->i_allocated_meta_blocks); 1139 ei->i_allocated_meta_blocks = 0; 1140 1141 if (ei->i_reserved_data_blocks == 0) { 1142 /* 1143 * We can release all of the reserved metadata blocks 1144 * only when we have written all of the delayed 1145 * allocation blocks. 1146 */ 1147 percpu_counter_sub(&sbi->s_dirtyblocks_counter, 1148 ei->i_reserved_meta_blocks); 1149 ei->i_reserved_meta_blocks = 0; 1150 ei->i_da_metadata_calc_len = 0; 1151 } 1152 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1153 1154 /* Update quota subsystem for data blocks */ 1155 if (quota_claim) 1156 dquot_claim_block(inode, used); 1157 else { 1158 /* 1159 * We did fallocate with an offset that is already delayed 1160 * allocated. So on delayed allocated writeback we should 1161 * not re-claim the quota for fallocated blocks. 1162 */ 1163 dquot_release_reservation_block(inode, used); 1164 } 1165 1166 /* 1167 * If we have done all the pending block allocations and if 1168 * there aren't any writers on the inode, we can discard the 1169 * inode's preallocations. 1170 */ 1171 if ((ei->i_reserved_data_blocks == 0) && 1172 (atomic_read(&inode->i_writecount) == 0)) 1173 ext4_discard_preallocations(inode); 1174 } 1175 1176 static int __check_block_validity(struct inode *inode, const char *func, 1177 unsigned int line, 1178 struct ext4_map_blocks *map) 1179 { 1180 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, 1181 map->m_len)) { 1182 ext4_error_inode(inode, func, line, map->m_pblk, 1183 "lblock %lu mapped to illegal pblock " 1184 "(length %d)", (unsigned long) map->m_lblk, 1185 map->m_len); 1186 return -EIO; 1187 } 1188 return 0; 1189 } 1190 1191 #define check_block_validity(inode, map) \ 1192 __check_block_validity((inode), __func__, __LINE__, (map)) 1193 1194 /* 1195 * Return the number of contiguous dirty pages in a given inode 1196 * starting at page frame idx. 1197 */ 1198 static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, 1199 unsigned int max_pages) 1200 { 1201 struct address_space *mapping = inode->i_mapping; 1202 pgoff_t index; 1203 struct pagevec pvec; 1204 pgoff_t num = 0; 1205 int i, nr_pages, done = 0; 1206 1207 if (max_pages == 0) 1208 return 0; 1209 pagevec_init(&pvec, 0); 1210 while (!done) { 1211 index = idx; 1212 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, 1213 PAGECACHE_TAG_DIRTY, 1214 (pgoff_t)PAGEVEC_SIZE); 1215 if (nr_pages == 0) 1216 break; 1217 for (i = 0; i < nr_pages; i++) { 1218 struct page *page = pvec.pages[i]; 1219 struct buffer_head *bh, *head; 1220 1221 lock_page(page); 1222 if (unlikely(page->mapping != mapping) || 1223 !PageDirty(page) || 1224 PageWriteback(page) || 1225 page->index != idx) { 1226 done = 1; 1227 unlock_page(page); 1228 break; 1229 } 1230 if (page_has_buffers(page)) { 1231 bh = head = page_buffers(page); 1232 do { 1233 if (!buffer_delay(bh) && 1234 !buffer_unwritten(bh)) 1235 done = 1; 1236 bh = bh->b_this_page; 1237 } while (!done && (bh != head)); 1238 } 1239 unlock_page(page); 1240 if (done) 1241 break; 1242 idx++; 1243 num++; 1244 if (num >= max_pages) { 1245 done = 1; 1246 break; 1247 } 1248 } 1249 pagevec_release(&pvec); 1250 } 1251 return num; 1252 } 1253 1254 /* 1255 * The ext4_map_blocks() function tries to look up the requested blocks, 1256 * and returns if the blocks are already mapped. 1257 * 1258 * Otherwise it takes the write lock of the i_data_sem and allocate blocks 1259 * and store the allocated blocks in the result buffer head and mark it 1260 * mapped. 1261 * 1262 * If file type is extents based, it will call ext4_ext_map_blocks(), 1263 * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping 1264 * based files 1265 * 1266 * On success, it returns the number of blocks being mapped or allocate. 1267 * if create==0 and the blocks are pre-allocated and uninitialized block, 1268 * the result buffer head is unmapped. If the create ==1, it will make sure 1269 * the buffer head is mapped. 1270 * 1271 * It returns 0 if plain look up failed (blocks have not been allocated), in 1272 * that casem, buffer head is unmapped 1273 * 1274 * It returns the error in case of allocation failure. 1275 */ 1276 int ext4_map_blocks(handle_t *handle, struct inode *inode, 1277 struct ext4_map_blocks *map, int flags) 1278 { 1279 int retval; 1280 1281 map->m_flags = 0; 1282 ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," 1283 "logical block %lu\n", inode->i_ino, flags, map->m_len, 1284 (unsigned long) map->m_lblk); 1285 /* 1286 * Try to see if we can get the block without requesting a new 1287 * file system block. 1288 */ 1289 down_read((&EXT4_I(inode)->i_data_sem)); 1290 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 1291 retval = ext4_ext_map_blocks(handle, inode, map, 0); 1292 } else { 1293 retval = ext4_ind_map_blocks(handle, inode, map, 0); 1294 } 1295 up_read((&EXT4_I(inode)->i_data_sem)); 1296 1297 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { 1298 int ret = check_block_validity(inode, map); 1299 if (ret != 0) 1300 return ret; 1301 } 1302 1303 /* If it is only a block(s) look up */ 1304 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) 1305 return retval; 1306 1307 /* 1308 * Returns if the blocks have already allocated 1309 * 1310 * Note that if blocks have been preallocated 1311 * ext4_ext_get_block() returns th create = 0 1312 * with buffer head unmapped. 1313 */ 1314 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) 1315 return retval; 1316 1317 /* 1318 * When we call get_blocks without the create flag, the 1319 * BH_Unwritten flag could have gotten set if the blocks 1320 * requested were part of a uninitialized extent. We need to 1321 * clear this flag now that we are committed to convert all or 1322 * part of the uninitialized extent to be an initialized 1323 * extent. This is because we need to avoid the combination 1324 * of BH_Unwritten and BH_Mapped flags being simultaneously 1325 * set on the buffer_head. 1326 */ 1327 map->m_flags &= ~EXT4_MAP_UNWRITTEN; 1328 1329 /* 1330 * New blocks allocate and/or writing to uninitialized extent 1331 * will possibly result in updating i_data, so we take 1332 * the write lock of i_data_sem, and call get_blocks() 1333 * with create == 1 flag. 1334 */ 1335 down_write((&EXT4_I(inode)->i_data_sem)); 1336 1337 /* 1338 * if the caller is from delayed allocation writeout path 1339 * we have already reserved fs blocks for allocation 1340 * let the underlying get_block() function know to 1341 * avoid double accounting 1342 */ 1343 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 1344 ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED); 1345 /* 1346 * We need to check for EXT4 here because migrate 1347 * could have changed the inode type in between 1348 */ 1349 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 1350 retval = ext4_ext_map_blocks(handle, inode, map, flags); 1351 } else { 1352 retval = ext4_ind_map_blocks(handle, inode, map, flags); 1353 1354 if (retval > 0 && map->m_flags & EXT4_MAP_NEW) { 1355 /* 1356 * We allocated new blocks which will result in 1357 * i_data's format changing. Force the migrate 1358 * to fail by clearing migrate flags 1359 */ 1360 ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE); 1361 } 1362 1363 /* 1364 * Update reserved blocks/metadata blocks after successful 1365 * block allocation which had been deferred till now. We don't 1366 * support fallocate for non extent files. So we can update 1367 * reserve space here. 1368 */ 1369 if ((retval > 0) && 1370 (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)) 1371 ext4_da_update_reserve_space(inode, retval, 1); 1372 } 1373 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 1374 ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED); 1375 1376 up_write((&EXT4_I(inode)->i_data_sem)); 1377 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { 1378 int ret = check_block_validity(inode, map); 1379 if (ret != 0) 1380 return ret; 1381 } 1382 return retval; 1383 } 1384 1385 /* Maximum number of blocks we map for direct IO at once. */ 1386 #define DIO_MAX_BLOCKS 4096 1387 1388 static int _ext4_get_block(struct inode *inode, sector_t iblock, 1389 struct buffer_head *bh, int flags) 1390 { 1391 handle_t *handle = ext4_journal_current_handle(); 1392 struct ext4_map_blocks map; 1393 int ret = 0, started = 0; 1394 int dio_credits; 1395 1396 map.m_lblk = iblock; 1397 map.m_len = bh->b_size >> inode->i_blkbits; 1398 1399 if (flags && !handle) { 1400 /* Direct IO write... */ 1401 if (map.m_len > DIO_MAX_BLOCKS) 1402 map.m_len = DIO_MAX_BLOCKS; 1403 dio_credits = ext4_chunk_trans_blocks(inode, map.m_len); 1404 handle = ext4_journal_start(inode, dio_credits); 1405 if (IS_ERR(handle)) { 1406 ret = PTR_ERR(handle); 1407 return ret; 1408 } 1409 started = 1; 1410 } 1411 1412 ret = ext4_map_blocks(handle, inode, &map, flags); 1413 if (ret > 0) { 1414 map_bh(bh, inode->i_sb, map.m_pblk); 1415 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 1416 bh->b_size = inode->i_sb->s_blocksize * map.m_len; 1417 ret = 0; 1418 } 1419 if (started) 1420 ext4_journal_stop(handle); 1421 return ret; 1422 } 1423 1424 int ext4_get_block(struct inode *inode, sector_t iblock, 1425 struct buffer_head *bh, int create) 1426 { 1427 return _ext4_get_block(inode, iblock, bh, 1428 create ? EXT4_GET_BLOCKS_CREATE : 0); 1429 } 1430 1431 /* 1432 * `handle' can be NULL if create is zero 1433 */ 1434 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, 1435 ext4_lblk_t block, int create, int *errp) 1436 { 1437 struct ext4_map_blocks map; 1438 struct buffer_head *bh; 1439 int fatal = 0, err; 1440 1441 J_ASSERT(handle != NULL || create == 0); 1442 1443 map.m_lblk = block; 1444 map.m_len = 1; 1445 err = ext4_map_blocks(handle, inode, &map, 1446 create ? EXT4_GET_BLOCKS_CREATE : 0); 1447 1448 if (err < 0) 1449 *errp = err; 1450 if (err <= 0) 1451 return NULL; 1452 *errp = 0; 1453 1454 bh = sb_getblk(inode->i_sb, map.m_pblk); 1455 if (!bh) { 1456 *errp = -EIO; 1457 return NULL; 1458 } 1459 if (map.m_flags & EXT4_MAP_NEW) { 1460 J_ASSERT(create != 0); 1461 J_ASSERT(handle != NULL); 1462 1463 /* 1464 * Now that we do not always journal data, we should 1465 * keep in mind whether this should always journal the 1466 * new buffer as metadata. For now, regular file 1467 * writes use ext4_get_block instead, so it's not a 1468 * problem. 1469 */ 1470 lock_buffer(bh); 1471 BUFFER_TRACE(bh, "call get_create_access"); 1472 fatal = ext4_journal_get_create_access(handle, bh); 1473 if (!fatal && !buffer_uptodate(bh)) { 1474 memset(bh->b_data, 0, inode->i_sb->s_blocksize); 1475 set_buffer_uptodate(bh); 1476 } 1477 unlock_buffer(bh); 1478 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 1479 err = ext4_handle_dirty_metadata(handle, inode, bh); 1480 if (!fatal) 1481 fatal = err; 1482 } else { 1483 BUFFER_TRACE(bh, "not a new buffer"); 1484 } 1485 if (fatal) { 1486 *errp = fatal; 1487 brelse(bh); 1488 bh = NULL; 1489 } 1490 return bh; 1491 } 1492 1493 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, 1494 ext4_lblk_t block, int create, int *err) 1495 { 1496 struct buffer_head *bh; 1497 1498 bh = ext4_getblk(handle, inode, block, create, err); 1499 if (!bh) 1500 return bh; 1501 if (buffer_uptodate(bh)) 1502 return bh; 1503 ll_rw_block(READ_META, 1, &bh); 1504 wait_on_buffer(bh); 1505 if (buffer_uptodate(bh)) 1506 return bh; 1507 put_bh(bh); 1508 *err = -EIO; 1509 return NULL; 1510 } 1511 1512 static int walk_page_buffers(handle_t *handle, 1513 struct buffer_head *head, 1514 unsigned from, 1515 unsigned to, 1516 int *partial, 1517 int (*fn)(handle_t *handle, 1518 struct buffer_head *bh)) 1519 { 1520 struct buffer_head *bh; 1521 unsigned block_start, block_end; 1522 unsigned blocksize = head->b_size; 1523 int err, ret = 0; 1524 struct buffer_head *next; 1525 1526 for (bh = head, block_start = 0; 1527 ret == 0 && (bh != head || !block_start); 1528 block_start = block_end, bh = next) { 1529 next = bh->b_this_page; 1530 block_end = block_start + blocksize; 1531 if (block_end <= from || block_start >= to) { 1532 if (partial && !buffer_uptodate(bh)) 1533 *partial = 1; 1534 continue; 1535 } 1536 err = (*fn)(handle, bh); 1537 if (!ret) 1538 ret = err; 1539 } 1540 return ret; 1541 } 1542 1543 /* 1544 * To preserve ordering, it is essential that the hole instantiation and 1545 * the data write be encapsulated in a single transaction. We cannot 1546 * close off a transaction and start a new one between the ext4_get_block() 1547 * and the commit_write(). So doing the jbd2_journal_start at the start of 1548 * prepare_write() is the right place. 1549 * 1550 * Also, this function can nest inside ext4_writepage() -> 1551 * block_write_full_page(). In that case, we *know* that ext4_writepage() 1552 * has generated enough buffer credits to do the whole page. So we won't 1553 * block on the journal in that case, which is good, because the caller may 1554 * be PF_MEMALLOC. 1555 * 1556 * By accident, ext4 can be reentered when a transaction is open via 1557 * quota file writes. If we were to commit the transaction while thus 1558 * reentered, there can be a deadlock - we would be holding a quota 1559 * lock, and the commit would never complete if another thread had a 1560 * transaction open and was blocking on the quota lock - a ranking 1561 * violation. 1562 * 1563 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start 1564 * will _not_ run commit under these circumstances because handle->h_ref 1565 * is elevated. We'll still have enough credits for the tiny quotafile 1566 * write. 1567 */ 1568 static int do_journal_get_write_access(handle_t *handle, 1569 struct buffer_head *bh) 1570 { 1571 int dirty = buffer_dirty(bh); 1572 int ret; 1573 1574 if (!buffer_mapped(bh) || buffer_freed(bh)) 1575 return 0; 1576 /* 1577 * __block_write_begin() could have dirtied some buffers. Clean 1578 * the dirty bit as jbd2_journal_get_write_access() could complain 1579 * otherwise about fs integrity issues. Setting of the dirty bit 1580 * by __block_write_begin() isn't a real problem here as we clear 1581 * the bit before releasing a page lock and thus writeback cannot 1582 * ever write the buffer. 1583 */ 1584 if (dirty) 1585 clear_buffer_dirty(bh); 1586 ret = ext4_journal_get_write_access(handle, bh); 1587 if (!ret && dirty) 1588 ret = ext4_handle_dirty_metadata(handle, NULL, bh); 1589 return ret; 1590 } 1591 1592 /* 1593 * Truncate blocks that were not used by write. We have to truncate the 1594 * pagecache as well so that corresponding buffers get properly unmapped. 1595 */ 1596 static void ext4_truncate_failed_write(struct inode *inode) 1597 { 1598 truncate_inode_pages(inode->i_mapping, inode->i_size); 1599 ext4_truncate(inode); 1600 } 1601 1602 static int ext4_get_block_write(struct inode *inode, sector_t iblock, 1603 struct buffer_head *bh_result, int create); 1604 static int ext4_write_begin(struct file *file, struct address_space *mapping, 1605 loff_t pos, unsigned len, unsigned flags, 1606 struct page **pagep, void **fsdata) 1607 { 1608 struct inode *inode = mapping->host; 1609 int ret, needed_blocks; 1610 handle_t *handle; 1611 int retries = 0; 1612 struct page *page; 1613 pgoff_t index; 1614 unsigned from, to; 1615 1616 trace_ext4_write_begin(inode, pos, len, flags); 1617 /* 1618 * Reserve one block more for addition to orphan list in case 1619 * we allocate blocks but write fails for some reason 1620 */ 1621 needed_blocks = ext4_writepage_trans_blocks(inode) + 1; 1622 index = pos >> PAGE_CACHE_SHIFT; 1623 from = pos & (PAGE_CACHE_SIZE - 1); 1624 to = from + len; 1625 1626 retry: 1627 handle = ext4_journal_start(inode, needed_blocks); 1628 if (IS_ERR(handle)) { 1629 ret = PTR_ERR(handle); 1630 goto out; 1631 } 1632 1633 /* We cannot recurse into the filesystem as the transaction is already 1634 * started */ 1635 flags |= AOP_FLAG_NOFS; 1636 1637 page = grab_cache_page_write_begin(mapping, index, flags); 1638 if (!page) { 1639 ext4_journal_stop(handle); 1640 ret = -ENOMEM; 1641 goto out; 1642 } 1643 *pagep = page; 1644 1645 if (ext4_should_dioread_nolock(inode)) 1646 ret = __block_write_begin(page, pos, len, ext4_get_block_write); 1647 else 1648 ret = __block_write_begin(page, pos, len, ext4_get_block); 1649 1650 if (!ret && ext4_should_journal_data(inode)) { 1651 ret = walk_page_buffers(handle, page_buffers(page), 1652 from, to, NULL, do_journal_get_write_access); 1653 } 1654 1655 if (ret) { 1656 unlock_page(page); 1657 page_cache_release(page); 1658 /* 1659 * __block_write_begin may have instantiated a few blocks 1660 * outside i_size. Trim these off again. Don't need 1661 * i_size_read because we hold i_mutex. 1662 * 1663 * Add inode to orphan list in case we crash before 1664 * truncate finishes 1665 */ 1666 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1667 ext4_orphan_add(handle, inode); 1668 1669 ext4_journal_stop(handle); 1670 if (pos + len > inode->i_size) { 1671 ext4_truncate_failed_write(inode); 1672 /* 1673 * If truncate failed early the inode might 1674 * still be on the orphan list; we need to 1675 * make sure the inode is removed from the 1676 * orphan list in that case. 1677 */ 1678 if (inode->i_nlink) 1679 ext4_orphan_del(NULL, inode); 1680 } 1681 } 1682 1683 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 1684 goto retry; 1685 out: 1686 return ret; 1687 } 1688 1689 /* For write_end() in data=journal mode */ 1690 static int write_end_fn(handle_t *handle, struct buffer_head *bh) 1691 { 1692 if (!buffer_mapped(bh) || buffer_freed(bh)) 1693 return 0; 1694 set_buffer_uptodate(bh); 1695 return ext4_handle_dirty_metadata(handle, NULL, bh); 1696 } 1697 1698 static int ext4_generic_write_end(struct file *file, 1699 struct address_space *mapping, 1700 loff_t pos, unsigned len, unsigned copied, 1701 struct page *page, void *fsdata) 1702 { 1703 int i_size_changed = 0; 1704 struct inode *inode = mapping->host; 1705 handle_t *handle = ext4_journal_current_handle(); 1706 1707 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 1708 1709 /* 1710 * No need to use i_size_read() here, the i_size 1711 * cannot change under us because we hold i_mutex. 1712 * 1713 * But it's important to update i_size while still holding page lock: 1714 * page writeout could otherwise come in and zero beyond i_size. 1715 */ 1716 if (pos + copied > inode->i_size) { 1717 i_size_write(inode, pos + copied); 1718 i_size_changed = 1; 1719 } 1720 1721 if (pos + copied > EXT4_I(inode)->i_disksize) { 1722 /* We need to mark inode dirty even if 1723 * new_i_size is less that inode->i_size 1724 * bu greater than i_disksize.(hint delalloc) 1725 */ 1726 ext4_update_i_disksize(inode, (pos + copied)); 1727 i_size_changed = 1; 1728 } 1729 unlock_page(page); 1730 page_cache_release(page); 1731 1732 /* 1733 * Don't mark the inode dirty under page lock. First, it unnecessarily 1734 * makes the holding time of page lock longer. Second, it forces lock 1735 * ordering of page lock and transaction start for journaling 1736 * filesystems. 1737 */ 1738 if (i_size_changed) 1739 ext4_mark_inode_dirty(handle, inode); 1740 1741 return copied; 1742 } 1743 1744 /* 1745 * We need to pick up the new inode size which generic_commit_write gave us 1746 * `file' can be NULL - eg, when called from page_symlink(). 1747 * 1748 * ext4 never places buffers on inode->i_mapping->private_list. metadata 1749 * buffers are managed internally. 1750 */ 1751 static int ext4_ordered_write_end(struct file *file, 1752 struct address_space *mapping, 1753 loff_t pos, unsigned len, unsigned copied, 1754 struct page *page, void *fsdata) 1755 { 1756 handle_t *handle = ext4_journal_current_handle(); 1757 struct inode *inode = mapping->host; 1758 int ret = 0, ret2; 1759 1760 trace_ext4_ordered_write_end(inode, pos, len, copied); 1761 ret = ext4_jbd2_file_inode(handle, inode); 1762 1763 if (ret == 0) { 1764 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, 1765 page, fsdata); 1766 copied = ret2; 1767 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1768 /* if we have allocated more blocks and copied 1769 * less. We will have blocks allocated outside 1770 * inode->i_size. So truncate them 1771 */ 1772 ext4_orphan_add(handle, inode); 1773 if (ret2 < 0) 1774 ret = ret2; 1775 } 1776 ret2 = ext4_journal_stop(handle); 1777 if (!ret) 1778 ret = ret2; 1779 1780 if (pos + len > inode->i_size) { 1781 ext4_truncate_failed_write(inode); 1782 /* 1783 * If truncate failed early the inode might still be 1784 * on the orphan list; we need to make sure the inode 1785 * is removed from the orphan list in that case. 1786 */ 1787 if (inode->i_nlink) 1788 ext4_orphan_del(NULL, inode); 1789 } 1790 1791 1792 return ret ? ret : copied; 1793 } 1794 1795 static int ext4_writeback_write_end(struct file *file, 1796 struct address_space *mapping, 1797 loff_t pos, unsigned len, unsigned copied, 1798 struct page *page, void *fsdata) 1799 { 1800 handle_t *handle = ext4_journal_current_handle(); 1801 struct inode *inode = mapping->host; 1802 int ret = 0, ret2; 1803 1804 trace_ext4_writeback_write_end(inode, pos, len, copied); 1805 ret2 = ext4_generic_write_end(file, mapping, pos, len, copied, 1806 page, fsdata); 1807 copied = ret2; 1808 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1809 /* if we have allocated more blocks and copied 1810 * less. We will have blocks allocated outside 1811 * inode->i_size. So truncate them 1812 */ 1813 ext4_orphan_add(handle, inode); 1814 1815 if (ret2 < 0) 1816 ret = ret2; 1817 1818 ret2 = ext4_journal_stop(handle); 1819 if (!ret) 1820 ret = ret2; 1821 1822 if (pos + len > inode->i_size) { 1823 ext4_truncate_failed_write(inode); 1824 /* 1825 * If truncate failed early the inode might still be 1826 * on the orphan list; we need to make sure the inode 1827 * is removed from the orphan list in that case. 1828 */ 1829 if (inode->i_nlink) 1830 ext4_orphan_del(NULL, inode); 1831 } 1832 1833 return ret ? ret : copied; 1834 } 1835 1836 static int ext4_journalled_write_end(struct file *file, 1837 struct address_space *mapping, 1838 loff_t pos, unsigned len, unsigned copied, 1839 struct page *page, void *fsdata) 1840 { 1841 handle_t *handle = ext4_journal_current_handle(); 1842 struct inode *inode = mapping->host; 1843 int ret = 0, ret2; 1844 int partial = 0; 1845 unsigned from, to; 1846 loff_t new_i_size; 1847 1848 trace_ext4_journalled_write_end(inode, pos, len, copied); 1849 from = pos & (PAGE_CACHE_SIZE - 1); 1850 to = from + len; 1851 1852 if (copied < len) { 1853 if (!PageUptodate(page)) 1854 copied = 0; 1855 page_zero_new_buffers(page, from+copied, to); 1856 } 1857 1858 ret = walk_page_buffers(handle, page_buffers(page), from, 1859 to, &partial, write_end_fn); 1860 if (!partial) 1861 SetPageUptodate(page); 1862 new_i_size = pos + copied; 1863 if (new_i_size > inode->i_size) 1864 i_size_write(inode, pos+copied); 1865 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 1866 if (new_i_size > EXT4_I(inode)->i_disksize) { 1867 ext4_update_i_disksize(inode, new_i_size); 1868 ret2 = ext4_mark_inode_dirty(handle, inode); 1869 if (!ret) 1870 ret = ret2; 1871 } 1872 1873 unlock_page(page); 1874 page_cache_release(page); 1875 if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1876 /* if we have allocated more blocks and copied 1877 * less. We will have blocks allocated outside 1878 * inode->i_size. So truncate them 1879 */ 1880 ext4_orphan_add(handle, inode); 1881 1882 ret2 = ext4_journal_stop(handle); 1883 if (!ret) 1884 ret = ret2; 1885 if (pos + len > inode->i_size) { 1886 ext4_truncate_failed_write(inode); 1887 /* 1888 * If truncate failed early the inode might still be 1889 * on the orphan list; we need to make sure the inode 1890 * is removed from the orphan list in that case. 1891 */ 1892 if (inode->i_nlink) 1893 ext4_orphan_del(NULL, inode); 1894 } 1895 1896 return ret ? ret : copied; 1897 } 1898 1899 /* 1900 * Reserve a single block located at lblock 1901 */ 1902 static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) 1903 { 1904 int retries = 0; 1905 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1906 struct ext4_inode_info *ei = EXT4_I(inode); 1907 unsigned long md_needed; 1908 int ret; 1909 1910 /* 1911 * recalculate the amount of metadata blocks to reserve 1912 * in order to allocate nrblocks 1913 * worse case is one extent per block 1914 */ 1915 repeat: 1916 spin_lock(&ei->i_block_reservation_lock); 1917 md_needed = ext4_calc_metadata_amount(inode, lblock); 1918 trace_ext4_da_reserve_space(inode, md_needed); 1919 spin_unlock(&ei->i_block_reservation_lock); 1920 1921 /* 1922 * We will charge metadata quota at writeout time; this saves 1923 * us from metadata over-estimation, though we may go over by 1924 * a small amount in the end. Here we just reserve for data. 1925 */ 1926 ret = dquot_reserve_block(inode, 1); 1927 if (ret) 1928 return ret; 1929 /* 1930 * We do still charge estimated metadata to the sb though; 1931 * we cannot afford to run out of free blocks. 1932 */ 1933 if (ext4_claim_free_blocks(sbi, md_needed + 1, 0)) { 1934 dquot_release_reservation_block(inode, 1); 1935 if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1936 yield(); 1937 goto repeat; 1938 } 1939 return -ENOSPC; 1940 } 1941 spin_lock(&ei->i_block_reservation_lock); 1942 ei->i_reserved_data_blocks++; 1943 ei->i_reserved_meta_blocks += md_needed; 1944 spin_unlock(&ei->i_block_reservation_lock); 1945 1946 return 0; /* success */ 1947 } 1948 1949 static void ext4_da_release_space(struct inode *inode, int to_free) 1950 { 1951 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1952 struct ext4_inode_info *ei = EXT4_I(inode); 1953 1954 if (!to_free) 1955 return; /* Nothing to release, exit */ 1956 1957 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1958 1959 trace_ext4_da_release_space(inode, to_free); 1960 if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1961 /* 1962 * if there aren't enough reserved blocks, then the 1963 * counter is messed up somewhere. Since this 1964 * function is called from invalidate page, it's 1965 * harmless to return without any action. 1966 */ 1967 ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: " 1968 "ino %lu, to_free %d with only %d reserved " 1969 "data blocks\n", inode->i_ino, to_free, 1970 ei->i_reserved_data_blocks); 1971 WARN_ON(1); 1972 to_free = ei->i_reserved_data_blocks; 1973 } 1974 ei->i_reserved_data_blocks -= to_free; 1975 1976 if (ei->i_reserved_data_blocks == 0) { 1977 /* 1978 * We can release all of the reserved metadata blocks 1979 * only when we have written all of the delayed 1980 * allocation blocks. 1981 */ 1982 percpu_counter_sub(&sbi->s_dirtyblocks_counter, 1983 ei->i_reserved_meta_blocks); 1984 ei->i_reserved_meta_blocks = 0; 1985 ei->i_da_metadata_calc_len = 0; 1986 } 1987 1988 /* update fs dirty data blocks counter */ 1989 percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free); 1990 1991 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1992 1993 dquot_release_reservation_block(inode, to_free); 1994 } 1995 1996 static void ext4_da_page_release_reservation(struct page *page, 1997 unsigned long offset) 1998 { 1999 int to_release = 0; 2000 struct buffer_head *head, *bh; 2001 unsigned int curr_off = 0; 2002 2003 head = page_buffers(page); 2004 bh = head; 2005 do { 2006 unsigned int next_off = curr_off + bh->b_size; 2007 2008 if ((offset <= curr_off) && (buffer_delay(bh))) { 2009 to_release++; 2010 clear_buffer_delay(bh); 2011 } 2012 curr_off = next_off; 2013 } while ((bh = bh->b_this_page) != head); 2014 ext4_da_release_space(page->mapping->host, to_release); 2015 } 2016 2017 /* 2018 * Delayed allocation stuff 2019 */ 2020 2021 /* 2022 * mpage_da_submit_io - walks through extent of pages and try to write 2023 * them with writepage() call back 2024 * 2025 * @mpd->inode: inode 2026 * @mpd->first_page: first page of the extent 2027 * @mpd->next_page: page after the last page of the extent 2028 * 2029 * By the time mpage_da_submit_io() is called we expect all blocks 2030 * to be allocated. this may be wrong if allocation failed. 2031 * 2032 * As pages are already locked by write_cache_pages(), we can't use it 2033 */ 2034 static int mpage_da_submit_io(struct mpage_da_data *mpd, 2035 struct ext4_map_blocks *map) 2036 { 2037 struct pagevec pvec; 2038 unsigned long index, end; 2039 int ret = 0, err, nr_pages, i; 2040 struct inode *inode = mpd->inode; 2041 struct address_space *mapping = inode->i_mapping; 2042 loff_t size = i_size_read(inode); 2043 unsigned int len, block_start; 2044 struct buffer_head *bh, *page_bufs = NULL; 2045 int journal_data = ext4_should_journal_data(inode); 2046 sector_t pblock = 0, cur_logical = 0; 2047 struct ext4_io_submit io_submit; 2048 2049 BUG_ON(mpd->next_page <= mpd->first_page); 2050 memset(&io_submit, 0, sizeof(io_submit)); 2051 /* 2052 * We need to start from the first_page to the next_page - 1 2053 * to make sure we also write the mapped dirty buffer_heads. 2054 * If we look at mpd->b_blocknr we would only be looking 2055 * at the currently mapped buffer_heads. 2056 */ 2057 index = mpd->first_page; 2058 end = mpd->next_page - 1; 2059 2060 pagevec_init(&pvec, 0); 2061 while (index <= end) { 2062 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 2063 if (nr_pages == 0) 2064 break; 2065 for (i = 0; i < nr_pages; i++) { 2066 int commit_write = 0, skip_page = 0; 2067 struct page *page = pvec.pages[i]; 2068 2069 index = page->index; 2070 if (index > end) 2071 break; 2072 2073 if (index == size >> PAGE_CACHE_SHIFT) 2074 len = size & ~PAGE_CACHE_MASK; 2075 else 2076 len = PAGE_CACHE_SIZE; 2077 if (map) { 2078 cur_logical = index << (PAGE_CACHE_SHIFT - 2079 inode->i_blkbits); 2080 pblock = map->m_pblk + (cur_logical - 2081 map->m_lblk); 2082 } 2083 index++; 2084 2085 BUG_ON(!PageLocked(page)); 2086 BUG_ON(PageWriteback(page)); 2087 2088 /* 2089 * If the page does not have buffers (for 2090 * whatever reason), try to create them using 2091 * __block_write_begin. If this fails, 2092 * skip the page and move on. 2093 */ 2094 if (!page_has_buffers(page)) { 2095 if (__block_write_begin(page, 0, len, 2096 noalloc_get_block_write)) { 2097 skip_page: 2098 unlock_page(page); 2099 continue; 2100 } 2101 commit_write = 1; 2102 } 2103 2104 bh = page_bufs = page_buffers(page); 2105 block_start = 0; 2106 do { 2107 if (!bh) 2108 goto skip_page; 2109 if (map && (cur_logical >= map->m_lblk) && 2110 (cur_logical <= (map->m_lblk + 2111 (map->m_len - 1)))) { 2112 if (buffer_delay(bh)) { 2113 clear_buffer_delay(bh); 2114 bh->b_blocknr = pblock; 2115 } 2116 if (buffer_unwritten(bh) || 2117 buffer_mapped(bh)) 2118 BUG_ON(bh->b_blocknr != pblock); 2119 if (map->m_flags & EXT4_MAP_UNINIT) 2120 set_buffer_uninit(bh); 2121 clear_buffer_unwritten(bh); 2122 } 2123 2124 /* skip page if block allocation undone */ 2125 if (buffer_delay(bh) || buffer_unwritten(bh)) 2126 skip_page = 1; 2127 bh = bh->b_this_page; 2128 block_start += bh->b_size; 2129 cur_logical++; 2130 pblock++; 2131 } while (bh != page_bufs); 2132 2133 if (skip_page) 2134 goto skip_page; 2135 2136 if (commit_write) 2137 /* mark the buffer_heads as dirty & uptodate */ 2138 block_commit_write(page, 0, len); 2139 2140 clear_page_dirty_for_io(page); 2141 /* 2142 * Delalloc doesn't support data journalling, 2143 * but eventually maybe we'll lift this 2144 * restriction. 2145 */ 2146 if (unlikely(journal_data && PageChecked(page))) 2147 err = __ext4_journalled_writepage(page, len); 2148 else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT)) 2149 err = ext4_bio_write_page(&io_submit, page, 2150 len, mpd->wbc); 2151 else 2152 err = block_write_full_page(page, 2153 noalloc_get_block_write, mpd->wbc); 2154 2155 if (!err) 2156 mpd->pages_written++; 2157 /* 2158 * In error case, we have to continue because 2159 * remaining pages are still locked 2160 */ 2161 if (ret == 0) 2162 ret = err; 2163 } 2164 pagevec_release(&pvec); 2165 } 2166 ext4_io_submit(&io_submit); 2167 return ret; 2168 } 2169 2170 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) 2171 { 2172 int nr_pages, i; 2173 pgoff_t index, end; 2174 struct pagevec pvec; 2175 struct inode *inode = mpd->inode; 2176 struct address_space *mapping = inode->i_mapping; 2177 2178 index = mpd->first_page; 2179 end = mpd->next_page - 1; 2180 while (index <= end) { 2181 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 2182 if (nr_pages == 0) 2183 break; 2184 for (i = 0; i < nr_pages; i++) { 2185 struct page *page = pvec.pages[i]; 2186 if (page->index > end) 2187 break; 2188 BUG_ON(!PageLocked(page)); 2189 BUG_ON(PageWriteback(page)); 2190 block_invalidatepage(page, 0); 2191 ClearPageUptodate(page); 2192 unlock_page(page); 2193 } 2194 index = pvec.pages[nr_pages - 1]->index + 1; 2195 pagevec_release(&pvec); 2196 } 2197 return; 2198 } 2199 2200 static void ext4_print_free_blocks(struct inode *inode) 2201 { 2202 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2203 printk(KERN_CRIT "Total free blocks count %lld\n", 2204 ext4_count_free_blocks(inode->i_sb)); 2205 printk(KERN_CRIT "Free/Dirty block details\n"); 2206 printk(KERN_CRIT "free_blocks=%lld\n", 2207 (long long) percpu_counter_sum(&sbi->s_freeblocks_counter)); 2208 printk(KERN_CRIT "dirty_blocks=%lld\n", 2209 (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter)); 2210 printk(KERN_CRIT "Block reservation details\n"); 2211 printk(KERN_CRIT "i_reserved_data_blocks=%u\n", 2212 EXT4_I(inode)->i_reserved_data_blocks); 2213 printk(KERN_CRIT "i_reserved_meta_blocks=%u\n", 2214 EXT4_I(inode)->i_reserved_meta_blocks); 2215 return; 2216 } 2217 2218 /* 2219 * mpage_da_map_and_submit - go through given space, map them 2220 * if necessary, and then submit them for I/O 2221 * 2222 * @mpd - bh describing space 2223 * 2224 * The function skips space we know is already mapped to disk blocks. 2225 * 2226 */ 2227 static void mpage_da_map_and_submit(struct mpage_da_data *mpd) 2228 { 2229 int err, blks, get_blocks_flags; 2230 struct ext4_map_blocks map, *mapp = NULL; 2231 sector_t next = mpd->b_blocknr; 2232 unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 2233 loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 2234 handle_t *handle = NULL; 2235 2236 /* 2237 * If the blocks are mapped already, or we couldn't accumulate 2238 * any blocks, then proceed immediately to the submission stage. 2239 */ 2240 if ((mpd->b_size == 0) || 2241 ((mpd->b_state & (1 << BH_Mapped)) && 2242 !(mpd->b_state & (1 << BH_Delay)) && 2243 !(mpd->b_state & (1 << BH_Unwritten)))) 2244 goto submit_io; 2245 2246 handle = ext4_journal_current_handle(); 2247 BUG_ON(!handle); 2248 2249 /* 2250 * Call ext4_map_blocks() to allocate any delayed allocation 2251 * blocks, or to convert an uninitialized extent to be 2252 * initialized (in the case where we have written into 2253 * one or more preallocated blocks). 2254 * 2255 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 2256 * indicate that we are on the delayed allocation path. This 2257 * affects functions in many different parts of the allocation 2258 * call path. This flag exists primarily because we don't 2259 * want to change *many* call functions, so ext4_map_blocks() 2260 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the 2261 * inode's allocation semaphore is taken. 2262 * 2263 * If the blocks in questions were delalloc blocks, set 2264 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 2265 * variables are updated after the blocks have been allocated. 2266 */ 2267 map.m_lblk = next; 2268 map.m_len = max_blocks; 2269 get_blocks_flags = EXT4_GET_BLOCKS_CREATE; 2270 if (ext4_should_dioread_nolock(mpd->inode)) 2271 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 2272 if (mpd->b_state & (1 << BH_Delay)) 2273 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 2274 2275 blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags); 2276 if (blks < 0) { 2277 struct super_block *sb = mpd->inode->i_sb; 2278 2279 err = blks; 2280 /* 2281 * If get block returns EAGAIN or ENOSPC and there 2282 * appears to be free blocks we will just let 2283 * mpage_da_submit_io() unlock all of the pages. 2284 */ 2285 if (err == -EAGAIN) 2286 goto submit_io; 2287 2288 if (err == -ENOSPC && 2289 ext4_count_free_blocks(sb)) { 2290 mpd->retval = err; 2291 goto submit_io; 2292 } 2293 2294 /* 2295 * get block failure will cause us to loop in 2296 * writepages, because a_ops->writepage won't be able 2297 * to make progress. The page will be redirtied by 2298 * writepage and writepages will again try to write 2299 * the same. 2300 */ 2301 if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) { 2302 ext4_msg(sb, KERN_CRIT, 2303 "delayed block allocation failed for inode %lu " 2304 "at logical offset %llu with max blocks %zd " 2305 "with error %d", mpd->inode->i_ino, 2306 (unsigned long long) next, 2307 mpd->b_size >> mpd->inode->i_blkbits, err); 2308 ext4_msg(sb, KERN_CRIT, 2309 "This should not happen!! Data will be lost\n"); 2310 if (err == -ENOSPC) 2311 ext4_print_free_blocks(mpd->inode); 2312 } 2313 /* invalidate all the pages */ 2314 ext4_da_block_invalidatepages(mpd); 2315 2316 /* Mark this page range as having been completed */ 2317 mpd->io_done = 1; 2318 return; 2319 } 2320 BUG_ON(blks == 0); 2321 2322 mapp = ↦ 2323 if (map.m_flags & EXT4_MAP_NEW) { 2324 struct block_device *bdev = mpd->inode->i_sb->s_bdev; 2325 int i; 2326 2327 for (i = 0; i < map.m_len; i++) 2328 unmap_underlying_metadata(bdev, map.m_pblk + i); 2329 } 2330 2331 if (ext4_should_order_data(mpd->inode)) { 2332 err = ext4_jbd2_file_inode(handle, mpd->inode); 2333 if (err) 2334 /* This only happens if the journal is aborted */ 2335 return; 2336 } 2337 2338 /* 2339 * Update on-disk size along with block allocation. 2340 */ 2341 disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 2342 if (disksize > i_size_read(mpd->inode)) 2343 disksize = i_size_read(mpd->inode); 2344 if (disksize > EXT4_I(mpd->inode)->i_disksize) { 2345 ext4_update_i_disksize(mpd->inode, disksize); 2346 err = ext4_mark_inode_dirty(handle, mpd->inode); 2347 if (err) 2348 ext4_error(mpd->inode->i_sb, 2349 "Failed to mark inode %lu dirty", 2350 mpd->inode->i_ino); 2351 } 2352 2353 submit_io: 2354 mpage_da_submit_io(mpd, mapp); 2355 mpd->io_done = 1; 2356 } 2357 2358 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 2359 (1 << BH_Delay) | (1 << BH_Unwritten)) 2360 2361 /* 2362 * mpage_add_bh_to_extent - try to add one more block to extent of blocks 2363 * 2364 * @mpd->lbh - extent of blocks 2365 * @logical - logical number of the block in the file 2366 * @bh - bh of the block (used to access block's state) 2367 * 2368 * the function is used to collect contig. blocks in same state 2369 */ 2370 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, 2371 sector_t logical, size_t b_size, 2372 unsigned long b_state) 2373 { 2374 sector_t next; 2375 int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; 2376 2377 /* 2378 * XXX Don't go larger than mballoc is willing to allocate 2379 * This is a stopgap solution. We eventually need to fold 2380 * mpage_da_submit_io() into this function and then call 2381 * ext4_map_blocks() multiple times in a loop 2382 */ 2383 if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize) 2384 goto flush_it; 2385 2386 /* check if thereserved journal credits might overflow */ 2387 if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) { 2388 if (nrblocks >= EXT4_MAX_TRANS_DATA) { 2389 /* 2390 * With non-extent format we are limited by the journal 2391 * credit available. Total credit needed to insert 2392 * nrblocks contiguous blocks is dependent on the 2393 * nrblocks. So limit nrblocks. 2394 */ 2395 goto flush_it; 2396 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) > 2397 EXT4_MAX_TRANS_DATA) { 2398 /* 2399 * Adding the new buffer_head would make it cross the 2400 * allowed limit for which we have journal credit 2401 * reserved. So limit the new bh->b_size 2402 */ 2403 b_size = (EXT4_MAX_TRANS_DATA - nrblocks) << 2404 mpd->inode->i_blkbits; 2405 /* we will do mpage_da_submit_io in the next loop */ 2406 } 2407 } 2408 /* 2409 * First block in the extent 2410 */ 2411 if (mpd->b_size == 0) { 2412 mpd->b_blocknr = logical; 2413 mpd->b_size = b_size; 2414 mpd->b_state = b_state & BH_FLAGS; 2415 return; 2416 } 2417 2418 next = mpd->b_blocknr + nrblocks; 2419 /* 2420 * Can we merge the block to our big extent? 2421 */ 2422 if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) { 2423 mpd->b_size += b_size; 2424 return; 2425 } 2426 2427 flush_it: 2428 /* 2429 * We couldn't merge the block to our extent, so we 2430 * need to flush current extent and start new one 2431 */ 2432 mpage_da_map_and_submit(mpd); 2433 return; 2434 } 2435 2436 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) 2437 { 2438 return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); 2439 } 2440 2441 /* 2442 * This is a special get_blocks_t callback which is used by 2443 * ext4_da_write_begin(). It will either return mapped block or 2444 * reserve space for a single block. 2445 * 2446 * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 2447 * We also have b_blocknr = -1 and b_bdev initialized properly 2448 * 2449 * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 2450 * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev 2451 * initialized properly. 2452 */ 2453 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 2454 struct buffer_head *bh, int create) 2455 { 2456 struct ext4_map_blocks map; 2457 int ret = 0; 2458 sector_t invalid_block = ~((sector_t) 0xffff); 2459 2460 if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 2461 invalid_block = ~0; 2462 2463 BUG_ON(create == 0); 2464 BUG_ON(bh->b_size != inode->i_sb->s_blocksize); 2465 2466 map.m_lblk = iblock; 2467 map.m_len = 1; 2468 2469 /* 2470 * first, we need to know whether the block is allocated already 2471 * preallocated blocks are unmapped but should treated 2472 * the same as allocated blocks. 2473 */ 2474 ret = ext4_map_blocks(NULL, inode, &map, 0); 2475 if (ret < 0) 2476 return ret; 2477 if (ret == 0) { 2478 if (buffer_delay(bh)) 2479 return 0; /* Not sure this could or should happen */ 2480 /* 2481 * XXX: __block_write_begin() unmaps passed block, is it OK? 2482 */ 2483 ret = ext4_da_reserve_space(inode, iblock); 2484 if (ret) 2485 /* not enough space to reserve */ 2486 return ret; 2487 2488 map_bh(bh, inode->i_sb, invalid_block); 2489 set_buffer_new(bh); 2490 set_buffer_delay(bh); 2491 return 0; 2492 } 2493 2494 map_bh(bh, inode->i_sb, map.m_pblk); 2495 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 2496 2497 if (buffer_unwritten(bh)) { 2498 /* A delayed write to unwritten bh should be marked 2499 * new and mapped. Mapped ensures that we don't do 2500 * get_block multiple times when we write to the same 2501 * offset and new ensures that we do proper zero out 2502 * for partial write. 2503 */ 2504 set_buffer_new(bh); 2505 set_buffer_mapped(bh); 2506 } 2507 return 0; 2508 } 2509 2510 /* 2511 * This function is used as a standard get_block_t calback function 2512 * when there is no desire to allocate any blocks. It is used as a 2513 * callback function for block_write_begin() and block_write_full_page(). 2514 * These functions should only try to map a single block at a time. 2515 * 2516 * Since this function doesn't do block allocations even if the caller 2517 * requests it by passing in create=1, it is critically important that 2518 * any caller checks to make sure that any buffer heads are returned 2519 * by this function are either all already mapped or marked for 2520 * delayed allocation before calling block_write_full_page(). Otherwise, 2521 * b_blocknr could be left unitialized, and the page write functions will 2522 * be taken by surprise. 2523 */ 2524 static int noalloc_get_block_write(struct inode *inode, sector_t iblock, 2525 struct buffer_head *bh_result, int create) 2526 { 2527 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 2528 return _ext4_get_block(inode, iblock, bh_result, 0); 2529 } 2530 2531 static int bget_one(handle_t *handle, struct buffer_head *bh) 2532 { 2533 get_bh(bh); 2534 return 0; 2535 } 2536 2537 static int bput_one(handle_t *handle, struct buffer_head *bh) 2538 { 2539 put_bh(bh); 2540 return 0; 2541 } 2542 2543 static int __ext4_journalled_writepage(struct page *page, 2544 unsigned int len) 2545 { 2546 struct address_space *mapping = page->mapping; 2547 struct inode *inode = mapping->host; 2548 struct buffer_head *page_bufs; 2549 handle_t *handle = NULL; 2550 int ret = 0; 2551 int err; 2552 2553 ClearPageChecked(page); 2554 page_bufs = page_buffers(page); 2555 BUG_ON(!page_bufs); 2556 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one); 2557 /* As soon as we unlock the page, it can go away, but we have 2558 * references to buffers so we are safe */ 2559 unlock_page(page); 2560 2561 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 2562 if (IS_ERR(handle)) { 2563 ret = PTR_ERR(handle); 2564 goto out; 2565 } 2566 2567 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, 2568 do_journal_get_write_access); 2569 2570 err = walk_page_buffers(handle, page_bufs, 0, len, NULL, 2571 write_end_fn); 2572 if (ret == 0) 2573 ret = err; 2574 err = ext4_journal_stop(handle); 2575 if (!ret) 2576 ret = err; 2577 2578 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); 2579 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 2580 out: 2581 return ret; 2582 } 2583 2584 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode); 2585 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate); 2586 2587 /* 2588 * Note that we don't need to start a transaction unless we're journaling data 2589 * because we should have holes filled from ext4_page_mkwrite(). We even don't 2590 * need to file the inode to the transaction's list in ordered mode because if 2591 * we are writing back data added by write(), the inode is already there and if 2592 * we are writing back data modified via mmap(), no one guarantees in which 2593 * transaction the data will hit the disk. In case we are journaling data, we 2594 * cannot start transaction directly because transaction start ranks above page 2595 * lock so we have to do some magic. 2596 * 2597 * This function can get called via... 2598 * - ext4_da_writepages after taking page lock (have journal handle) 2599 * - journal_submit_inode_data_buffers (no journal handle) 2600 * - shrink_page_list via pdflush (no journal handle) 2601 * - grab_page_cache when doing write_begin (have journal handle) 2602 * 2603 * We don't do any block allocation in this function. If we have page with 2604 * multiple blocks we need to write those buffer_heads that are mapped. This 2605 * is important for mmaped based write. So if we do with blocksize 1K 2606 * truncate(f, 1024); 2607 * a = mmap(f, 0, 4096); 2608 * a[0] = 'a'; 2609 * truncate(f, 4096); 2610 * we have in the page first buffer_head mapped via page_mkwrite call back 2611 * but other bufer_heads would be unmapped but dirty(dirty done via the 2612 * do_wp_page). So writepage should write the first block. If we modify 2613 * the mmap area beyond 1024 we will again get a page_fault and the 2614 * page_mkwrite callback will do the block allocation and mark the 2615 * buffer_heads mapped. 2616 * 2617 * We redirty the page if we have any buffer_heads that is either delay or 2618 * unwritten in the page. 2619 * 2620 * We can get recursively called as show below. 2621 * 2622 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 2623 * ext4_writepage() 2624 * 2625 * But since we don't do any block allocation we should not deadlock. 2626 * Page also have the dirty flag cleared so we don't get recurive page_lock. 2627 */ 2628 static int ext4_writepage(struct page *page, 2629 struct writeback_control *wbc) 2630 { 2631 int ret = 0, commit_write = 0; 2632 loff_t size; 2633 unsigned int len; 2634 struct buffer_head *page_bufs = NULL; 2635 struct inode *inode = page->mapping->host; 2636 2637 trace_ext4_writepage(page); 2638 size = i_size_read(inode); 2639 if (page->index == size >> PAGE_CACHE_SHIFT) 2640 len = size & ~PAGE_CACHE_MASK; 2641 else 2642 len = PAGE_CACHE_SIZE; 2643 2644 /* 2645 * If the page does not have buffers (for whatever reason), 2646 * try to create them using __block_write_begin. If this 2647 * fails, redirty the page and move on. 2648 */ 2649 if (!page_has_buffers(page)) { 2650 if (__block_write_begin(page, 0, len, 2651 noalloc_get_block_write)) { 2652 redirty_page: 2653 redirty_page_for_writepage(wbc, page); 2654 unlock_page(page); 2655 return 0; 2656 } 2657 commit_write = 1; 2658 } 2659 page_bufs = page_buffers(page); 2660 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2661 ext4_bh_delay_or_unwritten)) { 2662 /* 2663 * We don't want to do block allocation, so redirty 2664 * the page and return. We may reach here when we do 2665 * a journal commit via journal_submit_inode_data_buffers. 2666 * We can also reach here via shrink_page_list 2667 */ 2668 goto redirty_page; 2669 } 2670 if (commit_write) 2671 /* now mark the buffer_heads as dirty and uptodate */ 2672 block_commit_write(page, 0, len); 2673 2674 if (PageChecked(page) && ext4_should_journal_data(inode)) 2675 /* 2676 * It's mmapped pagecache. Add buffers and journal it. There 2677 * doesn't seem much point in redirtying the page here. 2678 */ 2679 return __ext4_journalled_writepage(page, len); 2680 2681 if (buffer_uninit(page_bufs)) { 2682 ext4_set_bh_endio(page_bufs, inode); 2683 ret = block_write_full_page_endio(page, noalloc_get_block_write, 2684 wbc, ext4_end_io_buffer_write); 2685 } else 2686 ret = block_write_full_page(page, noalloc_get_block_write, 2687 wbc); 2688 2689 return ret; 2690 } 2691 2692 /* 2693 * This is called via ext4_da_writepages() to 2694 * calculate the total number of credits to reserve to fit 2695 * a single extent allocation into a single transaction, 2696 * ext4_da_writpeages() will loop calling this before 2697 * the block allocation. 2698 */ 2699 2700 static int ext4_da_writepages_trans_blocks(struct inode *inode) 2701 { 2702 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; 2703 2704 /* 2705 * With non-extent format the journal credit needed to 2706 * insert nrblocks contiguous block is dependent on 2707 * number of contiguous block. So we will limit 2708 * number of contiguous block to a sane value 2709 */ 2710 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) && 2711 (max_blocks > EXT4_MAX_TRANS_DATA)) 2712 max_blocks = EXT4_MAX_TRANS_DATA; 2713 2714 return ext4_chunk_trans_blocks(inode, max_blocks); 2715 } 2716 2717 /* 2718 * write_cache_pages_da - walk the list of dirty pages of the given 2719 * address space and accumulate pages that need writing, and call 2720 * mpage_da_map_and_submit to map a single contiguous memory region 2721 * and then write them. 2722 */ 2723 static int write_cache_pages_da(struct address_space *mapping, 2724 struct writeback_control *wbc, 2725 struct mpage_da_data *mpd, 2726 pgoff_t *done_index) 2727 { 2728 struct buffer_head *bh, *head; 2729 struct inode *inode = mapping->host; 2730 struct pagevec pvec; 2731 unsigned int nr_pages; 2732 sector_t logical; 2733 pgoff_t index, end; 2734 long nr_to_write = wbc->nr_to_write; 2735 int i, tag, ret = 0; 2736 2737 memset(mpd, 0, sizeof(struct mpage_da_data)); 2738 mpd->wbc = wbc; 2739 mpd->inode = inode; 2740 pagevec_init(&pvec, 0); 2741 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2742 end = wbc->range_end >> PAGE_CACHE_SHIFT; 2743 2744 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 2745 tag = PAGECACHE_TAG_TOWRITE; 2746 else 2747 tag = PAGECACHE_TAG_DIRTY; 2748 2749 *done_index = index; 2750 while (index <= end) { 2751 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 2752 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 2753 if (nr_pages == 0) 2754 return 0; 2755 2756 for (i = 0; i < nr_pages; i++) { 2757 struct page *page = pvec.pages[i]; 2758 2759 /* 2760 * At this point, the page may be truncated or 2761 * invalidated (changing page->mapping to NULL), or 2762 * even swizzled back from swapper_space to tmpfs file 2763 * mapping. However, page->index will not change 2764 * because we have a reference on the page. 2765 */ 2766 if (page->index > end) 2767 goto out; 2768 2769 *done_index = page->index + 1; 2770 2771 /* 2772 * If we can't merge this page, and we have 2773 * accumulated an contiguous region, write it 2774 */ 2775 if ((mpd->next_page != page->index) && 2776 (mpd->next_page != mpd->first_page)) { 2777 mpage_da_map_and_submit(mpd); 2778 goto ret_extent_tail; 2779 } 2780 2781 lock_page(page); 2782 2783 /* 2784 * If the page is no longer dirty, or its 2785 * mapping no longer corresponds to inode we 2786 * are writing (which means it has been 2787 * truncated or invalidated), or the page is 2788 * already under writeback and we are not 2789 * doing a data integrity writeback, skip the page 2790 */ 2791 if (!PageDirty(page) || 2792 (PageWriteback(page) && 2793 (wbc->sync_mode == WB_SYNC_NONE)) || 2794 unlikely(page->mapping != mapping)) { 2795 unlock_page(page); 2796 continue; 2797 } 2798 2799 wait_on_page_writeback(page); 2800 BUG_ON(PageWriteback(page)); 2801 2802 if (mpd->next_page != page->index) 2803 mpd->first_page = page->index; 2804 mpd->next_page = page->index + 1; 2805 logical = (sector_t) page->index << 2806 (PAGE_CACHE_SHIFT - inode->i_blkbits); 2807 2808 if (!page_has_buffers(page)) { 2809 mpage_add_bh_to_extent(mpd, logical, 2810 PAGE_CACHE_SIZE, 2811 (1 << BH_Dirty) | (1 << BH_Uptodate)); 2812 if (mpd->io_done) 2813 goto ret_extent_tail; 2814 } else { 2815 /* 2816 * Page with regular buffer heads, 2817 * just add all dirty ones 2818 */ 2819 head = page_buffers(page); 2820 bh = head; 2821 do { 2822 BUG_ON(buffer_locked(bh)); 2823 /* 2824 * We need to try to allocate 2825 * unmapped blocks in the same page. 2826 * Otherwise we won't make progress 2827 * with the page in ext4_writepage 2828 */ 2829 if (ext4_bh_delay_or_unwritten(NULL, bh)) { 2830 mpage_add_bh_to_extent(mpd, logical, 2831 bh->b_size, 2832 bh->b_state); 2833 if (mpd->io_done) 2834 goto ret_extent_tail; 2835 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { 2836 /* 2837 * mapped dirty buffer. We need 2838 * to update the b_state 2839 * because we look at b_state 2840 * in mpage_da_map_blocks. We 2841 * don't update b_size because 2842 * if we find an unmapped 2843 * buffer_head later we need to 2844 * use the b_state flag of that 2845 * buffer_head. 2846 */ 2847 if (mpd->b_size == 0) 2848 mpd->b_state = bh->b_state & BH_FLAGS; 2849 } 2850 logical++; 2851 } while ((bh = bh->b_this_page) != head); 2852 } 2853 2854 if (nr_to_write > 0) { 2855 nr_to_write--; 2856 if (nr_to_write == 0 && 2857 wbc->sync_mode == WB_SYNC_NONE) 2858 /* 2859 * We stop writing back only if we are 2860 * not doing integrity sync. In case of 2861 * integrity sync we have to keep going 2862 * because someone may be concurrently 2863 * dirtying pages, and we might have 2864 * synced a lot of newly appeared dirty 2865 * pages, but have not synced all of the 2866 * old dirty pages. 2867 */ 2868 goto out; 2869 } 2870 } 2871 pagevec_release(&pvec); 2872 cond_resched(); 2873 } 2874 return 0; 2875 ret_extent_tail: 2876 ret = MPAGE_DA_EXTENT_TAIL; 2877 out: 2878 pagevec_release(&pvec); 2879 cond_resched(); 2880 return ret; 2881 } 2882 2883 2884 static int ext4_da_writepages(struct address_space *mapping, 2885 struct writeback_control *wbc) 2886 { 2887 pgoff_t index; 2888 int range_whole = 0; 2889 handle_t *handle = NULL; 2890 struct mpage_da_data mpd; 2891 struct inode *inode = mapping->host; 2892 int pages_written = 0; 2893 unsigned int max_pages; 2894 int range_cyclic, cycled = 1, io_done = 0; 2895 int needed_blocks, ret = 0; 2896 long desired_nr_to_write, nr_to_writebump = 0; 2897 loff_t range_start = wbc->range_start; 2898 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 2899 pgoff_t done_index = 0; 2900 pgoff_t end; 2901 2902 trace_ext4_da_writepages(inode, wbc); 2903 2904 /* 2905 * No pages to write? This is mainly a kludge to avoid starting 2906 * a transaction for special inodes like journal inode on last iput() 2907 * because that could violate lock ordering on umount 2908 */ 2909 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 2910 return 0; 2911 2912 /* 2913 * If the filesystem has aborted, it is read-only, so return 2914 * right away instead of dumping stack traces later on that 2915 * will obscure the real source of the problem. We test 2916 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because 2917 * the latter could be true if the filesystem is mounted 2918 * read-only, and in that case, ext4_da_writepages should 2919 * *never* be called, so if that ever happens, we would want 2920 * the stack trace. 2921 */ 2922 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) 2923 return -EROFS; 2924 2925 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 2926 range_whole = 1; 2927 2928 range_cyclic = wbc->range_cyclic; 2929 if (wbc->range_cyclic) { 2930 index = mapping->writeback_index; 2931 if (index) 2932 cycled = 0; 2933 wbc->range_start = index << PAGE_CACHE_SHIFT; 2934 wbc->range_end = LLONG_MAX; 2935 wbc->range_cyclic = 0; 2936 end = -1; 2937 } else { 2938 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2939 end = wbc->range_end >> PAGE_CACHE_SHIFT; 2940 } 2941 2942 /* 2943 * This works around two forms of stupidity. The first is in 2944 * the writeback code, which caps the maximum number of pages 2945 * written to be 1024 pages. This is wrong on multiple 2946 * levels; different architectues have a different page size, 2947 * which changes the maximum amount of data which gets 2948 * written. Secondly, 4 megabytes is way too small. XFS 2949 * forces this value to be 16 megabytes by multiplying 2950 * nr_to_write parameter by four, and then relies on its 2951 * allocator to allocate larger extents to make them 2952 * contiguous. Unfortunately this brings us to the second 2953 * stupidity, which is that ext4's mballoc code only allocates 2954 * at most 2048 blocks. So we force contiguous writes up to 2955 * the number of dirty blocks in the inode, or 2956 * sbi->max_writeback_mb_bump whichever is smaller. 2957 */ 2958 max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT); 2959 if (!range_cyclic && range_whole) { 2960 if (wbc->nr_to_write == LONG_MAX) 2961 desired_nr_to_write = wbc->nr_to_write; 2962 else 2963 desired_nr_to_write = wbc->nr_to_write * 8; 2964 } else 2965 desired_nr_to_write = ext4_num_dirty_pages(inode, index, 2966 max_pages); 2967 if (desired_nr_to_write > max_pages) 2968 desired_nr_to_write = max_pages; 2969 2970 if (wbc->nr_to_write < desired_nr_to_write) { 2971 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write; 2972 wbc->nr_to_write = desired_nr_to_write; 2973 } 2974 2975 retry: 2976 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 2977 tag_pages_for_writeback(mapping, index, end); 2978 2979 while (!ret && wbc->nr_to_write > 0) { 2980 2981 /* 2982 * we insert one extent at a time. So we need 2983 * credit needed for single extent allocation. 2984 * journalled mode is currently not supported 2985 * by delalloc 2986 */ 2987 BUG_ON(ext4_should_journal_data(inode)); 2988 needed_blocks = ext4_da_writepages_trans_blocks(inode); 2989 2990 /* start a new transaction*/ 2991 handle = ext4_journal_start(inode, needed_blocks); 2992 if (IS_ERR(handle)) { 2993 ret = PTR_ERR(handle); 2994 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2995 "%ld pages, ino %lu; err %d", __func__, 2996 wbc->nr_to_write, inode->i_ino, ret); 2997 goto out_writepages; 2998 } 2999 3000 /* 3001 * Now call write_cache_pages_da() to find the next 3002 * contiguous region of logical blocks that need 3003 * blocks to be allocated by ext4 and submit them. 3004 */ 3005 ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index); 3006 /* 3007 * If we have a contiguous extent of pages and we 3008 * haven't done the I/O yet, map the blocks and submit 3009 * them for I/O. 3010 */ 3011 if (!mpd.io_done && mpd.next_page != mpd.first_page) { 3012 mpage_da_map_and_submit(&mpd); 3013 ret = MPAGE_DA_EXTENT_TAIL; 3014 } 3015 trace_ext4_da_write_pages(inode, &mpd); 3016 wbc->nr_to_write -= mpd.pages_written; 3017 3018 ext4_journal_stop(handle); 3019 3020 if ((mpd.retval == -ENOSPC) && sbi->s_journal) { 3021 /* commit the transaction which would 3022 * free blocks released in the transaction 3023 * and try again 3024 */ 3025 jbd2_journal_force_commit_nested(sbi->s_journal); 3026 ret = 0; 3027 } else if (ret == MPAGE_DA_EXTENT_TAIL) { 3028 /* 3029 * got one extent now try with 3030 * rest of the pages 3031 */ 3032 pages_written += mpd.pages_written; 3033 ret = 0; 3034 io_done = 1; 3035 } else if (wbc->nr_to_write) 3036 /* 3037 * There is no more writeout needed 3038 * or we requested for a noblocking writeout 3039 * and we found the device congested 3040 */ 3041 break; 3042 } 3043 if (!io_done && !cycled) { 3044 cycled = 1; 3045 index = 0; 3046 wbc->range_start = index << PAGE_CACHE_SHIFT; 3047 wbc->range_end = mapping->writeback_index - 1; 3048 goto retry; 3049 } 3050 3051 /* Update index */ 3052 wbc->range_cyclic = range_cyclic; 3053 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 3054 /* 3055 * set the writeback_index so that range_cyclic 3056 * mode will write it back later 3057 */ 3058 mapping->writeback_index = done_index; 3059 3060 out_writepages: 3061 wbc->nr_to_write -= nr_to_writebump; 3062 wbc->range_start = range_start; 3063 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); 3064 return ret; 3065 } 3066 3067 #define FALL_BACK_TO_NONDELALLOC 1 3068 static int ext4_nonda_switch(struct super_block *sb) 3069 { 3070 s64 free_blocks, dirty_blocks; 3071 struct ext4_sb_info *sbi = EXT4_SB(sb); 3072 3073 /* 3074 * switch to non delalloc mode if we are running low 3075 * on free block. The free block accounting via percpu 3076 * counters can get slightly wrong with percpu_counter_batch getting 3077 * accumulated on each CPU without updating global counters 3078 * Delalloc need an accurate free block accounting. So switch 3079 * to non delalloc when we are near to error range. 3080 */ 3081 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); 3082 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter); 3083 if (2 * free_blocks < 3 * dirty_blocks || 3084 free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) { 3085 /* 3086 * free block count is less than 150% of dirty blocks 3087 * or free blocks is less than watermark 3088 */ 3089 return 1; 3090 } 3091 /* 3092 * Even if we don't switch but are nearing capacity, 3093 * start pushing delalloc when 1/2 of free blocks are dirty. 3094 */ 3095 if (free_blocks < 2 * dirty_blocks) 3096 writeback_inodes_sb_if_idle(sb); 3097 3098 return 0; 3099 } 3100 3101 static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 3102 loff_t pos, unsigned len, unsigned flags, 3103 struct page **pagep, void **fsdata) 3104 { 3105 int ret, retries = 0; 3106 struct page *page; 3107 pgoff_t index; 3108 struct inode *inode = mapping->host; 3109 handle_t *handle; 3110 3111 index = pos >> PAGE_CACHE_SHIFT; 3112 3113 if (ext4_nonda_switch(inode->i_sb)) { 3114 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 3115 return ext4_write_begin(file, mapping, pos, 3116 len, flags, pagep, fsdata); 3117 } 3118 *fsdata = (void *)0; 3119 trace_ext4_da_write_begin(inode, pos, len, flags); 3120 retry: 3121 /* 3122 * With delayed allocation, we don't log the i_disksize update 3123 * if there is delayed block allocation. But we still need 3124 * to journalling the i_disksize update if writes to the end 3125 * of file which has an already mapped buffer. 3126 */ 3127 handle = ext4_journal_start(inode, 1); 3128 if (IS_ERR(handle)) { 3129 ret = PTR_ERR(handle); 3130 goto out; 3131 } 3132 /* We cannot recurse into the filesystem as the transaction is already 3133 * started */ 3134 flags |= AOP_FLAG_NOFS; 3135 3136 page = grab_cache_page_write_begin(mapping, index, flags); 3137 if (!page) { 3138 ext4_journal_stop(handle); 3139 ret = -ENOMEM; 3140 goto out; 3141 } 3142 *pagep = page; 3143 3144 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 3145 if (ret < 0) { 3146 unlock_page(page); 3147 ext4_journal_stop(handle); 3148 page_cache_release(page); 3149 /* 3150 * block_write_begin may have instantiated a few blocks 3151 * outside i_size. Trim these off again. Don't need 3152 * i_size_read because we hold i_mutex. 3153 */ 3154 if (pos + len > inode->i_size) 3155 ext4_truncate_failed_write(inode); 3156 } 3157 3158 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3159 goto retry; 3160 out: 3161 return ret; 3162 } 3163 3164 /* 3165 * Check if we should update i_disksize 3166 * when write to the end of file but not require block allocation 3167 */ 3168 static int ext4_da_should_update_i_disksize(struct page *page, 3169 unsigned long offset) 3170 { 3171 struct buffer_head *bh; 3172 struct inode *inode = page->mapping->host; 3173 unsigned int idx; 3174 int i; 3175 3176 bh = page_buffers(page); 3177 idx = offset >> inode->i_blkbits; 3178 3179 for (i = 0; i < idx; i++) 3180 bh = bh->b_this_page; 3181 3182 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 3183 return 0; 3184 return 1; 3185 } 3186 3187 static int ext4_da_write_end(struct file *file, 3188 struct address_space *mapping, 3189 loff_t pos, unsigned len, unsigned copied, 3190 struct page *page, void *fsdata) 3191 { 3192 struct inode *inode = mapping->host; 3193 int ret = 0, ret2; 3194 handle_t *handle = ext4_journal_current_handle(); 3195 loff_t new_i_size; 3196 unsigned long start, end; 3197 int write_mode = (int)(unsigned long)fsdata; 3198 3199 if (write_mode == FALL_BACK_TO_NONDELALLOC) { 3200 if (ext4_should_order_data(inode)) { 3201 return ext4_ordered_write_end(file, mapping, pos, 3202 len, copied, page, fsdata); 3203 } else if (ext4_should_writeback_data(inode)) { 3204 return ext4_writeback_write_end(file, mapping, pos, 3205 len, copied, page, fsdata); 3206 } else { 3207 BUG(); 3208 } 3209 } 3210 3211 trace_ext4_da_write_end(inode, pos, len, copied); 3212 start = pos & (PAGE_CACHE_SIZE - 1); 3213 end = start + copied - 1; 3214 3215 /* 3216 * generic_write_end() will run mark_inode_dirty() if i_size 3217 * changes. So let's piggyback the i_disksize mark_inode_dirty 3218 * into that. 3219 */ 3220 3221 new_i_size = pos + copied; 3222 if (new_i_size > EXT4_I(inode)->i_disksize) { 3223 if (ext4_da_should_update_i_disksize(page, end)) { 3224 down_write(&EXT4_I(inode)->i_data_sem); 3225 if (new_i_size > EXT4_I(inode)->i_disksize) { 3226 /* 3227 * Updating i_disksize when extending file 3228 * without needing block allocation 3229 */ 3230 if (ext4_should_order_data(inode)) 3231 ret = ext4_jbd2_file_inode(handle, 3232 inode); 3233 3234 EXT4_I(inode)->i_disksize = new_i_size; 3235 } 3236 up_write(&EXT4_I(inode)->i_data_sem); 3237 /* We need to mark inode dirty even if 3238 * new_i_size is less that inode->i_size 3239 * bu greater than i_disksize.(hint delalloc) 3240 */ 3241 ext4_mark_inode_dirty(handle, inode); 3242 } 3243 } 3244 ret2 = generic_write_end(file, mapping, pos, len, copied, 3245 page, fsdata); 3246 copied = ret2; 3247 if (ret2 < 0) 3248 ret = ret2; 3249 ret2 = ext4_journal_stop(handle); 3250 if (!ret) 3251 ret = ret2; 3252 3253 return ret ? ret : copied; 3254 } 3255 3256 static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 3257 { 3258 /* 3259 * Drop reserved blocks 3260 */ 3261 BUG_ON(!PageLocked(page)); 3262 if (!page_has_buffers(page)) 3263 goto out; 3264 3265 ext4_da_page_release_reservation(page, offset); 3266 3267 out: 3268 ext4_invalidatepage(page, offset); 3269 3270 return; 3271 } 3272 3273 /* 3274 * Force all delayed allocation blocks to be allocated for a given inode. 3275 */ 3276 int ext4_alloc_da_blocks(struct inode *inode) 3277 { 3278 trace_ext4_alloc_da_blocks(inode); 3279 3280 if (!EXT4_I(inode)->i_reserved_data_blocks && 3281 !EXT4_I(inode)->i_reserved_meta_blocks) 3282 return 0; 3283 3284 /* 3285 * We do something simple for now. The filemap_flush() will 3286 * also start triggering a write of the data blocks, which is 3287 * not strictly speaking necessary (and for users of 3288 * laptop_mode, not even desirable). However, to do otherwise 3289 * would require replicating code paths in: 3290 * 3291 * ext4_da_writepages() -> 3292 * write_cache_pages() ---> (via passed in callback function) 3293 * __mpage_da_writepage() --> 3294 * mpage_add_bh_to_extent() 3295 * mpage_da_map_blocks() 3296 * 3297 * The problem is that write_cache_pages(), located in 3298 * mm/page-writeback.c, marks pages clean in preparation for 3299 * doing I/O, which is not desirable if we're not planning on 3300 * doing I/O at all. 3301 * 3302 * We could call write_cache_pages(), and then redirty all of 3303 * the pages by calling redirty_page_for_writepage() but that 3304 * would be ugly in the extreme. So instead we would need to 3305 * replicate parts of the code in the above functions, 3306 * simplifying them because we wouldn't actually intend to 3307 * write out the pages, but rather only collect contiguous 3308 * logical block extents, call the multi-block allocator, and 3309 * then update the buffer heads with the block allocations. 3310 * 3311 * For now, though, we'll cheat by calling filemap_flush(), 3312 * which will map the blocks, and start the I/O, but not 3313 * actually wait for the I/O to complete. 3314 */ 3315 return filemap_flush(inode->i_mapping); 3316 } 3317 3318 /* 3319 * bmap() is special. It gets used by applications such as lilo and by 3320 * the swapper to find the on-disk block of a specific piece of data. 3321 * 3322 * Naturally, this is dangerous if the block concerned is still in the 3323 * journal. If somebody makes a swapfile on an ext4 data-journaling 3324 * filesystem and enables swap, then they may get a nasty shock when the 3325 * data getting swapped to that swapfile suddenly gets overwritten by 3326 * the original zero's written out previously to the journal and 3327 * awaiting writeback in the kernel's buffer cache. 3328 * 3329 * So, if we see any bmap calls here on a modified, data-journaled file, 3330 * take extra steps to flush any blocks which might be in the cache. 3331 */ 3332 static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 3333 { 3334 struct inode *inode = mapping->host; 3335 journal_t *journal; 3336 int err; 3337 3338 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 3339 test_opt(inode->i_sb, DELALLOC)) { 3340 /* 3341 * With delalloc we want to sync the file 3342 * so that we can make sure we allocate 3343 * blocks for file 3344 */ 3345 filemap_write_and_wait(mapping); 3346 } 3347 3348 if (EXT4_JOURNAL(inode) && 3349 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 3350 /* 3351 * This is a REALLY heavyweight approach, but the use of 3352 * bmap on dirty files is expected to be extremely rare: 3353 * only if we run lilo or swapon on a freshly made file 3354 * do we expect this to happen. 3355 * 3356 * (bmap requires CAP_SYS_RAWIO so this does not 3357 * represent an unprivileged user DOS attack --- we'd be 3358 * in trouble if mortal users could trigger this path at 3359 * will.) 3360 * 3361 * NB. EXT4_STATE_JDATA is not set on files other than 3362 * regular files. If somebody wants to bmap a directory 3363 * or symlink and gets confused because the buffer 3364 * hasn't yet been flushed to disk, they deserve 3365 * everything they get. 3366 */ 3367 3368 ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 3369 journal = EXT4_JOURNAL(inode); 3370 jbd2_journal_lock_updates(journal); 3371 err = jbd2_journal_flush(journal); 3372 jbd2_journal_unlock_updates(journal); 3373 3374 if (err) 3375 return 0; 3376 } 3377 3378 return generic_block_bmap(mapping, block, ext4_get_block); 3379 } 3380 3381 static int ext4_readpage(struct file *file, struct page *page) 3382 { 3383 trace_ext4_readpage(page); 3384 return mpage_readpage(page, ext4_get_block); 3385 } 3386 3387 static int 3388 ext4_readpages(struct file *file, struct address_space *mapping, 3389 struct list_head *pages, unsigned nr_pages) 3390 { 3391 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 3392 } 3393 3394 static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset) 3395 { 3396 struct buffer_head *head, *bh; 3397 unsigned int curr_off = 0; 3398 3399 if (!page_has_buffers(page)) 3400 return; 3401 head = bh = page_buffers(page); 3402 do { 3403 if (offset <= curr_off && test_clear_buffer_uninit(bh) 3404 && bh->b_private) { 3405 ext4_free_io_end(bh->b_private); 3406 bh->b_private = NULL; 3407 bh->b_end_io = NULL; 3408 } 3409 curr_off = curr_off + bh->b_size; 3410 bh = bh->b_this_page; 3411 } while (bh != head); 3412 } 3413 3414 static void ext4_invalidatepage(struct page *page, unsigned long offset) 3415 { 3416 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3417 3418 trace_ext4_invalidatepage(page, offset); 3419 3420 /* 3421 * free any io_end structure allocated for buffers to be discarded 3422 */ 3423 if (ext4_should_dioread_nolock(page->mapping->host)) 3424 ext4_invalidatepage_free_endio(page, offset); 3425 /* 3426 * If it's a full truncate we just forget about the pending dirtying 3427 */ 3428 if (offset == 0) 3429 ClearPageChecked(page); 3430 3431 if (journal) 3432 jbd2_journal_invalidatepage(journal, page, offset); 3433 else 3434 block_invalidatepage(page, offset); 3435 } 3436 3437 static int ext4_releasepage(struct page *page, gfp_t wait) 3438 { 3439 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3440 3441 trace_ext4_releasepage(page); 3442 3443 WARN_ON(PageChecked(page)); 3444 if (!page_has_buffers(page)) 3445 return 0; 3446 if (journal) 3447 return jbd2_journal_try_to_free_buffers(journal, page, wait); 3448 else 3449 return try_to_free_buffers(page); 3450 } 3451 3452 /* 3453 * O_DIRECT for ext3 (or indirect map) based files 3454 * 3455 * If the O_DIRECT write will extend the file then add this inode to the 3456 * orphan list. So recovery will truncate it back to the original size 3457 * if the machine crashes during the write. 3458 * 3459 * If the O_DIRECT write is intantiating holes inside i_size and the machine 3460 * crashes then stale disk data _may_ be exposed inside the file. But current 3461 * VFS code falls back into buffered path in that case so we are safe. 3462 */ 3463 static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, 3464 const struct iovec *iov, loff_t offset, 3465 unsigned long nr_segs) 3466 { 3467 struct file *file = iocb->ki_filp; 3468 struct inode *inode = file->f_mapping->host; 3469 struct ext4_inode_info *ei = EXT4_I(inode); 3470 handle_t *handle; 3471 ssize_t ret; 3472 int orphan = 0; 3473 size_t count = iov_length(iov, nr_segs); 3474 int retries = 0; 3475 3476 if (rw == WRITE) { 3477 loff_t final_size = offset + count; 3478 3479 if (final_size > inode->i_size) { 3480 /* Credits for sb + inode write */ 3481 handle = ext4_journal_start(inode, 2); 3482 if (IS_ERR(handle)) { 3483 ret = PTR_ERR(handle); 3484 goto out; 3485 } 3486 ret = ext4_orphan_add(handle, inode); 3487 if (ret) { 3488 ext4_journal_stop(handle); 3489 goto out; 3490 } 3491 orphan = 1; 3492 ei->i_disksize = inode->i_size; 3493 ext4_journal_stop(handle); 3494 } 3495 } 3496 3497 retry: 3498 if (rw == READ && ext4_should_dioread_nolock(inode)) 3499 ret = __blockdev_direct_IO(rw, iocb, inode, 3500 inode->i_sb->s_bdev, iov, 3501 offset, nr_segs, 3502 ext4_get_block, NULL, NULL, 0); 3503 else { 3504 ret = blockdev_direct_IO(rw, iocb, inode, iov, 3505 offset, nr_segs, ext4_get_block); 3506 3507 if (unlikely((rw & WRITE) && ret < 0)) { 3508 loff_t isize = i_size_read(inode); 3509 loff_t end = offset + iov_length(iov, nr_segs); 3510 3511 if (end > isize) 3512 ext4_truncate_failed_write(inode); 3513 } 3514 } 3515 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3516 goto retry; 3517 3518 if (orphan) { 3519 int err; 3520 3521 /* Credits for sb + inode write */ 3522 handle = ext4_journal_start(inode, 2); 3523 if (IS_ERR(handle)) { 3524 /* This is really bad luck. We've written the data 3525 * but cannot extend i_size. Bail out and pretend 3526 * the write failed... */ 3527 ret = PTR_ERR(handle); 3528 if (inode->i_nlink) 3529 ext4_orphan_del(NULL, inode); 3530 3531 goto out; 3532 } 3533 if (inode->i_nlink) 3534 ext4_orphan_del(handle, inode); 3535 if (ret > 0) { 3536 loff_t end = offset + ret; 3537 if (end > inode->i_size) { 3538 ei->i_disksize = end; 3539 i_size_write(inode, end); 3540 /* 3541 * We're going to return a positive `ret' 3542 * here due to non-zero-length I/O, so there's 3543 * no way of reporting error returns from 3544 * ext4_mark_inode_dirty() to userspace. So 3545 * ignore it. 3546 */ 3547 ext4_mark_inode_dirty(handle, inode); 3548 } 3549 } 3550 err = ext4_journal_stop(handle); 3551 if (ret == 0) 3552 ret = err; 3553 } 3554 out: 3555 return ret; 3556 } 3557 3558 /* 3559 * ext4_get_block used when preparing for a DIO write or buffer write. 3560 * We allocate an uinitialized extent if blocks haven't been allocated. 3561 * The extent will be converted to initialized after the IO is complete. 3562 */ 3563 static int ext4_get_block_write(struct inode *inode, sector_t iblock, 3564 struct buffer_head *bh_result, int create) 3565 { 3566 ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", 3567 inode->i_ino, create); 3568 return _ext4_get_block(inode, iblock, bh_result, 3569 EXT4_GET_BLOCKS_IO_CREATE_EXT); 3570 } 3571 3572 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, 3573 ssize_t size, void *private, int ret, 3574 bool is_async) 3575 { 3576 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; 3577 ext4_io_end_t *io_end = iocb->private; 3578 struct workqueue_struct *wq; 3579 unsigned long flags; 3580 struct ext4_inode_info *ei; 3581 3582 /* if not async direct IO or dio with 0 bytes write, just return */ 3583 if (!io_end || !size) 3584 goto out; 3585 3586 ext_debug("ext4_end_io_dio(): io_end 0x%p" 3587 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n", 3588 iocb->private, io_end->inode->i_ino, iocb, offset, 3589 size); 3590 3591 /* if not aio dio with unwritten extents, just free io and return */ 3592 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { 3593 ext4_free_io_end(io_end); 3594 iocb->private = NULL; 3595 out: 3596 if (is_async) 3597 aio_complete(iocb, ret, 0); 3598 inode_dio_done(inode); 3599 return; 3600 } 3601 3602 io_end->offset = offset; 3603 io_end->size = size; 3604 if (is_async) { 3605 io_end->iocb = iocb; 3606 io_end->result = ret; 3607 } 3608 wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; 3609 3610 /* Add the io_end to per-inode completed aio dio list*/ 3611 ei = EXT4_I(io_end->inode); 3612 spin_lock_irqsave(&ei->i_completed_io_lock, flags); 3613 list_add_tail(&io_end->list, &ei->i_completed_io_list); 3614 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); 3615 3616 /* queue the work to convert unwritten extents to written */ 3617 queue_work(wq, &io_end->work); 3618 iocb->private = NULL; 3619 3620 /* XXX: probably should move into the real I/O completion handler */ 3621 inode_dio_done(inode); 3622 } 3623 3624 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) 3625 { 3626 ext4_io_end_t *io_end = bh->b_private; 3627 struct workqueue_struct *wq; 3628 struct inode *inode; 3629 unsigned long flags; 3630 3631 if (!test_clear_buffer_uninit(bh) || !io_end) 3632 goto out; 3633 3634 if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) { 3635 printk("sb umounted, discard end_io request for inode %lu\n", 3636 io_end->inode->i_ino); 3637 ext4_free_io_end(io_end); 3638 goto out; 3639 } 3640 3641 io_end->flag = EXT4_IO_END_UNWRITTEN; 3642 inode = io_end->inode; 3643 3644 /* Add the io_end to per-inode completed io list*/ 3645 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); 3646 list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list); 3647 spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags); 3648 3649 wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq; 3650 /* queue the work to convert unwritten extents to written */ 3651 queue_work(wq, &io_end->work); 3652 out: 3653 bh->b_private = NULL; 3654 bh->b_end_io = NULL; 3655 clear_buffer_uninit(bh); 3656 end_buffer_async_write(bh, uptodate); 3657 } 3658 3659 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode) 3660 { 3661 ext4_io_end_t *io_end; 3662 struct page *page = bh->b_page; 3663 loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT; 3664 size_t size = bh->b_size; 3665 3666 retry: 3667 io_end = ext4_init_io_end(inode, GFP_ATOMIC); 3668 if (!io_end) { 3669 pr_warn_ratelimited("%s: allocation fail\n", __func__); 3670 schedule(); 3671 goto retry; 3672 } 3673 io_end->offset = offset; 3674 io_end->size = size; 3675 /* 3676 * We need to hold a reference to the page to make sure it 3677 * doesn't get evicted before ext4_end_io_work() has a chance 3678 * to convert the extent from written to unwritten. 3679 */ 3680 io_end->page = page; 3681 get_page(io_end->page); 3682 3683 bh->b_private = io_end; 3684 bh->b_end_io = ext4_end_io_buffer_write; 3685 return 0; 3686 } 3687 3688 /* 3689 * For ext4 extent files, ext4 will do direct-io write to holes, 3690 * preallocated extents, and those write extend the file, no need to 3691 * fall back to buffered IO. 3692 * 3693 * For holes, we fallocate those blocks, mark them as uninitialized 3694 * If those blocks were preallocated, we mark sure they are splited, but 3695 * still keep the range to write as uninitialized. 3696 * 3697 * The unwrritten extents will be converted to written when DIO is completed. 3698 * For async direct IO, since the IO may still pending when return, we 3699 * set up an end_io call back function, which will do the conversion 3700 * when async direct IO completed. 3701 * 3702 * If the O_DIRECT write will extend the file then add this inode to the 3703 * orphan list. So recovery will truncate it back to the original size 3704 * if the machine crashes during the write. 3705 * 3706 */ 3707 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, 3708 const struct iovec *iov, loff_t offset, 3709 unsigned long nr_segs) 3710 { 3711 struct file *file = iocb->ki_filp; 3712 struct inode *inode = file->f_mapping->host; 3713 ssize_t ret; 3714 size_t count = iov_length(iov, nr_segs); 3715 3716 loff_t final_size = offset + count; 3717 if (rw == WRITE && final_size <= inode->i_size) { 3718 /* 3719 * We could direct write to holes and fallocate. 3720 * 3721 * Allocated blocks to fill the hole are marked as uninitialized 3722 * to prevent parallel buffered read to expose the stale data 3723 * before DIO complete the data IO. 3724 * 3725 * As to previously fallocated extents, ext4 get_block 3726 * will just simply mark the buffer mapped but still 3727 * keep the extents uninitialized. 3728 * 3729 * for non AIO case, we will convert those unwritten extents 3730 * to written after return back from blockdev_direct_IO. 3731 * 3732 * for async DIO, the conversion needs to be defered when 3733 * the IO is completed. The ext4 end_io callback function 3734 * will be called to take care of the conversion work. 3735 * Here for async case, we allocate an io_end structure to 3736 * hook to the iocb. 3737 */ 3738 iocb->private = NULL; 3739 EXT4_I(inode)->cur_aio_dio = NULL; 3740 if (!is_sync_kiocb(iocb)) { 3741 iocb->private = ext4_init_io_end(inode, GFP_NOFS); 3742 if (!iocb->private) 3743 return -ENOMEM; 3744 /* 3745 * we save the io structure for current async 3746 * direct IO, so that later ext4_map_blocks() 3747 * could flag the io structure whether there 3748 * is a unwritten extents needs to be converted 3749 * when IO is completed. 3750 */ 3751 EXT4_I(inode)->cur_aio_dio = iocb->private; 3752 } 3753 3754 ret = __blockdev_direct_IO(rw, iocb, inode, 3755 inode->i_sb->s_bdev, iov, 3756 offset, nr_segs, 3757 ext4_get_block_write, 3758 ext4_end_io_dio, 3759 NULL, 3760 DIO_LOCKING | DIO_SKIP_HOLES); 3761 if (iocb->private) 3762 EXT4_I(inode)->cur_aio_dio = NULL; 3763 /* 3764 * The io_end structure takes a reference to the inode, 3765 * that structure needs to be destroyed and the 3766 * reference to the inode need to be dropped, when IO is 3767 * complete, even with 0 byte write, or failed. 3768 * 3769 * In the successful AIO DIO case, the io_end structure will be 3770 * desctroyed and the reference to the inode will be dropped 3771 * after the end_io call back function is called. 3772 * 3773 * In the case there is 0 byte write, or error case, since 3774 * VFS direct IO won't invoke the end_io call back function, 3775 * we need to free the end_io structure here. 3776 */ 3777 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { 3778 ext4_free_io_end(iocb->private); 3779 iocb->private = NULL; 3780 } else if (ret > 0 && ext4_test_inode_state(inode, 3781 EXT4_STATE_DIO_UNWRITTEN)) { 3782 int err; 3783 /* 3784 * for non AIO case, since the IO is already 3785 * completed, we could do the conversion right here 3786 */ 3787 err = ext4_convert_unwritten_extents(inode, 3788 offset, ret); 3789 if (err < 0) 3790 ret = err; 3791 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3792 } 3793 return ret; 3794 } 3795 3796 /* for write the the end of file case, we fall back to old way */ 3797 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 3798 } 3799 3800 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 3801 const struct iovec *iov, loff_t offset, 3802 unsigned long nr_segs) 3803 { 3804 struct file *file = iocb->ki_filp; 3805 struct inode *inode = file->f_mapping->host; 3806 ssize_t ret; 3807 3808 trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw); 3809 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3810 ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); 3811 else 3812 ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 3813 trace_ext4_direct_IO_exit(inode, offset, 3814 iov_length(iov, nr_segs), rw, ret); 3815 return ret; 3816 } 3817 3818 /* 3819 * Pages can be marked dirty completely asynchronously from ext4's journalling 3820 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3821 * much here because ->set_page_dirty is called under VFS locks. The page is 3822 * not necessarily locked. 3823 * 3824 * We cannot just dirty the page and leave attached buffers clean, because the 3825 * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3826 * or jbddirty because all the journalling code will explode. 3827 * 3828 * So what we do is to mark the page "pending dirty" and next time writepage 3829 * is called, propagate that into the buffers appropriately. 3830 */ 3831 static int ext4_journalled_set_page_dirty(struct page *page) 3832 { 3833 SetPageChecked(page); 3834 return __set_page_dirty_nobuffers(page); 3835 } 3836 3837 static const struct address_space_operations ext4_ordered_aops = { 3838 .readpage = ext4_readpage, 3839 .readpages = ext4_readpages, 3840 .writepage = ext4_writepage, 3841 .write_begin = ext4_write_begin, 3842 .write_end = ext4_ordered_write_end, 3843 .bmap = ext4_bmap, 3844 .invalidatepage = ext4_invalidatepage, 3845 .releasepage = ext4_releasepage, 3846 .direct_IO = ext4_direct_IO, 3847 .migratepage = buffer_migrate_page, 3848 .is_partially_uptodate = block_is_partially_uptodate, 3849 .error_remove_page = generic_error_remove_page, 3850 }; 3851 3852 static const struct address_space_operations ext4_writeback_aops = { 3853 .readpage = ext4_readpage, 3854 .readpages = ext4_readpages, 3855 .writepage = ext4_writepage, 3856 .write_begin = ext4_write_begin, 3857 .write_end = ext4_writeback_write_end, 3858 .bmap = ext4_bmap, 3859 .invalidatepage = ext4_invalidatepage, 3860 .releasepage = ext4_releasepage, 3861 .direct_IO = ext4_direct_IO, 3862 .migratepage = buffer_migrate_page, 3863 .is_partially_uptodate = block_is_partially_uptodate, 3864 .error_remove_page = generic_error_remove_page, 3865 }; 3866 3867 static const struct address_space_operations ext4_journalled_aops = { 3868 .readpage = ext4_readpage, 3869 .readpages = ext4_readpages, 3870 .writepage = ext4_writepage, 3871 .write_begin = ext4_write_begin, 3872 .write_end = ext4_journalled_write_end, 3873 .set_page_dirty = ext4_journalled_set_page_dirty, 3874 .bmap = ext4_bmap, 3875 .invalidatepage = ext4_invalidatepage, 3876 .releasepage = ext4_releasepage, 3877 .is_partially_uptodate = block_is_partially_uptodate, 3878 .error_remove_page = generic_error_remove_page, 3879 }; 3880 3881 static const struct address_space_operations ext4_da_aops = { 3882 .readpage = ext4_readpage, 3883 .readpages = ext4_readpages, 3884 .writepage = ext4_writepage, 3885 .writepages = ext4_da_writepages, 3886 .write_begin = ext4_da_write_begin, 3887 .write_end = ext4_da_write_end, 3888 .bmap = ext4_bmap, 3889 .invalidatepage = ext4_da_invalidatepage, 3890 .releasepage = ext4_releasepage, 3891 .direct_IO = ext4_direct_IO, 3892 .migratepage = buffer_migrate_page, 3893 .is_partially_uptodate = block_is_partially_uptodate, 3894 .error_remove_page = generic_error_remove_page, 3895 }; 3896 3897 void ext4_set_aops(struct inode *inode) 3898 { 3899 if (ext4_should_order_data(inode) && 3900 test_opt(inode->i_sb, DELALLOC)) 3901 inode->i_mapping->a_ops = &ext4_da_aops; 3902 else if (ext4_should_order_data(inode)) 3903 inode->i_mapping->a_ops = &ext4_ordered_aops; 3904 else if (ext4_should_writeback_data(inode) && 3905 test_opt(inode->i_sb, DELALLOC)) 3906 inode->i_mapping->a_ops = &ext4_da_aops; 3907 else if (ext4_should_writeback_data(inode)) 3908 inode->i_mapping->a_ops = &ext4_writeback_aops; 3909 else 3910 inode->i_mapping->a_ops = &ext4_journalled_aops; 3911 } 3912 3913 /* 3914 * ext4_block_truncate_page() zeroes out a mapping from file offset `from' 3915 * up to the end of the block which corresponds to `from'. 3916 * This required during truncate. We need to physically zero the tail end 3917 * of that block so it doesn't yield old data if the file is later grown. 3918 */ 3919 int ext4_block_truncate_page(handle_t *handle, 3920 struct address_space *mapping, loff_t from) 3921 { 3922 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3923 unsigned length; 3924 unsigned blocksize; 3925 struct inode *inode = mapping->host; 3926 3927 blocksize = inode->i_sb->s_blocksize; 3928 length = blocksize - (offset & (blocksize - 1)); 3929 3930 return ext4_block_zero_page_range(handle, mapping, from, length); 3931 } 3932 3933 /* 3934 * ext4_block_zero_page_range() zeros out a mapping of length 'length' 3935 * starting from file offset 'from'. The range to be zero'd must 3936 * be contained with in one block. If the specified range exceeds 3937 * the end of the block it will be shortened to end of the block 3938 * that cooresponds to 'from' 3939 */ 3940 int ext4_block_zero_page_range(handle_t *handle, 3941 struct address_space *mapping, loff_t from, loff_t length) 3942 { 3943 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 3944 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3945 unsigned blocksize, max, pos; 3946 ext4_lblk_t iblock; 3947 struct inode *inode = mapping->host; 3948 struct buffer_head *bh; 3949 struct page *page; 3950 int err = 0; 3951 3952 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 3953 mapping_gfp_mask(mapping) & ~__GFP_FS); 3954 if (!page) 3955 return -EINVAL; 3956 3957 blocksize = inode->i_sb->s_blocksize; 3958 max = blocksize - (offset & (blocksize - 1)); 3959 3960 /* 3961 * correct length if it does not fall between 3962 * 'from' and the end of the block 3963 */ 3964 if (length > max || length < 0) 3965 length = max; 3966 3967 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3968 3969 if (!page_has_buffers(page)) 3970 create_empty_buffers(page, blocksize, 0); 3971 3972 /* Find the buffer that contains "offset" */ 3973 bh = page_buffers(page); 3974 pos = blocksize; 3975 while (offset >= pos) { 3976 bh = bh->b_this_page; 3977 iblock++; 3978 pos += blocksize; 3979 } 3980 3981 err = 0; 3982 if (buffer_freed(bh)) { 3983 BUFFER_TRACE(bh, "freed: skip"); 3984 goto unlock; 3985 } 3986 3987 if (!buffer_mapped(bh)) { 3988 BUFFER_TRACE(bh, "unmapped"); 3989 ext4_get_block(inode, iblock, bh, 0); 3990 /* unmapped? It's a hole - nothing to do */ 3991 if (!buffer_mapped(bh)) { 3992 BUFFER_TRACE(bh, "still unmapped"); 3993 goto unlock; 3994 } 3995 } 3996 3997 /* Ok, it's mapped. Make sure it's up-to-date */ 3998 if (PageUptodate(page)) 3999 set_buffer_uptodate(bh); 4000 4001 if (!buffer_uptodate(bh)) { 4002 err = -EIO; 4003 ll_rw_block(READ, 1, &bh); 4004 wait_on_buffer(bh); 4005 /* Uhhuh. Read error. Complain and punt. */ 4006 if (!buffer_uptodate(bh)) 4007 goto unlock; 4008 } 4009 4010 if (ext4_should_journal_data(inode)) { 4011 BUFFER_TRACE(bh, "get write access"); 4012 err = ext4_journal_get_write_access(handle, bh); 4013 if (err) 4014 goto unlock; 4015 } 4016 4017 zero_user(page, offset, length); 4018 4019 BUFFER_TRACE(bh, "zeroed end of block"); 4020 4021 err = 0; 4022 if (ext4_should_journal_data(inode)) { 4023 err = ext4_handle_dirty_metadata(handle, inode, bh); 4024 } else { 4025 if (ext4_should_order_data(inode) && EXT4_I(inode)->jinode) 4026 err = ext4_jbd2_file_inode(handle, inode); 4027 mark_buffer_dirty(bh); 4028 } 4029 4030 unlock: 4031 unlock_page(page); 4032 page_cache_release(page); 4033 return err; 4034 } 4035 4036 /* 4037 * Probably it should be a library function... search for first non-zero word 4038 * or memcmp with zero_page, whatever is better for particular architecture. 4039 * Linus? 4040 */ 4041 static inline int all_zeroes(__le32 *p, __le32 *q) 4042 { 4043 while (p < q) 4044 if (*p++) 4045 return 0; 4046 return 1; 4047 } 4048 4049 /** 4050 * ext4_find_shared - find the indirect blocks for partial truncation. 4051 * @inode: inode in question 4052 * @depth: depth of the affected branch 4053 * @offsets: offsets of pointers in that branch (see ext4_block_to_path) 4054 * @chain: place to store the pointers to partial indirect blocks 4055 * @top: place to the (detached) top of branch 4056 * 4057 * This is a helper function used by ext4_truncate(). 4058 * 4059 * When we do truncate() we may have to clean the ends of several 4060 * indirect blocks but leave the blocks themselves alive. Block is 4061 * partially truncated if some data below the new i_size is referred 4062 * from it (and it is on the path to the first completely truncated 4063 * data block, indeed). We have to free the top of that path along 4064 * with everything to the right of the path. Since no allocation 4065 * past the truncation point is possible until ext4_truncate() 4066 * finishes, we may safely do the latter, but top of branch may 4067 * require special attention - pageout below the truncation point 4068 * might try to populate it. 4069 * 4070 * We atomically detach the top of branch from the tree, store the 4071 * block number of its root in *@top, pointers to buffer_heads of 4072 * partially truncated blocks - in @chain[].bh and pointers to 4073 * their last elements that should not be removed - in 4074 * @chain[].p. Return value is the pointer to last filled element 4075 * of @chain. 4076 * 4077 * The work left to caller to do the actual freeing of subtrees: 4078 * a) free the subtree starting from *@top 4079 * b) free the subtrees whose roots are stored in 4080 * (@chain[i].p+1 .. end of @chain[i].bh->b_data) 4081 * c) free the subtrees growing from the inode past the @chain[0]. 4082 * (no partially truncated stuff there). */ 4083 4084 static Indirect *ext4_find_shared(struct inode *inode, int depth, 4085 ext4_lblk_t offsets[4], Indirect chain[4], 4086 __le32 *top) 4087 { 4088 Indirect *partial, *p; 4089 int k, err; 4090 4091 *top = 0; 4092 /* Make k index the deepest non-null offset + 1 */ 4093 for (k = depth; k > 1 && !offsets[k-1]; k--) 4094 ; 4095 partial = ext4_get_branch(inode, k, offsets, chain, &err); 4096 /* Writer: pointers */ 4097 if (!partial) 4098 partial = chain + k-1; 4099 /* 4100 * If the branch acquired continuation since we've looked at it - 4101 * fine, it should all survive and (new) top doesn't belong to us. 4102 */ 4103 if (!partial->key && *partial->p) 4104 /* Writer: end */ 4105 goto no_top; 4106 for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--) 4107 ; 4108 /* 4109 * OK, we've found the last block that must survive. The rest of our 4110 * branch should be detached before unlocking. However, if that rest 4111 * of branch is all ours and does not grow immediately from the inode 4112 * it's easier to cheat and just decrement partial->p. 4113 */ 4114 if (p == chain + k - 1 && p > chain) { 4115 p->p--; 4116 } else { 4117 *top = *p->p; 4118 /* Nope, don't do this in ext4. Must leave the tree intact */ 4119 #if 0 4120 *p->p = 0; 4121 #endif 4122 } 4123 /* Writer: end */ 4124 4125 while (partial > p) { 4126 brelse(partial->bh); 4127 partial--; 4128 } 4129 no_top: 4130 return partial; 4131 } 4132 4133 /* 4134 * Zero a number of block pointers in either an inode or an indirect block. 4135 * If we restart the transaction we must again get write access to the 4136 * indirect block for further modification. 4137 * 4138 * We release `count' blocks on disk, but (last - first) may be greater 4139 * than `count' because there can be holes in there. 4140 * 4141 * Return 0 on success, 1 on invalid block range 4142 * and < 0 on fatal error. 4143 */ 4144 static int ext4_clear_blocks(handle_t *handle, struct inode *inode, 4145 struct buffer_head *bh, 4146 ext4_fsblk_t block_to_free, 4147 unsigned long count, __le32 *first, 4148 __le32 *last) 4149 { 4150 __le32 *p; 4151 int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED; 4152 int err; 4153 4154 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 4155 flags |= EXT4_FREE_BLOCKS_METADATA; 4156 4157 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, 4158 count)) { 4159 EXT4_ERROR_INODE(inode, "attempt to clear invalid " 4160 "blocks %llu len %lu", 4161 (unsigned long long) block_to_free, count); 4162 return 1; 4163 } 4164 4165 if (try_to_extend_transaction(handle, inode)) { 4166 if (bh) { 4167 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 4168 err = ext4_handle_dirty_metadata(handle, inode, bh); 4169 if (unlikely(err)) 4170 goto out_err; 4171 } 4172 err = ext4_mark_inode_dirty(handle, inode); 4173 if (unlikely(err)) 4174 goto out_err; 4175 err = ext4_truncate_restart_trans(handle, inode, 4176 blocks_for_truncate(inode)); 4177 if (unlikely(err)) 4178 goto out_err; 4179 if (bh) { 4180 BUFFER_TRACE(bh, "retaking write access"); 4181 err = ext4_journal_get_write_access(handle, bh); 4182 if (unlikely(err)) 4183 goto out_err; 4184 } 4185 } 4186 4187 for (p = first; p < last; p++) 4188 *p = 0; 4189 4190 ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags); 4191 return 0; 4192 out_err: 4193 ext4_std_error(inode->i_sb, err); 4194 return err; 4195 } 4196 4197 /** 4198 * ext4_free_data - free a list of data blocks 4199 * @handle: handle for this transaction 4200 * @inode: inode we are dealing with 4201 * @this_bh: indirect buffer_head which contains *@first and *@last 4202 * @first: array of block numbers 4203 * @last: points immediately past the end of array 4204 * 4205 * We are freeing all blocks referred from that array (numbers are stored as 4206 * little-endian 32-bit) and updating @inode->i_blocks appropriately. 4207 * 4208 * We accumulate contiguous runs of blocks to free. Conveniently, if these 4209 * blocks are contiguous then releasing them at one time will only affect one 4210 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't 4211 * actually use a lot of journal space. 4212 * 4213 * @this_bh will be %NULL if @first and @last point into the inode's direct 4214 * block pointers. 4215 */ 4216 static void ext4_free_data(handle_t *handle, struct inode *inode, 4217 struct buffer_head *this_bh, 4218 __le32 *first, __le32 *last) 4219 { 4220 ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */ 4221 unsigned long count = 0; /* Number of blocks in the run */ 4222 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind 4223 corresponding to 4224 block_to_free */ 4225 ext4_fsblk_t nr; /* Current block # */ 4226 __le32 *p; /* Pointer into inode/ind 4227 for current block */ 4228 int err = 0; 4229 4230 if (this_bh) { /* For indirect block */ 4231 BUFFER_TRACE(this_bh, "get_write_access"); 4232 err = ext4_journal_get_write_access(handle, this_bh); 4233 /* Important: if we can't update the indirect pointers 4234 * to the blocks, we can't free them. */ 4235 if (err) 4236 return; 4237 } 4238 4239 for (p = first; p < last; p++) { 4240 nr = le32_to_cpu(*p); 4241 if (nr) { 4242 /* accumulate blocks to free if they're contiguous */ 4243 if (count == 0) { 4244 block_to_free = nr; 4245 block_to_free_p = p; 4246 count = 1; 4247 } else if (nr == block_to_free + count) { 4248 count++; 4249 } else { 4250 err = ext4_clear_blocks(handle, inode, this_bh, 4251 block_to_free, count, 4252 block_to_free_p, p); 4253 if (err) 4254 break; 4255 block_to_free = nr; 4256 block_to_free_p = p; 4257 count = 1; 4258 } 4259 } 4260 } 4261 4262 if (!err && count > 0) 4263 err = ext4_clear_blocks(handle, inode, this_bh, block_to_free, 4264 count, block_to_free_p, p); 4265 if (err < 0) 4266 /* fatal error */ 4267 return; 4268 4269 if (this_bh) { 4270 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata"); 4271 4272 /* 4273 * The buffer head should have an attached journal head at this 4274 * point. However, if the data is corrupted and an indirect 4275 * block pointed to itself, it would have been detached when 4276 * the block was cleared. Check for this instead of OOPSing. 4277 */ 4278 if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) 4279 ext4_handle_dirty_metadata(handle, inode, this_bh); 4280 else 4281 EXT4_ERROR_INODE(inode, 4282 "circular indirect block detected at " 4283 "block %llu", 4284 (unsigned long long) this_bh->b_blocknr); 4285 } 4286 } 4287 4288 /** 4289 * ext4_free_branches - free an array of branches 4290 * @handle: JBD handle for this transaction 4291 * @inode: inode we are dealing with 4292 * @parent_bh: the buffer_head which contains *@first and *@last 4293 * @first: array of block numbers 4294 * @last: pointer immediately past the end of array 4295 * @depth: depth of the branches to free 4296 * 4297 * We are freeing all blocks referred from these branches (numbers are 4298 * stored as little-endian 32-bit) and updating @inode->i_blocks 4299 * appropriately. 4300 */ 4301 static void ext4_free_branches(handle_t *handle, struct inode *inode, 4302 struct buffer_head *parent_bh, 4303 __le32 *first, __le32 *last, int depth) 4304 { 4305 ext4_fsblk_t nr; 4306 __le32 *p; 4307 4308 if (ext4_handle_is_aborted(handle)) 4309 return; 4310 4311 if (depth--) { 4312 struct buffer_head *bh; 4313 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 4314 p = last; 4315 while (--p >= first) { 4316 nr = le32_to_cpu(*p); 4317 if (!nr) 4318 continue; /* A hole */ 4319 4320 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), 4321 nr, 1)) { 4322 EXT4_ERROR_INODE(inode, 4323 "invalid indirect mapped " 4324 "block %lu (level %d)", 4325 (unsigned long) nr, depth); 4326 break; 4327 } 4328 4329 /* Go read the buffer for the next level down */ 4330 bh = sb_bread(inode->i_sb, nr); 4331 4332 /* 4333 * A read failure? Report error and clear slot 4334 * (should be rare). 4335 */ 4336 if (!bh) { 4337 EXT4_ERROR_INODE_BLOCK(inode, nr, 4338 "Read failure"); 4339 continue; 4340 } 4341 4342 /* This zaps the entire block. Bottom up. */ 4343 BUFFER_TRACE(bh, "free child branches"); 4344 ext4_free_branches(handle, inode, bh, 4345 (__le32 *) bh->b_data, 4346 (__le32 *) bh->b_data + addr_per_block, 4347 depth); 4348 brelse(bh); 4349 4350 /* 4351 * Everything below this this pointer has been 4352 * released. Now let this top-of-subtree go. 4353 * 4354 * We want the freeing of this indirect block to be 4355 * atomic in the journal with the updating of the 4356 * bitmap block which owns it. So make some room in 4357 * the journal. 4358 * 4359 * We zero the parent pointer *after* freeing its 4360 * pointee in the bitmaps, so if extend_transaction() 4361 * for some reason fails to put the bitmap changes and 4362 * the release into the same transaction, recovery 4363 * will merely complain about releasing a free block, 4364 * rather than leaking blocks. 4365 */ 4366 if (ext4_handle_is_aborted(handle)) 4367 return; 4368 if (try_to_extend_transaction(handle, inode)) { 4369 ext4_mark_inode_dirty(handle, inode); 4370 ext4_truncate_restart_trans(handle, inode, 4371 blocks_for_truncate(inode)); 4372 } 4373 4374 /* 4375 * The forget flag here is critical because if 4376 * we are journaling (and not doing data 4377 * journaling), we have to make sure a revoke 4378 * record is written to prevent the journal 4379 * replay from overwriting the (former) 4380 * indirect block if it gets reallocated as a 4381 * data block. This must happen in the same 4382 * transaction where the data blocks are 4383 * actually freed. 4384 */ 4385 ext4_free_blocks(handle, inode, NULL, nr, 1, 4386 EXT4_FREE_BLOCKS_METADATA| 4387 EXT4_FREE_BLOCKS_FORGET); 4388 4389 if (parent_bh) { 4390 /* 4391 * The block which we have just freed is 4392 * pointed to by an indirect block: journal it 4393 */ 4394 BUFFER_TRACE(parent_bh, "get_write_access"); 4395 if (!ext4_journal_get_write_access(handle, 4396 parent_bh)){ 4397 *p = 0; 4398 BUFFER_TRACE(parent_bh, 4399 "call ext4_handle_dirty_metadata"); 4400 ext4_handle_dirty_metadata(handle, 4401 inode, 4402 parent_bh); 4403 } 4404 } 4405 } 4406 } else { 4407 /* We have reached the bottom of the tree. */ 4408 BUFFER_TRACE(parent_bh, "free data blocks"); 4409 ext4_free_data(handle, inode, parent_bh, first, last); 4410 } 4411 } 4412 4413 int ext4_can_truncate(struct inode *inode) 4414 { 4415 if (S_ISREG(inode->i_mode)) 4416 return 1; 4417 if (S_ISDIR(inode->i_mode)) 4418 return 1; 4419 if (S_ISLNK(inode->i_mode)) 4420 return !ext4_inode_is_fast_symlink(inode); 4421 return 0; 4422 } 4423 4424 /* 4425 * ext4_punch_hole: punches a hole in a file by releaseing the blocks 4426 * associated with the given offset and length 4427 * 4428 * @inode: File inode 4429 * @offset: The offset where the hole will begin 4430 * @len: The length of the hole 4431 * 4432 * Returns: 0 on sucess or negative on failure 4433 */ 4434 4435 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) 4436 { 4437 struct inode *inode = file->f_path.dentry->d_inode; 4438 if (!S_ISREG(inode->i_mode)) 4439 return -ENOTSUPP; 4440 4441 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 4442 /* TODO: Add support for non extent hole punching */ 4443 return -ENOTSUPP; 4444 } 4445 4446 return ext4_ext_punch_hole(file, offset, length); 4447 } 4448 4449 /* 4450 * ext4_truncate() 4451 * 4452 * We block out ext4_get_block() block instantiations across the entire 4453 * transaction, and VFS/VM ensures that ext4_truncate() cannot run 4454 * simultaneously on behalf of the same inode. 4455 * 4456 * As we work through the truncate and commmit bits of it to the journal there 4457 * is one core, guiding principle: the file's tree must always be consistent on 4458 * disk. We must be able to restart the truncate after a crash. 4459 * 4460 * The file's tree may be transiently inconsistent in memory (although it 4461 * probably isn't), but whenever we close off and commit a journal transaction, 4462 * the contents of (the filesystem + the journal) must be consistent and 4463 * restartable. It's pretty simple, really: bottom up, right to left (although 4464 * left-to-right works OK too). 4465 * 4466 * Note that at recovery time, journal replay occurs *before* the restart of 4467 * truncate against the orphan inode list. 4468 * 4469 * The committed inode has the new, desired i_size (which is the same as 4470 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 4471 * that this inode's truncate did not complete and it will again call 4472 * ext4_truncate() to have another go. So there will be instantiated blocks 4473 * to the right of the truncation point in a crashed ext4 filesystem. But 4474 * that's fine - as long as they are linked from the inode, the post-crash 4475 * ext4_truncate() run will find them and release them. 4476 */ 4477 void ext4_truncate(struct inode *inode) 4478 { 4479 handle_t *handle; 4480 struct ext4_inode_info *ei = EXT4_I(inode); 4481 __le32 *i_data = ei->i_data; 4482 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 4483 struct address_space *mapping = inode->i_mapping; 4484 ext4_lblk_t offsets[4]; 4485 Indirect chain[4]; 4486 Indirect *partial; 4487 __le32 nr = 0; 4488 int n = 0; 4489 ext4_lblk_t last_block, max_block; 4490 unsigned blocksize = inode->i_sb->s_blocksize; 4491 4492 trace_ext4_truncate_enter(inode); 4493 4494 if (!ext4_can_truncate(inode)) 4495 return; 4496 4497 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4498 4499 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 4500 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 4501 4502 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 4503 ext4_ext_truncate(inode); 4504 trace_ext4_truncate_exit(inode); 4505 return; 4506 } 4507 4508 handle = start_transaction(inode); 4509 if (IS_ERR(handle)) 4510 return; /* AKPM: return what? */ 4511 4512 last_block = (inode->i_size + blocksize-1) 4513 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 4514 max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1) 4515 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 4516 4517 if (inode->i_size & (blocksize - 1)) 4518 if (ext4_block_truncate_page(handle, mapping, inode->i_size)) 4519 goto out_stop; 4520 4521 if (last_block != max_block) { 4522 n = ext4_block_to_path(inode, last_block, offsets, NULL); 4523 if (n == 0) 4524 goto out_stop; /* error */ 4525 } 4526 4527 /* 4528 * OK. This truncate is going to happen. We add the inode to the 4529 * orphan list, so that if this truncate spans multiple transactions, 4530 * and we crash, we will resume the truncate when the filesystem 4531 * recovers. It also marks the inode dirty, to catch the new size. 4532 * 4533 * Implication: the file must always be in a sane, consistent 4534 * truncatable state while each transaction commits. 4535 */ 4536 if (ext4_orphan_add(handle, inode)) 4537 goto out_stop; 4538 4539 /* 4540 * From here we block out all ext4_get_block() callers who want to 4541 * modify the block allocation tree. 4542 */ 4543 down_write(&ei->i_data_sem); 4544 4545 ext4_discard_preallocations(inode); 4546 4547 /* 4548 * The orphan list entry will now protect us from any crash which 4549 * occurs before the truncate completes, so it is now safe to propagate 4550 * the new, shorter inode size (held for now in i_size) into the 4551 * on-disk inode. We do this via i_disksize, which is the value which 4552 * ext4 *really* writes onto the disk inode. 4553 */ 4554 ei->i_disksize = inode->i_size; 4555 4556 if (last_block == max_block) { 4557 /* 4558 * It is unnecessary to free any data blocks if last_block is 4559 * equal to the indirect block limit. 4560 */ 4561 goto out_unlock; 4562 } else if (n == 1) { /* direct blocks */ 4563 ext4_free_data(handle, inode, NULL, i_data+offsets[0], 4564 i_data + EXT4_NDIR_BLOCKS); 4565 goto do_indirects; 4566 } 4567 4568 partial = ext4_find_shared(inode, n, offsets, chain, &nr); 4569 /* Kill the top of shared branch (not detached) */ 4570 if (nr) { 4571 if (partial == chain) { 4572 /* Shared branch grows from the inode */ 4573 ext4_free_branches(handle, inode, NULL, 4574 &nr, &nr+1, (chain+n-1) - partial); 4575 *partial->p = 0; 4576 /* 4577 * We mark the inode dirty prior to restart, 4578 * and prior to stop. No need for it here. 4579 */ 4580 } else { 4581 /* Shared branch grows from an indirect block */ 4582 BUFFER_TRACE(partial->bh, "get_write_access"); 4583 ext4_free_branches(handle, inode, partial->bh, 4584 partial->p, 4585 partial->p+1, (chain+n-1) - partial); 4586 } 4587 } 4588 /* Clear the ends of indirect blocks on the shared branch */ 4589 while (partial > chain) { 4590 ext4_free_branches(handle, inode, partial->bh, partial->p + 1, 4591 (__le32*)partial->bh->b_data+addr_per_block, 4592 (chain+n-1) - partial); 4593 BUFFER_TRACE(partial->bh, "call brelse"); 4594 brelse(partial->bh); 4595 partial--; 4596 } 4597 do_indirects: 4598 /* Kill the remaining (whole) subtrees */ 4599 switch (offsets[0]) { 4600 default: 4601 nr = i_data[EXT4_IND_BLOCK]; 4602 if (nr) { 4603 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 4604 i_data[EXT4_IND_BLOCK] = 0; 4605 } 4606 case EXT4_IND_BLOCK: 4607 nr = i_data[EXT4_DIND_BLOCK]; 4608 if (nr) { 4609 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 4610 i_data[EXT4_DIND_BLOCK] = 0; 4611 } 4612 case EXT4_DIND_BLOCK: 4613 nr = i_data[EXT4_TIND_BLOCK]; 4614 if (nr) { 4615 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 4616 i_data[EXT4_TIND_BLOCK] = 0; 4617 } 4618 case EXT4_TIND_BLOCK: 4619 ; 4620 } 4621 4622 out_unlock: 4623 up_write(&ei->i_data_sem); 4624 inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4625 ext4_mark_inode_dirty(handle, inode); 4626 4627 /* 4628 * In a multi-transaction truncate, we only make the final transaction 4629 * synchronous 4630 */ 4631 if (IS_SYNC(inode)) 4632 ext4_handle_sync(handle); 4633 out_stop: 4634 /* 4635 * If this was a simple ftruncate(), and the file will remain alive 4636 * then we need to clear up the orphan record which we created above. 4637 * However, if this was a real unlink then we were called by 4638 * ext4_delete_inode(), and we allow that function to clean up the 4639 * orphan info for us. 4640 */ 4641 if (inode->i_nlink) 4642 ext4_orphan_del(handle, inode); 4643 4644 ext4_journal_stop(handle); 4645 trace_ext4_truncate_exit(inode); 4646 } 4647 4648 /* 4649 * ext4_get_inode_loc returns with an extra refcount against the inode's 4650 * underlying buffer_head on success. If 'in_mem' is true, we have all 4651 * data in memory that is needed to recreate the on-disk version of this 4652 * inode. 4653 */ 4654 static int __ext4_get_inode_loc(struct inode *inode, 4655 struct ext4_iloc *iloc, int in_mem) 4656 { 4657 struct ext4_group_desc *gdp; 4658 struct buffer_head *bh; 4659 struct super_block *sb = inode->i_sb; 4660 ext4_fsblk_t block; 4661 int inodes_per_block, inode_offset; 4662 4663 iloc->bh = NULL; 4664 if (!ext4_valid_inum(sb, inode->i_ino)) 4665 return -EIO; 4666 4667 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 4668 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 4669 if (!gdp) 4670 return -EIO; 4671 4672 /* 4673 * Figure out the offset within the block group inode table 4674 */ 4675 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 4676 inode_offset = ((inode->i_ino - 1) % 4677 EXT4_INODES_PER_GROUP(sb)); 4678 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 4679 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 4680 4681 bh = sb_getblk(sb, block); 4682 if (!bh) { 4683 EXT4_ERROR_INODE_BLOCK(inode, block, 4684 "unable to read itable block"); 4685 return -EIO; 4686 } 4687 if (!buffer_uptodate(bh)) { 4688 lock_buffer(bh); 4689 4690 /* 4691 * If the buffer has the write error flag, we have failed 4692 * to write out another inode in the same block. In this 4693 * case, we don't have to read the block because we may 4694 * read the old inode data successfully. 4695 */ 4696 if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 4697 set_buffer_uptodate(bh); 4698 4699 if (buffer_uptodate(bh)) { 4700 /* someone brought it uptodate while we waited */ 4701 unlock_buffer(bh); 4702 goto has_buffer; 4703 } 4704 4705 /* 4706 * If we have all information of the inode in memory and this 4707 * is the only valid inode in the block, we need not read the 4708 * block. 4709 */ 4710 if (in_mem) { 4711 struct buffer_head *bitmap_bh; 4712 int i, start; 4713 4714 start = inode_offset & ~(inodes_per_block - 1); 4715 4716 /* Is the inode bitmap in cache? */ 4717 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 4718 if (!bitmap_bh) 4719 goto make_io; 4720 4721 /* 4722 * If the inode bitmap isn't in cache then the 4723 * optimisation may end up performing two reads instead 4724 * of one, so skip it. 4725 */ 4726 if (!buffer_uptodate(bitmap_bh)) { 4727 brelse(bitmap_bh); 4728 goto make_io; 4729 } 4730 for (i = start; i < start + inodes_per_block; i++) { 4731 if (i == inode_offset) 4732 continue; 4733 if (ext4_test_bit(i, bitmap_bh->b_data)) 4734 break; 4735 } 4736 brelse(bitmap_bh); 4737 if (i == start + inodes_per_block) { 4738 /* all other inodes are free, so skip I/O */ 4739 memset(bh->b_data, 0, bh->b_size); 4740 set_buffer_uptodate(bh); 4741 unlock_buffer(bh); 4742 goto has_buffer; 4743 } 4744 } 4745 4746 make_io: 4747 /* 4748 * If we need to do any I/O, try to pre-readahead extra 4749 * blocks from the inode table. 4750 */ 4751 if (EXT4_SB(sb)->s_inode_readahead_blks) { 4752 ext4_fsblk_t b, end, table; 4753 unsigned num; 4754 4755 table = ext4_inode_table(sb, gdp); 4756 /* s_inode_readahead_blks is always a power of 2 */ 4757 b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); 4758 if (table > b) 4759 b = table; 4760 end = b + EXT4_SB(sb)->s_inode_readahead_blks; 4761 num = EXT4_INODES_PER_GROUP(sb); 4762 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 4763 EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) 4764 num -= ext4_itable_unused_count(sb, gdp); 4765 table += num / inodes_per_block; 4766 if (end > table) 4767 end = table; 4768 while (b <= end) 4769 sb_breadahead(sb, b++); 4770 } 4771 4772 /* 4773 * There are other valid inodes in the buffer, this inode 4774 * has in-inode xattrs, or we don't have this inode in memory. 4775 * Read the block from disk. 4776 */ 4777 trace_ext4_load_inode(inode); 4778 get_bh(bh); 4779 bh->b_end_io = end_buffer_read_sync; 4780 submit_bh(READ_META, bh); 4781 wait_on_buffer(bh); 4782 if (!buffer_uptodate(bh)) { 4783 EXT4_ERROR_INODE_BLOCK(inode, block, 4784 "unable to read itable block"); 4785 brelse(bh); 4786 return -EIO; 4787 } 4788 } 4789 has_buffer: 4790 iloc->bh = bh; 4791 return 0; 4792 } 4793 4794 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 4795 { 4796 /* We have all inode data except xattrs in memory here. */ 4797 return __ext4_get_inode_loc(inode, iloc, 4798 !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 4799 } 4800 4801 void ext4_set_inode_flags(struct inode *inode) 4802 { 4803 unsigned int flags = EXT4_I(inode)->i_flags; 4804 4805 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 4806 if (flags & EXT4_SYNC_FL) 4807 inode->i_flags |= S_SYNC; 4808 if (flags & EXT4_APPEND_FL) 4809 inode->i_flags |= S_APPEND; 4810 if (flags & EXT4_IMMUTABLE_FL) 4811 inode->i_flags |= S_IMMUTABLE; 4812 if (flags & EXT4_NOATIME_FL) 4813 inode->i_flags |= S_NOATIME; 4814 if (flags & EXT4_DIRSYNC_FL) 4815 inode->i_flags |= S_DIRSYNC; 4816 } 4817 4818 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 4819 void ext4_get_inode_flags(struct ext4_inode_info *ei) 4820 { 4821 unsigned int vfs_fl; 4822 unsigned long old_fl, new_fl; 4823 4824 do { 4825 vfs_fl = ei->vfs_inode.i_flags; 4826 old_fl = ei->i_flags; 4827 new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 4828 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| 4829 EXT4_DIRSYNC_FL); 4830 if (vfs_fl & S_SYNC) 4831 new_fl |= EXT4_SYNC_FL; 4832 if (vfs_fl & S_APPEND) 4833 new_fl |= EXT4_APPEND_FL; 4834 if (vfs_fl & S_IMMUTABLE) 4835 new_fl |= EXT4_IMMUTABLE_FL; 4836 if (vfs_fl & S_NOATIME) 4837 new_fl |= EXT4_NOATIME_FL; 4838 if (vfs_fl & S_DIRSYNC) 4839 new_fl |= EXT4_DIRSYNC_FL; 4840 } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); 4841 } 4842 4843 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 4844 struct ext4_inode_info *ei) 4845 { 4846 blkcnt_t i_blocks ; 4847 struct inode *inode = &(ei->vfs_inode); 4848 struct super_block *sb = inode->i_sb; 4849 4850 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 4851 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 4852 /* we are using combined 48 bit field */ 4853 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 4854 le32_to_cpu(raw_inode->i_blocks_lo); 4855 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 4856 /* i_blocks represent file system block size */ 4857 return i_blocks << (inode->i_blkbits - 9); 4858 } else { 4859 return i_blocks; 4860 } 4861 } else { 4862 return le32_to_cpu(raw_inode->i_blocks_lo); 4863 } 4864 } 4865 4866 struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 4867 { 4868 struct ext4_iloc iloc; 4869 struct ext4_inode *raw_inode; 4870 struct ext4_inode_info *ei; 4871 struct inode *inode; 4872 journal_t *journal = EXT4_SB(sb)->s_journal; 4873 long ret; 4874 int block; 4875 4876 inode = iget_locked(sb, ino); 4877 if (!inode) 4878 return ERR_PTR(-ENOMEM); 4879 if (!(inode->i_state & I_NEW)) 4880 return inode; 4881 4882 ei = EXT4_I(inode); 4883 iloc.bh = NULL; 4884 4885 ret = __ext4_get_inode_loc(inode, &iloc, 0); 4886 if (ret < 0) 4887 goto bad_inode; 4888 raw_inode = ext4_raw_inode(&iloc); 4889 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 4890 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 4891 inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 4892 if (!(test_opt(inode->i_sb, NO_UID32))) { 4893 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 4894 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 4895 } 4896 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); 4897 4898 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 4899 ei->i_dir_start_lookup = 0; 4900 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 4901 /* We now have enough fields to check if the inode was active or not. 4902 * This is needed because nfsd might try to access dead inodes 4903 * the test is that same one that e2fsck uses 4904 * NeilBrown 1999oct15 4905 */ 4906 if (inode->i_nlink == 0) { 4907 if (inode->i_mode == 0 || 4908 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { 4909 /* this inode is deleted */ 4910 ret = -ESTALE; 4911 goto bad_inode; 4912 } 4913 /* The only unlinked inodes we let through here have 4914 * valid i_mode and are being read by the orphan 4915 * recovery code: that's fine, we're about to complete 4916 * the process of deleting those. */ 4917 } 4918 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 4919 inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 4920 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 4921 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) 4922 ei->i_file_acl |= 4923 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 4924 inode->i_size = ext4_isize(raw_inode); 4925 ei->i_disksize = inode->i_size; 4926 #ifdef CONFIG_QUOTA 4927 ei->i_reserved_quota = 0; 4928 #endif 4929 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 4930 ei->i_block_group = iloc.block_group; 4931 ei->i_last_alloc_group = ~0; 4932 /* 4933 * NOTE! The in-memory inode i_data array is in little-endian order 4934 * even on big-endian machines: we do NOT byteswap the block numbers! 4935 */ 4936 for (block = 0; block < EXT4_N_BLOCKS; block++) 4937 ei->i_data[block] = raw_inode->i_block[block]; 4938 INIT_LIST_HEAD(&ei->i_orphan); 4939 4940 /* 4941 * Set transaction id's of transactions that have to be committed 4942 * to finish f[data]sync. We set them to currently running transaction 4943 * as we cannot be sure that the inode or some of its metadata isn't 4944 * part of the transaction - the inode could have been reclaimed and 4945 * now it is reread from disk. 4946 */ 4947 if (journal) { 4948 transaction_t *transaction; 4949 tid_t tid; 4950 4951 read_lock(&journal->j_state_lock); 4952 if (journal->j_running_transaction) 4953 transaction = journal->j_running_transaction; 4954 else 4955 transaction = journal->j_committing_transaction; 4956 if (transaction) 4957 tid = transaction->t_tid; 4958 else 4959 tid = journal->j_commit_sequence; 4960 read_unlock(&journal->j_state_lock); 4961 ei->i_sync_tid = tid; 4962 ei->i_datasync_tid = tid; 4963 } 4964 4965 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4966 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4967 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4968 EXT4_INODE_SIZE(inode->i_sb)) { 4969 ret = -EIO; 4970 goto bad_inode; 4971 } 4972 if (ei->i_extra_isize == 0) { 4973 /* The extra space is currently unused. Use it. */ 4974 ei->i_extra_isize = sizeof(struct ext4_inode) - 4975 EXT4_GOOD_OLD_INODE_SIZE; 4976 } else { 4977 __le32 *magic = (void *)raw_inode + 4978 EXT4_GOOD_OLD_INODE_SIZE + 4979 ei->i_extra_isize; 4980 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) 4981 ext4_set_inode_state(inode, EXT4_STATE_XATTR); 4982 } 4983 } else 4984 ei->i_extra_isize = 0; 4985 4986 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4987 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4988 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4989 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4990 4991 inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 4992 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4993 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 4994 inode->i_version |= 4995 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 4996 } 4997 4998 ret = 0; 4999 if (ei->i_file_acl && 5000 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 5001 EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", 5002 ei->i_file_acl); 5003 ret = -EIO; 5004 goto bad_inode; 5005 } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 5006 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 5007 (S_ISLNK(inode->i_mode) && 5008 !ext4_inode_is_fast_symlink(inode))) 5009 /* Validate extent which is part of inode */ 5010 ret = ext4_ext_check_inode(inode); 5011 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 5012 (S_ISLNK(inode->i_mode) && 5013 !ext4_inode_is_fast_symlink(inode))) { 5014 /* Validate block references which are part of inode */ 5015 ret = ext4_check_inode_blockref(inode); 5016 } 5017 if (ret) 5018 goto bad_inode; 5019 5020 if (S_ISREG(inode->i_mode)) { 5021 inode->i_op = &ext4_file_inode_operations; 5022 inode->i_fop = &ext4_file_operations; 5023 ext4_set_aops(inode); 5024 } else if (S_ISDIR(inode->i_mode)) { 5025 inode->i_op = &ext4_dir_inode_operations; 5026 inode->i_fop = &ext4_dir_operations; 5027 } else if (S_ISLNK(inode->i_mode)) { 5028 if (ext4_inode_is_fast_symlink(inode)) { 5029 inode->i_op = &ext4_fast_symlink_inode_operations; 5030 nd_terminate_link(ei->i_data, inode->i_size, 5031 sizeof(ei->i_data) - 1); 5032 } else { 5033 inode->i_op = &ext4_symlink_inode_operations; 5034 ext4_set_aops(inode); 5035 } 5036 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 5037 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 5038 inode->i_op = &ext4_special_inode_operations; 5039 if (raw_inode->i_block[0]) 5040 init_special_inode(inode, inode->i_mode, 5041 old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 5042 else 5043 init_special_inode(inode, inode->i_mode, 5044 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 5045 } else { 5046 ret = -EIO; 5047 EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); 5048 goto bad_inode; 5049 } 5050 brelse(iloc.bh); 5051 ext4_set_inode_flags(inode); 5052 unlock_new_inode(inode); 5053 return inode; 5054 5055 bad_inode: 5056 brelse(iloc.bh); 5057 iget_failed(inode); 5058 return ERR_PTR(ret); 5059 } 5060 5061 static int ext4_inode_blocks_set(handle_t *handle, 5062 struct ext4_inode *raw_inode, 5063 struct ext4_inode_info *ei) 5064 { 5065 struct inode *inode = &(ei->vfs_inode); 5066 u64 i_blocks = inode->i_blocks; 5067 struct super_block *sb = inode->i_sb; 5068 5069 if (i_blocks <= ~0U) { 5070 /* 5071 * i_blocks can be represnted in a 32 bit variable 5072 * as multiple of 512 bytes 5073 */ 5074 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 5075 raw_inode->i_blocks_high = 0; 5076 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 5077 return 0; 5078 } 5079 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) 5080 return -EFBIG; 5081 5082 if (i_blocks <= 0xffffffffffffULL) { 5083 /* 5084 * i_blocks can be represented in a 48 bit variable 5085 * as multiple of 512 bytes 5086 */ 5087 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 5088 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 5089 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 5090 } else { 5091 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 5092 /* i_block is stored in file system block size */ 5093 i_blocks = i_blocks >> (inode->i_blkbits - 9); 5094 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 5095 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 5096 } 5097 return 0; 5098 } 5099 5100 /* 5101 * Post the struct inode info into an on-disk inode location in the 5102 * buffer-cache. This gobbles the caller's reference to the 5103 * buffer_head in the inode location struct. 5104 * 5105 * The caller must have write access to iloc->bh. 5106 */ 5107 static int ext4_do_update_inode(handle_t *handle, 5108 struct inode *inode, 5109 struct ext4_iloc *iloc) 5110 { 5111 struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 5112 struct ext4_inode_info *ei = EXT4_I(inode); 5113 struct buffer_head *bh = iloc->bh; 5114 int err = 0, rc, block; 5115 5116 /* For fields not not tracking in the in-memory inode, 5117 * initialise them to zero for new inodes. */ 5118 if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 5119 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 5120 5121 ext4_get_inode_flags(ei); 5122 raw_inode->i_mode = cpu_to_le16(inode->i_mode); 5123 if (!(test_opt(inode->i_sb, NO_UID32))) { 5124 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); 5125 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); 5126 /* 5127 * Fix up interoperability with old kernels. Otherwise, old inodes get 5128 * re-used with the upper 16 bits of the uid/gid intact 5129 */ 5130 if (!ei->i_dtime) { 5131 raw_inode->i_uid_high = 5132 cpu_to_le16(high_16_bits(inode->i_uid)); 5133 raw_inode->i_gid_high = 5134 cpu_to_le16(high_16_bits(inode->i_gid)); 5135 } else { 5136 raw_inode->i_uid_high = 0; 5137 raw_inode->i_gid_high = 0; 5138 } 5139 } else { 5140 raw_inode->i_uid_low = 5141 cpu_to_le16(fs_high2lowuid(inode->i_uid)); 5142 raw_inode->i_gid_low = 5143 cpu_to_le16(fs_high2lowgid(inode->i_gid)); 5144 raw_inode->i_uid_high = 0; 5145 raw_inode->i_gid_high = 0; 5146 } 5147 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 5148 5149 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 5150 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 5151 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 5152 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 5153 5154 if (ext4_inode_blocks_set(handle, raw_inode, ei)) 5155 goto out_brelse; 5156 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 5157 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 5158 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 5159 cpu_to_le32(EXT4_OS_HURD)) 5160 raw_inode->i_file_acl_high = 5161 cpu_to_le16(ei->i_file_acl >> 32); 5162 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 5163 ext4_isize_set(raw_inode, ei->i_disksize); 5164 if (ei->i_disksize > 0x7fffffffULL) { 5165 struct super_block *sb = inode->i_sb; 5166 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 5167 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 5168 EXT4_SB(sb)->s_es->s_rev_level == 5169 cpu_to_le32(EXT4_GOOD_OLD_REV)) { 5170 /* If this is the first large file 5171 * created, add a flag to the superblock. 5172 */ 5173 err = ext4_journal_get_write_access(handle, 5174 EXT4_SB(sb)->s_sbh); 5175 if (err) 5176 goto out_brelse; 5177 ext4_update_dynamic_rev(sb); 5178 EXT4_SET_RO_COMPAT_FEATURE(sb, 5179 EXT4_FEATURE_RO_COMPAT_LARGE_FILE); 5180 sb->s_dirt = 1; 5181 ext4_handle_sync(handle); 5182 err = ext4_handle_dirty_metadata(handle, NULL, 5183 EXT4_SB(sb)->s_sbh); 5184 } 5185 } 5186 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 5187 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 5188 if (old_valid_dev(inode->i_rdev)) { 5189 raw_inode->i_block[0] = 5190 cpu_to_le32(old_encode_dev(inode->i_rdev)); 5191 raw_inode->i_block[1] = 0; 5192 } else { 5193 raw_inode->i_block[0] = 0; 5194 raw_inode->i_block[1] = 5195 cpu_to_le32(new_encode_dev(inode->i_rdev)); 5196 raw_inode->i_block[2] = 0; 5197 } 5198 } else 5199 for (block = 0; block < EXT4_N_BLOCKS; block++) 5200 raw_inode->i_block[block] = ei->i_data[block]; 5201 5202 raw_inode->i_disk_version = cpu_to_le32(inode->i_version); 5203 if (ei->i_extra_isize) { 5204 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 5205 raw_inode->i_version_hi = 5206 cpu_to_le32(inode->i_version >> 32); 5207 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 5208 } 5209 5210 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 5211 rc = ext4_handle_dirty_metadata(handle, NULL, bh); 5212 if (!err) 5213 err = rc; 5214 ext4_clear_inode_state(inode, EXT4_STATE_NEW); 5215 5216 ext4_update_inode_fsync_trans(handle, inode, 0); 5217 out_brelse: 5218 brelse(bh); 5219 ext4_std_error(inode->i_sb, err); 5220 return err; 5221 } 5222 5223 /* 5224 * ext4_write_inode() 5225 * 5226 * We are called from a few places: 5227 * 5228 * - Within generic_file_write() for O_SYNC files. 5229 * Here, there will be no transaction running. We wait for any running 5230 * trasnaction to commit. 5231 * 5232 * - Within sys_sync(), kupdate and such. 5233 * We wait on commit, if tol to. 5234 * 5235 * - Within prune_icache() (PF_MEMALLOC == true) 5236 * Here we simply return. We can't afford to block kswapd on the 5237 * journal commit. 5238 * 5239 * In all cases it is actually safe for us to return without doing anything, 5240 * because the inode has been copied into a raw inode buffer in 5241 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 5242 * knfsd. 5243 * 5244 * Note that we are absolutely dependent upon all inode dirtiers doing the 5245 * right thing: they *must* call mark_inode_dirty() after dirtying info in 5246 * which we are interested. 5247 * 5248 * It would be a bug for them to not do this. The code: 5249 * 5250 * mark_inode_dirty(inode) 5251 * stuff(); 5252 * inode->i_size = expr; 5253 * 5254 * is in error because a kswapd-driven write_inode() could occur while 5255 * `stuff()' is running, and the new i_size will be lost. Plus the inode 5256 * will no longer be on the superblock's dirty inode list. 5257 */ 5258 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 5259 { 5260 int err; 5261 5262 if (current->flags & PF_MEMALLOC) 5263 return 0; 5264 5265 if (EXT4_SB(inode->i_sb)->s_journal) { 5266 if (ext4_journal_current_handle()) { 5267 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 5268 dump_stack(); 5269 return -EIO; 5270 } 5271 5272 if (wbc->sync_mode != WB_SYNC_ALL) 5273 return 0; 5274 5275 err = ext4_force_commit(inode->i_sb); 5276 } else { 5277 struct ext4_iloc iloc; 5278 5279 err = __ext4_get_inode_loc(inode, &iloc, 0); 5280 if (err) 5281 return err; 5282 if (wbc->sync_mode == WB_SYNC_ALL) 5283 sync_dirty_buffer(iloc.bh); 5284 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 5285 EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, 5286 "IO error syncing inode"); 5287 err = -EIO; 5288 } 5289 brelse(iloc.bh); 5290 } 5291 return err; 5292 } 5293 5294 /* 5295 * ext4_setattr() 5296 * 5297 * Called from notify_change. 5298 * 5299 * We want to trap VFS attempts to truncate the file as soon as 5300 * possible. In particular, we want to make sure that when the VFS 5301 * shrinks i_size, we put the inode on the orphan list and modify 5302 * i_disksize immediately, so that during the subsequent flushing of 5303 * dirty pages and freeing of disk blocks, we can guarantee that any 5304 * commit will leave the blocks being flushed in an unused state on 5305 * disk. (On recovery, the inode will get truncated and the blocks will 5306 * be freed, so we have a strong guarantee that no future commit will 5307 * leave these blocks visible to the user.) 5308 * 5309 * Another thing we have to assure is that if we are in ordered mode 5310 * and inode is still attached to the committing transaction, we must 5311 * we start writeout of all the dirty pages which are being truncated. 5312 * This way we are sure that all the data written in the previous 5313 * transaction are already on disk (truncate waits for pages under 5314 * writeback). 5315 * 5316 * Called with inode->i_mutex down. 5317 */ 5318 int ext4_setattr(struct dentry *dentry, struct iattr *attr) 5319 { 5320 struct inode *inode = dentry->d_inode; 5321 int error, rc = 0; 5322 int orphan = 0; 5323 const unsigned int ia_valid = attr->ia_valid; 5324 5325 error = inode_change_ok(inode, attr); 5326 if (error) 5327 return error; 5328 5329 if (is_quota_modification(inode, attr)) 5330 dquot_initialize(inode); 5331 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 5332 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { 5333 handle_t *handle; 5334 5335 /* (user+group)*(old+new) structure, inode write (sb, 5336 * inode block, ? - but truncate inode update has it) */ 5337 handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ 5338 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3); 5339 if (IS_ERR(handle)) { 5340 error = PTR_ERR(handle); 5341 goto err_out; 5342 } 5343 error = dquot_transfer(inode, attr); 5344 if (error) { 5345 ext4_journal_stop(handle); 5346 return error; 5347 } 5348 /* Update corresponding info in inode so that everything is in 5349 * one transaction */ 5350 if (attr->ia_valid & ATTR_UID) 5351 inode->i_uid = attr->ia_uid; 5352 if (attr->ia_valid & ATTR_GID) 5353 inode->i_gid = attr->ia_gid; 5354 error = ext4_mark_inode_dirty(handle, inode); 5355 ext4_journal_stop(handle); 5356 } 5357 5358 if (attr->ia_valid & ATTR_SIZE) { 5359 inode_dio_wait(inode); 5360 5361 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 5362 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5363 5364 if (attr->ia_size > sbi->s_bitmap_maxbytes) 5365 return -EFBIG; 5366 } 5367 } 5368 5369 if (S_ISREG(inode->i_mode) && 5370 attr->ia_valid & ATTR_SIZE && 5371 (attr->ia_size < inode->i_size)) { 5372 handle_t *handle; 5373 5374 handle = ext4_journal_start(inode, 3); 5375 if (IS_ERR(handle)) { 5376 error = PTR_ERR(handle); 5377 goto err_out; 5378 } 5379 if (ext4_handle_valid(handle)) { 5380 error = ext4_orphan_add(handle, inode); 5381 orphan = 1; 5382 } 5383 EXT4_I(inode)->i_disksize = attr->ia_size; 5384 rc = ext4_mark_inode_dirty(handle, inode); 5385 if (!error) 5386 error = rc; 5387 ext4_journal_stop(handle); 5388 5389 if (ext4_should_order_data(inode)) { 5390 error = ext4_begin_ordered_truncate(inode, 5391 attr->ia_size); 5392 if (error) { 5393 /* Do as much error cleanup as possible */ 5394 handle = ext4_journal_start(inode, 3); 5395 if (IS_ERR(handle)) { 5396 ext4_orphan_del(NULL, inode); 5397 goto err_out; 5398 } 5399 ext4_orphan_del(handle, inode); 5400 orphan = 0; 5401 ext4_journal_stop(handle); 5402 goto err_out; 5403 } 5404 } 5405 } 5406 5407 if (attr->ia_valid & ATTR_SIZE) { 5408 if (attr->ia_size != i_size_read(inode)) { 5409 truncate_setsize(inode, attr->ia_size); 5410 ext4_truncate(inode); 5411 } else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 5412 ext4_truncate(inode); 5413 } 5414 5415 if (!rc) { 5416 setattr_copy(inode, attr); 5417 mark_inode_dirty(inode); 5418 } 5419 5420 /* 5421 * If the call to ext4_truncate failed to get a transaction handle at 5422 * all, we need to clean up the in-core orphan list manually. 5423 */ 5424 if (orphan && inode->i_nlink) 5425 ext4_orphan_del(NULL, inode); 5426 5427 if (!rc && (ia_valid & ATTR_MODE)) 5428 rc = ext4_acl_chmod(inode); 5429 5430 err_out: 5431 ext4_std_error(inode->i_sb, error); 5432 if (!error) 5433 error = rc; 5434 return error; 5435 } 5436 5437 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, 5438 struct kstat *stat) 5439 { 5440 struct inode *inode; 5441 unsigned long delalloc_blocks; 5442 5443 inode = dentry->d_inode; 5444 generic_fillattr(inode, stat); 5445 5446 /* 5447 * We can't update i_blocks if the block allocation is delayed 5448 * otherwise in the case of system crash before the real block 5449 * allocation is done, we will have i_blocks inconsistent with 5450 * on-disk file blocks. 5451 * We always keep i_blocks updated together with real 5452 * allocation. But to not confuse with user, stat 5453 * will return the blocks that include the delayed allocation 5454 * blocks for this file. 5455 */ 5456 delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; 5457 5458 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; 5459 return 0; 5460 } 5461 5462 static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, 5463 int chunk) 5464 { 5465 int indirects; 5466 5467 /* if nrblocks are contiguous */ 5468 if (chunk) { 5469 /* 5470 * With N contiguous data blocks, we need at most 5471 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, 5472 * 2 dindirect blocks, and 1 tindirect block 5473 */ 5474 return DIV_ROUND_UP(nrblocks, 5475 EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; 5476 } 5477 /* 5478 * if nrblocks are not contiguous, worse case, each block touch 5479 * a indirect block, and each indirect block touch a double indirect 5480 * block, plus a triple indirect block 5481 */ 5482 indirects = nrblocks * 2 + 1; 5483 return indirects; 5484 } 5485 5486 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 5487 { 5488 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 5489 return ext4_indirect_trans_blocks(inode, nrblocks, chunk); 5490 return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); 5491 } 5492 5493 /* 5494 * Account for index blocks, block groups bitmaps and block group 5495 * descriptor blocks if modify datablocks and index blocks 5496 * worse case, the indexs blocks spread over different block groups 5497 * 5498 * If datablocks are discontiguous, they are possible to spread over 5499 * different block groups too. If they are contiuguous, with flexbg, 5500 * they could still across block group boundary. 5501 * 5502 * Also account for superblock, inode, quota and xattr blocks 5503 */ 5504 static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) 5505 { 5506 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); 5507 int gdpblocks; 5508 int idxblocks; 5509 int ret = 0; 5510 5511 /* 5512 * How many index blocks need to touch to modify nrblocks? 5513 * The "Chunk" flag indicating whether the nrblocks is 5514 * physically contiguous on disk 5515 * 5516 * For Direct IO and fallocate, they calls get_block to allocate 5517 * one single extent at a time, so they could set the "Chunk" flag 5518 */ 5519 idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk); 5520 5521 ret = idxblocks; 5522 5523 /* 5524 * Now let's see how many group bitmaps and group descriptors need 5525 * to account 5526 */ 5527 groups = idxblocks; 5528 if (chunk) 5529 groups += 1; 5530 else 5531 groups += nrblocks; 5532 5533 gdpblocks = groups; 5534 if (groups > ngroups) 5535 groups = ngroups; 5536 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) 5537 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; 5538 5539 /* bitmaps and block group descriptor blocks */ 5540 ret += groups + gdpblocks; 5541 5542 /* Blocks for super block, inode, quota and xattr blocks */ 5543 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); 5544 5545 return ret; 5546 } 5547 5548 /* 5549 * Calculate the total number of credits to reserve to fit 5550 * the modification of a single pages into a single transaction, 5551 * which may include multiple chunks of block allocations. 5552 * 5553 * This could be called via ext4_write_begin() 5554 * 5555 * We need to consider the worse case, when 5556 * one new block per extent. 5557 */ 5558 int ext4_writepage_trans_blocks(struct inode *inode) 5559 { 5560 int bpp = ext4_journal_blocks_per_page(inode); 5561 int ret; 5562 5563 ret = ext4_meta_trans_blocks(inode, bpp, 0); 5564 5565 /* Account for data blocks for journalled mode */ 5566 if (ext4_should_journal_data(inode)) 5567 ret += bpp; 5568 return ret; 5569 } 5570 5571 /* 5572 * Calculate the journal credits for a chunk of data modification. 5573 * 5574 * This is called from DIO, fallocate or whoever calling 5575 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks. 5576 * 5577 * journal buffers for data blocks are not included here, as DIO 5578 * and fallocate do no need to journal data buffers. 5579 */ 5580 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) 5581 { 5582 return ext4_meta_trans_blocks(inode, nrblocks, 1); 5583 } 5584 5585 /* 5586 * The caller must have previously called ext4_reserve_inode_write(). 5587 * Give this, we know that the caller already has write access to iloc->bh. 5588 */ 5589 int ext4_mark_iloc_dirty(handle_t *handle, 5590 struct inode *inode, struct ext4_iloc *iloc) 5591 { 5592 int err = 0; 5593 5594 if (test_opt(inode->i_sb, I_VERSION)) 5595 inode_inc_iversion(inode); 5596 5597 /* the do_update_inode consumes one bh->b_count */ 5598 get_bh(iloc->bh); 5599 5600 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ 5601 err = ext4_do_update_inode(handle, inode, iloc); 5602 put_bh(iloc->bh); 5603 return err; 5604 } 5605 5606 /* 5607 * On success, We end up with an outstanding reference count against 5608 * iloc->bh. This _must_ be cleaned up later. 5609 */ 5610 5611 int 5612 ext4_reserve_inode_write(handle_t *handle, struct inode *inode, 5613 struct ext4_iloc *iloc) 5614 { 5615 int err; 5616 5617 err = ext4_get_inode_loc(inode, iloc); 5618 if (!err) { 5619 BUFFER_TRACE(iloc->bh, "get_write_access"); 5620 err = ext4_journal_get_write_access(handle, iloc->bh); 5621 if (err) { 5622 brelse(iloc->bh); 5623 iloc->bh = NULL; 5624 } 5625 } 5626 ext4_std_error(inode->i_sb, err); 5627 return err; 5628 } 5629 5630 /* 5631 * Expand an inode by new_extra_isize bytes. 5632 * Returns 0 on success or negative error number on failure. 5633 */ 5634 static int ext4_expand_extra_isize(struct inode *inode, 5635 unsigned int new_extra_isize, 5636 struct ext4_iloc iloc, 5637 handle_t *handle) 5638 { 5639 struct ext4_inode *raw_inode; 5640 struct ext4_xattr_ibody_header *header; 5641 5642 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) 5643 return 0; 5644 5645 raw_inode = ext4_raw_inode(&iloc); 5646 5647 header = IHDR(inode, raw_inode); 5648 5649 /* No extended attributes present */ 5650 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || 5651 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 5652 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, 5653 new_extra_isize); 5654 EXT4_I(inode)->i_extra_isize = new_extra_isize; 5655 return 0; 5656 } 5657 5658 /* try to expand with EAs present */ 5659 return ext4_expand_extra_isize_ea(inode, new_extra_isize, 5660 raw_inode, handle); 5661 } 5662 5663 /* 5664 * What we do here is to mark the in-core inode as clean with respect to inode 5665 * dirtiness (it may still be data-dirty). 5666 * This means that the in-core inode may be reaped by prune_icache 5667 * without having to perform any I/O. This is a very good thing, 5668 * because *any* task may call prune_icache - even ones which 5669 * have a transaction open against a different journal. 5670 * 5671 * Is this cheating? Not really. Sure, we haven't written the 5672 * inode out, but prune_icache isn't a user-visible syncing function. 5673 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 5674 * we start and wait on commits. 5675 * 5676 * Is this efficient/effective? Well, we're being nice to the system 5677 * by cleaning up our inodes proactively so they can be reaped 5678 * without I/O. But we are potentially leaving up to five seconds' 5679 * worth of inodes floating about which prune_icache wants us to 5680 * write out. One way to fix that would be to get prune_icache() 5681 * to do a write_super() to free up some memory. It has the desired 5682 * effect. 5683 */ 5684 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 5685 { 5686 struct ext4_iloc iloc; 5687 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5688 static unsigned int mnt_count; 5689 int err, ret; 5690 5691 might_sleep(); 5692 trace_ext4_mark_inode_dirty(inode, _RET_IP_); 5693 err = ext4_reserve_inode_write(handle, inode, &iloc); 5694 if (ext4_handle_valid(handle) && 5695 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 5696 !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { 5697 /* 5698 * We need extra buffer credits since we may write into EA block 5699 * with this same handle. If journal_extend fails, then it will 5700 * only result in a minor loss of functionality for that inode. 5701 * If this is felt to be critical, then e2fsck should be run to 5702 * force a large enough s_min_extra_isize. 5703 */ 5704 if ((jbd2_journal_extend(handle, 5705 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 5706 ret = ext4_expand_extra_isize(inode, 5707 sbi->s_want_extra_isize, 5708 iloc, handle); 5709 if (ret) { 5710 ext4_set_inode_state(inode, 5711 EXT4_STATE_NO_EXPAND); 5712 if (mnt_count != 5713 le16_to_cpu(sbi->s_es->s_mnt_count)) { 5714 ext4_warning(inode->i_sb, 5715 "Unable to expand inode %lu. Delete" 5716 " some EAs or run e2fsck.", 5717 inode->i_ino); 5718 mnt_count = 5719 le16_to_cpu(sbi->s_es->s_mnt_count); 5720 } 5721 } 5722 } 5723 } 5724 if (!err) 5725 err = ext4_mark_iloc_dirty(handle, inode, &iloc); 5726 return err; 5727 } 5728 5729 /* 5730 * ext4_dirty_inode() is called from __mark_inode_dirty() 5731 * 5732 * We're really interested in the case where a file is being extended. 5733 * i_size has been changed by generic_commit_write() and we thus need 5734 * to include the updated inode in the current transaction. 5735 * 5736 * Also, dquot_alloc_block() will always dirty the inode when blocks 5737 * are allocated to the file. 5738 * 5739 * If the inode is marked synchronous, we don't honour that here - doing 5740 * so would cause a commit on atime updates, which we don't bother doing. 5741 * We handle synchronous inodes at the highest possible level. 5742 */ 5743 void ext4_dirty_inode(struct inode *inode, int flags) 5744 { 5745 handle_t *handle; 5746 5747 handle = ext4_journal_start(inode, 2); 5748 if (IS_ERR(handle)) 5749 goto out; 5750 5751 ext4_mark_inode_dirty(handle, inode); 5752 5753 ext4_journal_stop(handle); 5754 out: 5755 return; 5756 } 5757 5758 #if 0 5759 /* 5760 * Bind an inode's backing buffer_head into this transaction, to prevent 5761 * it from being flushed to disk early. Unlike 5762 * ext4_reserve_inode_write, this leaves behind no bh reference and 5763 * returns no iloc structure, so the caller needs to repeat the iloc 5764 * lookup to mark the inode dirty later. 5765 */ 5766 static int ext4_pin_inode(handle_t *handle, struct inode *inode) 5767 { 5768 struct ext4_iloc iloc; 5769 5770 int err = 0; 5771 if (handle) { 5772 err = ext4_get_inode_loc(inode, &iloc); 5773 if (!err) { 5774 BUFFER_TRACE(iloc.bh, "get_write_access"); 5775 err = jbd2_journal_get_write_access(handle, iloc.bh); 5776 if (!err) 5777 err = ext4_handle_dirty_metadata(handle, 5778 NULL, 5779 iloc.bh); 5780 brelse(iloc.bh); 5781 } 5782 } 5783 ext4_std_error(inode->i_sb, err); 5784 return err; 5785 } 5786 #endif 5787 5788 int ext4_change_inode_journal_flag(struct inode *inode, int val) 5789 { 5790 journal_t *journal; 5791 handle_t *handle; 5792 int err; 5793 5794 /* 5795 * We have to be very careful here: changing a data block's 5796 * journaling status dynamically is dangerous. If we write a 5797 * data block to the journal, change the status and then delete 5798 * that block, we risk forgetting to revoke the old log record 5799 * from the journal and so a subsequent replay can corrupt data. 5800 * So, first we make sure that the journal is empty and that 5801 * nobody is changing anything. 5802 */ 5803 5804 journal = EXT4_JOURNAL(inode); 5805 if (!journal) 5806 return 0; 5807 if (is_journal_aborted(journal)) 5808 return -EROFS; 5809 5810 jbd2_journal_lock_updates(journal); 5811 jbd2_journal_flush(journal); 5812 5813 /* 5814 * OK, there are no updates running now, and all cached data is 5815 * synced to disk. We are now in a completely consistent state 5816 * which doesn't have anything in the journal, and we know that 5817 * no filesystem updates are running, so it is safe to modify 5818 * the inode's in-core data-journaling state flag now. 5819 */ 5820 5821 if (val) 5822 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 5823 else 5824 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 5825 ext4_set_aops(inode); 5826 5827 jbd2_journal_unlock_updates(journal); 5828 5829 /* Finally we can mark the inode as dirty. */ 5830 5831 handle = ext4_journal_start(inode, 1); 5832 if (IS_ERR(handle)) 5833 return PTR_ERR(handle); 5834 5835 err = ext4_mark_inode_dirty(handle, inode); 5836 ext4_handle_sync(handle); 5837 ext4_journal_stop(handle); 5838 ext4_std_error(inode->i_sb, err); 5839 5840 return err; 5841 } 5842 5843 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) 5844 { 5845 return !buffer_mapped(bh); 5846 } 5847 5848 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 5849 { 5850 struct page *page = vmf->page; 5851 loff_t size; 5852 unsigned long len; 5853 int ret; 5854 struct file *file = vma->vm_file; 5855 struct inode *inode = file->f_path.dentry->d_inode; 5856 struct address_space *mapping = inode->i_mapping; 5857 handle_t *handle; 5858 get_block_t *get_block; 5859 int retries = 0; 5860 5861 /* 5862 * This check is racy but catches the common case. We rely on 5863 * __block_page_mkwrite() to do a reliable check. 5864 */ 5865 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); 5866 /* Delalloc case is easy... */ 5867 if (test_opt(inode->i_sb, DELALLOC) && 5868 !ext4_should_journal_data(inode) && 5869 !ext4_nonda_switch(inode->i_sb)) { 5870 do { 5871 ret = __block_page_mkwrite(vma, vmf, 5872 ext4_da_get_block_prep); 5873 } while (ret == -ENOSPC && 5874 ext4_should_retry_alloc(inode->i_sb, &retries)); 5875 goto out_ret; 5876 } 5877 5878 lock_page(page); 5879 size = i_size_read(inode); 5880 /* Page got truncated from under us? */ 5881 if (page->mapping != mapping || page_offset(page) > size) { 5882 unlock_page(page); 5883 ret = VM_FAULT_NOPAGE; 5884 goto out; 5885 } 5886 5887 if (page->index == size >> PAGE_CACHE_SHIFT) 5888 len = size & ~PAGE_CACHE_MASK; 5889 else 5890 len = PAGE_CACHE_SIZE; 5891 /* 5892 * Return if we have all the buffers mapped. This avoids the need to do 5893 * journal_start/journal_stop which can block and take a long time 5894 */ 5895 if (page_has_buffers(page)) { 5896 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, 5897 ext4_bh_unmapped)) { 5898 /* Wait so that we don't change page under IO */ 5899 wait_on_page_writeback(page); 5900 ret = VM_FAULT_LOCKED; 5901 goto out; 5902 } 5903 } 5904 unlock_page(page); 5905 /* OK, we need to fill the hole... */ 5906 if (ext4_should_dioread_nolock(inode)) 5907 get_block = ext4_get_block_write; 5908 else 5909 get_block = ext4_get_block; 5910 retry_alloc: 5911 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 5912 if (IS_ERR(handle)) { 5913 ret = VM_FAULT_SIGBUS; 5914 goto out; 5915 } 5916 ret = __block_page_mkwrite(vma, vmf, get_block); 5917 if (!ret && ext4_should_journal_data(inode)) { 5918 if (walk_page_buffers(handle, page_buffers(page), 0, 5919 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) { 5920 unlock_page(page); 5921 ret = VM_FAULT_SIGBUS; 5922 goto out; 5923 } 5924 ext4_set_inode_state(inode, EXT4_STATE_JDATA); 5925 } 5926 ext4_journal_stop(handle); 5927 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 5928 goto retry_alloc; 5929 out_ret: 5930 ret = block_page_mkwrite_return(ret); 5931 out: 5932 return ret; 5933 } 5934