/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
#include "bitmap.h"

/*
 * metadata/data are stored on disk in 4k-sized units (blocks) regardless of
 * the underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)
#define BLOCK_SECTOR_SHIFT (3)

/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write-through mode, the reclaim runs every log->max_free_space.
 * This prevents recovery from scanning the log for too long.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available to not get too tight.
 */
#define R5L_POOL_SIZE	4

static char *r5c_journal_mode_str[] = {"write-through",
				       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *	if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *	if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */

struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, rounded to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim run if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t *io_pool;
	struct bio_set *bs;
	mempool_t *meta_pool;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed. if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (eg, reclaim
					 * doesn't wait for a specific io_unit
					 * switching to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	/* for r5c_cache */
	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of seq at sh->log_start */
	struct list_head stripe_in_journal_list;

	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write back while the array is degraded */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in writeback mode, details below */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};

/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so this chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. These counts are tracked in a radix tree
 * (big_stripe_tree). We use the radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up the
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This lookup is protected by
 * rcu_read_lock().
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags is set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 */

/*
 * The radix tree requires the lowest 2 bits of the data pointer to be 2'b00.
 * So it is necessary to left shift the counter by 2 bits before using it
 * as the data pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2
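
/*
 * Illustrative sketch (not part of the driver): how a counter travels
 * through a radix_tree slot under the 2'b00 rule above. The helper names
 * below are hypothetical; the driver's real update paths additionally
 * take tree_lock and manipulate the slot in place.
 *
 *	static inline void *r5c_count_to_item(unsigned long count)
 *	{
 *		return (void *)(count << R5C_RADIX_COUNT_SHIFT);
 *	}
 *
 *	static inline unsigned long r5c_item_to_count(void *item)
 *	{
 *		return (unsigned long)item >> R5C_RADIX_COUNT_SHIFT;
 *	}
 *
 * e.g. a count of 3 is stored as the fake pointer 0xc, whose low two bits
 * are zero, so the radix tree does not mistake it for an internal or
 * exceptional entry.
 */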

/*
 * calculate key for big_stripe_tree
 *
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
				      sector_t sect)
{
	sector_t offset;

	offset = sector_div(sect, conf->chunk_sectors);
	return sect;
}

/*
 * An IO range starts at a meta data block and ends at the next meta data
 * block. The io unit's meta data block tracks the data/parity that follows
 * it. The io unit is written to the log disk with a normal write; as we
 * always flush the log disk first and only then start moving data to the
 * raid disks, there is no need to write the io unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
	struct bio *split_bio;

	unsigned int has_flush:1;	/* include flush request */
	unsigned int has_fua:1;		/* include fua request */
	unsigned int has_null_flush:1;	/* include empty flush request */
	/*
	 * io isn't sent yet; a flush/fua request can only be submitted once
	 * it is the first IO in the running_ios list
	 */
	unsigned int io_deferred:1;

	struct bio_list flush_barriers;	/* size == 0 flush bios */
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to log,
				 * no longer accepting new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripes data finished writing to raid */
};

bool r5c_is_writeback(struct r5l_log *log)
{
	return (log != NULL &&
		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}
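
/*
 * Worked example (illustrative) of the ring arithmetic above: on a log
 * with device_size = 1000 sectors, last_checkpoint = 900 and
 * log_start = 100, the head has wrapped, so
 *
 *	r5l_ring_distance(log, 900, 100) = 100 + 1000 - 900 = 200
 *
 * sectors are in use, and r5l_has_free_space(log, 800) is false because
 * 200 + 800 is not strictly smaller than 1000.
 */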

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

static void
r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
{
	struct bio *wbi, *wbi2;

	wbi = dev->written;
	dev->written = NULL;
	while (wbi && wbi->bi_iter.bi_sector <
	       dev->sector + STRIPE_SECTORS) {
		wbi2 = r5_next_bio(wbi, dev->sector);
		md_write_end(conf->mddev);
		bio_endio(wbi);
		wbi = wbi2;
	}
}

void r5c_handle_cached_data_endio(struct r5conf *conf,
				  struct stripe_head *sh, int disks)
{
	int i;

	for (i = sh->disks; i--; ) {
		if (sh->dev[i].written) {
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS,
					!test_bit(STRIPE_DEGRADED, &sh->state),
					0);
		}
	}
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space);

/* Check whether we should flush some stripes to free up stripe cache */
void r5c_check_stripe_cache_usage(struct r5conf *conf)
{
	int total_cached;

	if (!r5c_is_writeback(conf->log))
		return;

	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes);

	/*
	 * The following condition is true for either of the following:
	 *   - stripe cache pressure high:
	 *          total_cached > 3/4 min_nr_stripes ||
	 *          empty_inactive_list_nr > 0
	 *   - stripe cache pressure moderate:
	 *          total_cached > 1/2 min_nr_stripes
	 */
	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		r5l_wake_reclaim(conf->log, 0);
}

/*
 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
 * stripes in the cache
 */
void r5c_check_cached_full_stripe(struct r5conf *conf)
{
	if (!r5c_is_writeback(conf->log))
		return;

	/*
	 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
	 * or a full stripe (chunk size / 4k stripes).
	 */
	if (atomic_read(&conf->r5c_cached_full_stripes) >=
	    min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
		conf->chunk_sectors >> STRIPE_SHIFT))
		r5l_wake_reclaim(conf->log, 0);
}

/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes that are occupying log space
 * near last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flushes higher priority:
 *    1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *       stripes ALREADY in journal can be flushed w/o pending writes;
 *    2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
 *       can be delayed (r5l_add_no_space_stripe).
 *
 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
 * has already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For stripes that have not passed 1, flushing
 * requires (conf->raid_disks + 1) pages of journal space. There are at
 * most (conf->group_cnt + 1) stripes that have passed 1. So the total
 * journal space required to flush all cached stripes (in pages) is:
 *
 *   (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *   (group_cnt + 1) * (raid_disks + 1)
 * or
 *   (stripe_in_journal_count) * (max_degraded + 1) +
 *   (group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	if (!r5c_is_writeback(log))
		return 0;

	return BLOCK_SECTORS *
		((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
		 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}
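
/*
 * Worked example (illustrative): for a RAID 6 array with raid_disks = 8,
 * max_degraded = 2, group_cnt = 0 and 100 stripes in the journal, the
 * bound above is
 *
 *	(2 + 1) * 100 + (8 - 2) * (0 + 1) = 306 pages,
 *
 * i.e. 306 * BLOCK_SECTORS = 2448 sectors of reserved log space.
 */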

/*
 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
 *
 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
 * device is less than 2x of reclaim_required_space.
 */
static inline void r5c_update_log_state(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t free_space;
	sector_t reclaim_space;
	bool wake_reclaim = false;

	if (!r5c_is_writeback(log))
		return;

	free_space = r5l_ring_distance(log, log->log_start,
				       log->last_checkpoint);
	reclaim_space = r5c_log_required_to_flush_cache(conf);
	if (free_space < 2 * reclaim_space)
		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	else {
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
			wake_reclaim = true;
		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	}
	if (free_space < 3 * reclaim_space)
		set_bit(R5C_LOG_TIGHT, &conf->cache_state);
	else
		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);

	if (wake_reclaim)
		r5l_wake_reclaim(log, 0);
}
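
/*
 * Continuing the worked example above (illustrative): with
 * reclaim_required_space = 2448 sectors, the log is marked R5C_LOG_TIGHT
 * once free space drops below 3 * 2448 = 7344 sectors, and
 * R5C_LOG_CRITICAL below 2 * 2448 = 4896 sectors; both bits are cleared
 * again as the checkpoint advances.
 */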

/*
 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
 * This function should only be called in write-back mode.
 */
void r5c_make_stripe_write_out(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5l_log *log = conf->log;

	BUG_ON(!r5c_is_writeback(log));

	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(STRIPE_R5C_CACHING, &sh->state);

	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_inc(&conf->preread_active_stripes);
}

static void r5c_handle_data_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			set_bit(R5_InJournal, &sh->dev[i].flags);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
		}
	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
}

/*
 * this journal write must contain full parity,
 * it may also contain some data pages
 */
static void r5c_handle_parity_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_bit(R5_InJournal, &sh->dev[i].flags))
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
}

/*
 * Setting proper flags after writing (or flushing) data and/or parity to the
 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
 */
static void r5c_finish_cache_stripe(struct stripe_head *sh)
{
	struct r5l_log *log = sh->raid_conf->log;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
		/*
		 * Set R5_InJournal for parity dev[pd_idx]. This means
		 * all data AND parity are in the journal. For RAID 6, it is
		 * NOT necessary to set the flag for dev[qd_idx], as the
		 * two parities are written out together.
		 */
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		r5c_handle_data_cached(sh);
	} else {
		r5c_handle_parity_cached(sh);
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	}
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		r5c_finish_cache_stripe(sh);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_io_unit *io_deferred;
	struct r5l_log *log = io->log;
	unsigned long flags;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
	if (log->need_cache_flush && !list_empty(&io->stripe_list))
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	if (!list_empty(&log->running_ios)) {
		/*
		 * FLUSH/FUA io_unit is deferred because of ordering, now we
		 * can dispatch it
		 */
		io_deferred = list_first_entry(&log->running_ios,
					       struct r5l_io_unit, log_sibling);
		if (io_deferred->io_deferred)
			schedule_work(&log->deferred_io_work);
	}

	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);

	if (io->has_null_flush) {
		struct bio *bi;

		WARN_ON(bio_list_empty(&io->flush_barriers));
		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
			bio_endio(bi);
			atomic_dec(&io->pending_stripe);
		}
	}

	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
	if (atomic_read(&io->pending_stripe) == 0)
		__r5l_stripe_write_finished(io);
}

static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
{
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (io->has_flush)
		io->current_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->current_bio->bi_opf |= REQ_FUA;
	submit_bio(io->current_bio);

	if (!io->split_bio)
		return;

	if (io->has_flush)
		io->split_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->split_bio->bi_opf |= REQ_FUA;
	submit_bio(io->split_bio);
}

/* deferred io_unit will be dispatched here */
static void r5l_submit_io_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   deferred_io_work);
	struct r5l_io_unit *io = NULL;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	if (!list_empty(&log->running_ios)) {
		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
				      log_sibling);
		if (!io->io_deferred)
			io = NULL;
		else
			io->io_deferred = 0;
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (io)
		r5l_do_submit_io(log, io);
}

static void r5c_disable_writeback_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   disable_writeback_work);
	struct mddev *mddev = log->rdev->mddev;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;
	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
		mdname(mddev));
	mddev_suspend(mddev);
	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
	mddev_resume(mddev);
}

static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct bio *bio;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;
	bool do_submit = true;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);
	bio = io->current_bio;

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	if (io->has_flush || io->has_fua) {
		if (io != list_first_entry(&log->running_ios,
					   struct r5l_io_unit, log_sibling)) {
			io->io_deferred = 1;
			do_submit = false;
		}
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (do_submit)
		r5l_do_submit_io(log, io);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);

	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio->bi_bdev = log->rdev->bdev;
	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	r5c_update_log_state(log);
	/*
	 * If we filled up the log device, start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = mempool_alloc(log->io_pool, GFP_ATOMIC);
	if (!io)
		return NULL;
	memset(io, 0, sizeof(*io));

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	bio_list_init(&io->flush_barriers);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io) {
		log->current_io = r5l_new_meta(log);
		if (!log->current_io)
			return -ENOMEM;
	}

	return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		BUG_ON(io->split_bio);
		io->split_bio = io->current_bio;
		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, io->split_bio);
		io->need_split_bio = false;
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}
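
/*
 * Illustrative on-disk layout of one io_unit as built by the helpers
 * above (a sketch, assuming PAGE_SIZE == 4096; not a structure defined
 * by the source):
 *
 *	log_start + 0:	meta block (r5l_meta_block header followed by
 *			r5l_payload_data_parity / r5l_payload_flush entries)
 *	log_start + 8:	first data/parity page (BLOCK_SECTORS == 8)
 *	log_start + 16:	second data/parity page
 *	...
 *
 * Each r5l_append_payload_page() call advances log->log_start by
 * BLOCK_SECTORS via r5_reserve_log_entry(), so the pages land
 * back-to-back right after the meta block.
 */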

static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_io_unit *io;
	struct r5l_payload_flush *payload;
	int meta_size;

	/*
	 * payload_flush requires extra writes to the journal.
	 * To avoid handling the extra IO in quiesce, just skip
	 * flush_payload
	 */
	if (conf->quiesce)
		return;

	mutex_lock(&log->io_mutex);
	meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);

	if (r5l_get_meta(log, meta_size)) {
		mutex_unlock(&log->io_mutex);
		return;
	}

	/* current implementation is one stripe per flush payload */
	io = log->current_io;
	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32(sizeof(__le64));
	payload->flush_stripes[0] = cpu_to_le64(sect);
	io->meta_offset += meta_size;
	mutex_unlock(&log->io_mutex);
}

static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			  int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	int ret;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	ret = r5l_get_meta(log, meta_size);
	if (ret)
		return ret;

	io = log->current_io;

	if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
		io->has_flush = 1;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
		    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
			io->has_fua = 1;
			/*
			 * we need to flush journal to make sure recovery can
			 * reach the data with fua flag
			 */
			io->has_flush = 1;
		}
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (parity_pages == 2) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else if (parity_pages == 1) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	} else /* Just writing data, not parity, in caching phase */
		BUG_ON(parity_pages != 0);

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return 0;

	if (sh->log_start == MaxSector) {
		BUG_ON(!list_empty(&sh->r5c));
		sh->log_start = io->log_start;
		spin_lock_irq(&log->stripe_in_journal_lock);
		list_add_tail(&sh->r5c,
			      &log->stripe_in_journal_list);
		spin_unlock_irq(&log->stripe_in_journal_lock);
		atomic_inc(&log->stripe_in_journal_count);
	}
	return 0;
}
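
/*
 * Worked example (illustrative) of the meta_size computation above: a
 * RAID 6 stripe with data_pages = 4 and parity_pages = 2 needs
 *
 *	4 * (sizeof(struct r5l_payload_data_parity) + 4) +
 *	sizeof(struct r5l_payload_data_parity) + 2 * 4
 *
 * bytes of meta space: one payload header plus one __le32 checksum per
 * data page, and a single parity payload carrying both checksums.
 */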

/* add stripe to no_space_stripes, and then wake up reclaim */
static inline void r5l_add_no_space_stripe(struct r5l_log *log,
					   struct stripe_head *sh)
{
	spin_lock(&log->no_space_stripes_lock);
	list_add_tail(&sh->log_list, &log->no_space_stripes);
	spin_unlock(&log->no_space_stripes_lock);
}

/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int write_disks = 0;
	int data_pages, parity_pages;
	int reserve;
	int i;
	int ret = 0;
	bool wake_reclaim = false;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;

		write_disks++;
		/* checksum is already calculated in last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		if (!r5l_has_free_space(log, reserve)) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	} else {  /* R5C_JOURNAL_MODE_WRITE_BACK */
		/*
		 * log space critical, do not process stripes that are
		 * not in cache yet (sh->log_start == MaxSector).
		 */
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
		    sh->log_start == MaxSector) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
			reserve = 0;
		} else if (!r5l_has_free_space(log, reserve)) {
			if (sh->log_start == log->last_checkpoint)
				BUG();
			else
				r5l_add_no_space_stripe(log, sh);
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	}

	mutex_unlock(&log->io_mutex);
	if (wake_reclaim)
		r5l_wake_reclaim(log, reserve);
	return 0;
}
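
/*
 * Worked example (illustrative) of the reserve computation above: with
 * PAGE_SHIFT == 12, each page is 1 << (12 - 9) = 8 sectors. A RAID 6
 * stripe writing 4 data pages plus P and Q has write_disks = 6, so
 * reserve = (1 + 6) * 8 = 56 sectors: one block for the meta page and
 * six for the data/parity pages.
 */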

void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (!log)
		return -ENODEV;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		/*
		 * in write through (journal only)
		 * we flush log disk cache first, then write stripe data to
		 * raid disks. So if bio is finished, the log disk cache is
		 * flushed already. The recovery guarantees we can recover
		 * the bio from the log disk, so we don't need to flush again.
		 */
		if (bio->bi_iter.bi_size == 0) {
			bio_endio(bio);
			return 0;
		}
		bio->bi_opf &= ~REQ_PREFLUSH;
	} else {
		/* write back (with cache) */
		if (bio->bi_iter.bi_size == 0) {
			mutex_lock(&log->io_mutex);
			r5l_get_meta(log, 0);
			bio_list_add(&log->current_io->flush_barriers, bio);
			log->current_io->has_flush = 1;
			log->current_io->has_null_flush = 1;
			atomic_inc(&log->current_io->pending_stripe);
			r5l_submit_current_io(log);
			mutex_unlock(&log->io_mutex);
			return 0;
		}
	}
	return -EAGAIN;
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}

/*
 * calculate new last_checkpoint
 * for write through mode, returns log->next_checkpoint
 * for write back, returns log_start of first sh in stripe_in_journal_list
 */
static sector_t r5c_calculate_new_cp(struct r5conf *conf)
{
	struct stripe_head *sh;
	struct r5l_log *log = conf->log;
	sector_t new_cp;
	unsigned long flags;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return log->next_checkpoint;

	spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
	if (list_empty(&conf->log->stripe_in_journal_list)) {
		/* all stripes flushed */
		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
		return log->next_checkpoint;
	}
	sh = list_first_entry(&conf->log->stripe_in_journal_list,
			      struct stripe_head, r5c);
	new_cp = sh->log_start;
	spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
	return new_cp;
}

static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;

	return r5l_ring_distance(log, log->last_checkpoint,
				 r5c_calculate_new_cp(conf));
}

static void r5l_run_no_mem_stripe(struct r5l_log *log)
{
	struct stripe_head *sh;

	assert_spin_locked(&log->io_list_lock);

	if (!list_empty(&log->no_mem_stripes)) {
		sh = list_first_entry(&log->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static bool r5l_complete_finished_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;
	bool found = false;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_STRIPE_END)
			break;

		log->next_checkpoint = io->log_start;

		list_del(&io->log_sibling);
		mempool_free(io, log->io_pool);
		r5l_run_no_mem_stripe(log);

		found = true;
	}

	return found;
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	struct r5conf *conf = log->rdev->mddev->private;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

	if (!r5l_complete_finished_ios(log)) {
		spin_unlock_irqrestore(&log->io_list_lock, flags);
		return;
	}

	if (r5l_reclaimable_space(log) > log->max_free_space ||
	    test_bit(R5C_LOG_TIGHT, &conf->cache_state))
		r5l_wake_reclaim(log, 0);

	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}

static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
					   flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling)
		r5l_io_run_stripes(io);
	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Starting dispatch IO to raid.
 * io_unit(meta) consists of a log. There is one situation we want to avoid. A
 * broken meta in the middle of a log means recovery can't find meta at the
 * head of the log. If operations require the meta at the head to be
 * persistent in the log, we must make sure the meta before it is persistent
 * in the log too. A case is:
 *
 * stripe data/parity is in log, we start writing the stripe to raid disks.
 * stripe data/parity must be persistent in log before we do the write to
 * raid disks.
 *
 * The solution is we strictly maintain io_unit list order. In this case, we
 * only write stripes of an io_unit to raid disks until the io_unit is the
 * first one whose data/parity is in log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;

	if (!log || !log->need_cache_flush)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_reset(&log->flush_bio);
	log->flush_bio.bi_bdev = log->rdev->bdev;
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	submit_bio(&log->flush_bio);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_write_super_and_discard_space(struct r5l_log *log,
					      sector_t end)
{
	struct block_device *bdev = log->rdev->bdev;
	struct mddev *mddev;

	r5l_write_super(log, end);

	if (!blk_queue_discard(bdev_get_queue(bdev)))
		return;

	mddev = log->rdev->mddev;
	/*
	 * Discard could zero data, so before discard we must make sure
	 * superblock is updated to the new log tail. Updating the superblock
	 * (either directly calling md_update_sb() or depending on the md
	 * thread) must hold the reconfig mutex. On the other hand,
	 * raid5_quiesce is called with reconfig_mutex held. The first step of
	 * raid5_quiesce() is waiting for all IO to finish, hence waiting for
	 * the reclaim thread, while the reclaim thread is calling this
	 * function and waiting for the reconfig mutex. So there is a deadlock.
	 * We work around this issue with a trylock.
	 * FIXME: we could miss discard if we can't take reconfig mutex
	 */
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	if (!mddev_trylock(mddev))
		return;
	md_update_sb(mddev, 1);
	mddev_unlock(mddev);

	/* discard IO error really doesn't matter, ignore it */
	if (log->last_checkpoint < end) {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				end - log->last_checkpoint, GFP_NOIO, 0);
	} else {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				log->device_size - log->last_checkpoint,
				GFP_NOIO, 0);
		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
				GFP_NOIO, 0);
	}
}
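
/*
 * Example (illustrative) of the two-discard case above: with
 * device_size = 1000 sectors, last_checkpoint = 900 and end = 100, the
 * reclaimed range wraps around the end of the log, so we discard
 * [900, 1000) and then [0, 100) (both offset by rdev->data_offset)
 * instead of one contiguous range.
 */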

/*
 * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
 * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
 *
 * must hold conf->device_lock
 */
static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	BUG_ON(list_empty(&sh->lru));
	BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));

	/*
	 * The stripe is not ON_RELEASE_LIST, so it is safe to call
	 * raid5_release_stripe() while holding conf->device_lock
	 */
	BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
	assert_spin_locked(&conf->device_lock);

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);

	set_bit(STRIPE_HANDLE, &sh->state);
	atomic_inc(&conf->active_stripes);
	r5c_make_stripe_write_out(sh);

	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
		atomic_inc(&conf->r5c_flushing_partial_stripes);
	else
		atomic_inc(&conf->r5c_flushing_full_stripes);
	raid5_release_stripe(sh);
}

/*
 * if num == 0, flush all full stripes
 * if num > 0, flush all full stripes. If less than num full stripes are
 *             flushed, flush some partial stripes until a total of num
 *             stripes are flushed or there are no more cached stripes.
 */
void r5c_flush_cache(struct r5conf *conf, int num)
{
	int count;
	struct stripe_head *sh, *next;

	assert_spin_locked(&conf->device_lock);
	if (!conf->log)
		return;

	count = 0;
	list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
		r5c_flush_stripe(conf, sh);
		count++;
	}

	if (count >= num)
		return;
	list_for_each_entry_safe(sh, next,
				 &conf->r5c_partial_stripe_list, lru) {
		r5c_flush_stripe(conf, sh);
		if (++count >= num)
			break;
	}
}

static void r5c_do_reclaim(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;
	struct stripe_head *sh;
	int count = 0;
	unsigned long flags;
	int total_cached;
	int stripes_to_flush;
	int flushing_partial, flushing_full;

	if (!r5c_is_writeback(log))
		return;

	flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
	flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes) -
		flushing_full - flushing_partial;

	if (total_cached > conf->min_nr_stripes * 3 / 4 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		/*
		 * if stripe cache pressure high, flush all full stripes and
		 * some partial stripes
		 */
		stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
	else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
		 atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
		 R5C_FULL_STRIPE_FLUSH_BATCH(conf))
		/*
		 * if stripe cache pressure moderate, or if there are many
		 * full stripes, flush all full stripes
		 */
		stripes_to_flush = 0;
	else
		/* no need to flush */
		stripes_to_flush = -1;

	if (stripes_to_flush >= 0) {
		spin_lock_irqsave(&conf->device_lock, flags);
		r5c_flush_cache(conf, stripes_to_flush);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	/* if log space is tight, flush stripes on stripe_in_journal_list */
	if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
		spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
		spin_lock(&conf->device_lock);
		list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
			/*
			 * stripes on stripe_in_journal_list could be in any
			 * state of the stripe_cache state machine. In this
			 * case, we only want to flush stripes on
			 * r5c_cached_full/partial_stripes. The following
			 * condition makes sure the stripe is on one of the
			 * two lists.
			 */
			if (!list_empty(&sh->lru) &&
			    !test_bit(STRIPE_HANDLE, &sh->state) &&
			    atomic_read(&sh->count) == 0) {
				r5c_flush_stripe(conf, sh);
				if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
					break;
			}
		}
		spin_unlock(&conf->device_lock);
		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
	}

	if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
		r5l_run_no_space_stripes(log);

	md_wakeup_thread(conf->mddev->thread);
}

static void r5l_do_reclaim(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
	sector_t reclaimable;
	sector_t next_checkpoint;
	bool write_super;

	spin_lock_irq(&log->io_list_lock);
	write_super = r5l_reclaimable_space(log) > log->max_free_space ||
		reclaim_target != 0 || !list_empty(&log->no_space_stripes);
	/*
	 * move proper io_unit to reclaim list. We should not change the order.
	 * reclaimable/unreclaimable io_units can be mixed in the list, and we
	 * shouldn't reuse space of an unreclaimable io_unit
	 */
	while (1) {
		reclaimable = r5l_reclaimable_space(log);
		if (reclaimable >= reclaim_target ||
		    (list_empty(&log->running_ios) &&
		     list_empty(&log->io_end_ios) &&
		     list_empty(&log->flushing_ios) &&
		     list_empty(&log->finished_ios)))
			break;

		md_wakeup_thread(log->rdev->mddev->thread);
		wait_event_lock_irq(log->iounit_wait,
				    r5l_reclaimable_space(log) > reclaimable,
				    log->io_list_lock);
	}

	next_checkpoint = r5c_calculate_new_cp(conf);
	spin_unlock_irq(&log->io_list_lock);

	if (reclaimable == 0 || !write_super)
		return;

	/*
	 * write_super will flush the cache of each raid disk. We must write
	 * super here, because the log area might be reused soon and we don't
	 * want to confuse recovery
	 */
	r5l_write_super_and_discard_space(log, next_checkpoint);

	mutex_lock(&log->io_mutex);
	log->last_checkpoint = next_checkpoint;
	r5c_update_log_state(log);
	mutex_unlock(&log->io_mutex);

	r5l_run_no_space_stripes(log);
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;
	r5c_do_reclaim(conf);
	r5l_do_reclaim(log);
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
	unsigned long target;
	unsigned long new = (unsigned long)space; /* overflow in theory */

	if (!log)
		return;
	do {
		target = log->reclaim_target;
		if (new < target)
			return;
	} while (cmpxchg(&log->reclaim_target, target, new) != target);
	md_wakeup_thread(log->reclaim_thread);
}

void r5l_quiesce(struct r5l_log *log, int state)
{
	struct mddev *mddev;
	if (!log || state == 2)
		return;
	if (state == 0)
		kthread_unpark(log->reclaim_thread->tsk);
	else if (state == 1) {
		/* make sure r5l_write_super_and_discard_space exits */
		mddev = log->rdev->mddev;
		wake_up(&mddev->sb_wait);
		kthread_park(log->reclaim_thread->tsk);
		r5l_wake_reclaim(log, MaxSector);
		r5l_do_reclaim(log);
	}
}

bool r5l_log_disk_error(struct r5conf *conf)
{
	struct r5l_log *log;
	bool ret;
	/* don't allow write if journal disk is missing */
	rcu_read_lock();
	log = rcu_dereference(conf->log);

	if (!log)
		ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	else
		ret = test_bit(Faulty, &log->rdev->flags);
	rcu_read_unlock();
	return ret;
}

#define R5L_RECOVERY_PAGE_POOL_SIZE 256

struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
	int data_parity_stripes;	/* number of data_parity stripes */
	int data_only_stripes;		/* number of data_only stripes */
	struct list_head cached_list;

	/*
	 * read ahead page pool (ra_pool)
	 * in recovery, the log is read sequentially. It is not efficient to
	 * read every page with sync_page_io(). The read ahead page pool
	 * reads multiple pages with one IO, so further log reads can
	 * just copy data from the pool.
	 */
	struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
	sector_t pool_offset;	/* offset of first page in the pool */
	int total_pages;	/* total allocated pages */
	int valid_pages;	/* pages with valid data */
	struct bio *ra_bio;	/* bio to do the read ahead */
};

static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx)
{
	struct page *page;

	ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, log->bs);
	if (!ctx->ra_bio)
		return -ENOMEM;

	ctx->valid_pages = 0;
	ctx->total_pages = 0;
	while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
		page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		ctx->ra_pool[ctx->total_pages] = page;
		ctx->total_pages += 1;
	}

	if (ctx->total_pages == 0) {
		bio_put(ctx->ra_bio);
		return -ENOMEM;
	}

	ctx->pool_offset = 0;
	return 0;
}

static void r5l_recovery_free_ra_pool(struct r5l_log *log,
				      struct r5l_recovery_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->total_pages; ++i)
		put_page(ctx->ra_pool[i]);
	bio_put(ctx->ra_bio);
}

/*
 * fetch ctx->valid_pages pages from offset
 * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
 * However, if the offset is close to the end of the journal device,
 * ctx->valid_pages could be smaller than ctx->total_pages
 */
static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
				      struct r5l_recovery_ctx *ctx,
				      sector_t offset)
{
	bio_reset(ctx->ra_bio);
	ctx->ra_bio->bi_bdev = log->rdev->bdev;
	bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
	ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;

	ctx->valid_pages = 0;
	ctx->pool_offset = offset;

	while (ctx->valid_pages < ctx->total_pages) {
		bio_add_page(ctx->ra_bio,
			     ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
		ctx->valid_pages += 1;

		offset = r5l_ring_add(log, offset, BLOCK_SECTORS);

		if (offset == 0)  /* reached end of the device */
			break;
	}

	return submit_bio_wait(ctx->ra_bio);
}

/*
 * try to read a page from the read ahead page pool; if the page is not in
 * the pool, call r5l_recovery_fetch_ra_pool
 */
static int r5l_recovery_read_page(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx,
				  struct page *page,
				  sector_t offset)
{
	int ret;

	if (offset < ctx->pool_offset ||
	    offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
		ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
		if (ret)
			return ret;
	}

	BUG_ON(offset < ctx->pool_offset ||
	       offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);

	memcpy(page_address(page),
	       page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
					 BLOCK_SECTOR_SHIFT]),
	       PAGE_SIZE);
	return 0;
}
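
/*
 * Worked example (illustrative) of the pool lookup above: with
 * pool_offset = 2048 and valid_pages = 256, the pool covers sectors
 * [2048, 2048 + 256 * 8). A request for offset 2064 hits page
 * (2064 - 2048) >> BLOCK_SECTOR_SHIFT = 2 of ra_pool; a request for
 * offset 4096 misses and triggers a fresh r5l_recovery_fetch_ra_pool()
 * starting at that offset.
 */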
ctx->seq || 1723 mb->version != R5LOG_VERSION || 1724 le64_to_cpu(mb->position) != ctx->pos) 1725 return -EINVAL; 1726 1727 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); 1728 if (stored_crc != crc) 1729 return -EINVAL; 1730 1731 if (le32_to_cpu(mb->meta_size) > PAGE_SIZE) 1732 return -EINVAL; 1733 1734 ctx->meta_total_blocks = BLOCK_SECTORS; 1735 1736 return 0; 1737 } 1738 1739 static void 1740 r5l_recovery_create_empty_meta_block(struct r5l_log *log, 1741 struct page *page, 1742 sector_t pos, u64 seq) 1743 { 1744 struct r5l_meta_block *mb; 1745 1746 mb = page_address(page); 1747 clear_page(mb); 1748 mb->magic = cpu_to_le32(R5LOG_MAGIC); 1749 mb->version = R5LOG_VERSION; 1750 mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block)); 1751 mb->seq = cpu_to_le64(seq); 1752 mb->position = cpu_to_le64(pos); 1753 } 1754 1755 static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, 1756 u64 seq) 1757 { 1758 struct page *page; 1759 struct r5l_meta_block *mb; 1760 1761 page = alloc_page(GFP_KERNEL); 1762 if (!page) 1763 return -ENOMEM; 1764 r5l_recovery_create_empty_meta_block(log, page, pos, seq); 1765 mb = page_address(page); 1766 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, 1767 mb, PAGE_SIZE)); 1768 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, 1769 REQ_FUA, false)) { 1770 __free_page(page); 1771 return -EIO; 1772 } 1773 __free_page(page); 1774 return 0; 1775 } 1776 1777 /* 1778 * r5l_recovery_load_data and r5l_recovery_load_parity uses flag R5_Wantwrite 1779 * to mark valid (potentially not flushed) data in the journal. 1780 * 1781 * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb, 1782 * so there should not be any mismatch here. 1783 */ 1784 static void r5l_recovery_load_data(struct r5l_log *log, 1785 struct stripe_head *sh, 1786 struct r5l_recovery_ctx *ctx, 1787 struct r5l_payload_data_parity *payload, 1788 sector_t log_offset) 1789 { 1790 struct mddev *mddev = log->rdev->mddev; 1791 struct r5conf *conf = mddev->private; 1792 int dd_idx; 1793 1794 raid5_compute_sector(conf, 1795 le64_to_cpu(payload->location), 0, 1796 &dd_idx, sh); 1797 r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset); 1798 sh->dev[dd_idx].log_checksum = 1799 le32_to_cpu(payload->checksum[0]); 1800 ctx->meta_total_blocks += BLOCK_SECTORS; 1801 1802 set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags); 1803 set_bit(STRIPE_R5C_CACHING, &sh->state); 1804 } 1805 1806 static void r5l_recovery_load_parity(struct r5l_log *log, 1807 struct stripe_head *sh, 1808 struct r5l_recovery_ctx *ctx, 1809 struct r5l_payload_data_parity *payload, 1810 sector_t log_offset) 1811 { 1812 struct mddev *mddev = log->rdev->mddev; 1813 struct r5conf *conf = mddev->private; 1814 1815 ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded; 1816 r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset); 1817 sh->dev[sh->pd_idx].log_checksum = 1818 le32_to_cpu(payload->checksum[0]); 1819 set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags); 1820 1821 if (sh->qd_idx >= 0) { 1822 r5l_recovery_read_page( 1823 log, ctx, sh->dev[sh->qd_idx].page, 1824 r5l_ring_add(log, log_offset, BLOCK_SECTORS)); 1825 sh->dev[sh->qd_idx].log_checksum = 1826 le32_to_cpu(payload->checksum[1]); 1827 set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags); 1828 } 1829 clear_bit(STRIPE_R5C_CACHING, &sh->state); 1830 } 1831 1832 static void r5l_recovery_reset_stripe(struct stripe_head *sh) 1833 { 1834 int i; 1835 1836 sh->state = 0; 1837 sh->log_start = MaxSector; 1838 for (i = 
static void r5l_recovery_reset_stripe(struct stripe_head *sh)
{
	int i;

	sh->state = 0;
	sh->log_start = MaxSector;
	for (i = sh->disks; i--; )
		sh->dev[i].flags = 0;
}

static void
r5l_recovery_replay_one_stripe(struct r5conf *conf,
			       struct stripe_head *sh,
			       struct r5l_recovery_ctx *ctx)
{
	struct md_rdev *rdev, *rrdev;
	int disk_index;
	int data_count = 0;

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;
		if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
			continue;
		data_count++;
	}

	/*
	 * stripes that only have parity must have been flushed
	 * before the crash that we are now recovering from, so
	 * there is nothing more to recover.
	 */
	if (data_count == 0)
		goto out;

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;

		/* in case device is broken */
		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[disk_index].rdev);
		if (rdev) {
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			sync_page_io(rdev, sh->sector, PAGE_SIZE,
				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
				     false);
			rdev_dec_pending(rdev, rdev->mddev);
			rcu_read_lock();
		}
		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
		if (rrdev) {
			atomic_inc(&rrdev->nr_pending);
			rcu_read_unlock();
			sync_page_io(rrdev, sh->sector, PAGE_SIZE,
				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
				     false);
			rdev_dec_pending(rrdev, rrdev->mddev);
			rcu_read_lock();
		}
		rcu_read_unlock();
	}
	ctx->data_parity_stripes++;
out:
	r5l_recovery_reset_stripe(sh);
}

static struct stripe_head *
r5c_recovery_alloc_stripe(struct r5conf *conf,
			  sector_t stripe_sect)
{
	struct stripe_head *sh;

	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
	if (!sh)
		return NULL;	/* no more stripes available */

	r5l_recovery_reset_stripe(sh);

	return sh;
}

static struct stripe_head *
r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
{
	struct stripe_head *sh;

	list_for_each_entry(sh, list, lru)
		if (sh->sector == sect)
			return sh;
	return NULL;
}

static void
r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
			  struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
		r5l_recovery_reset_stripe(sh);
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}
}

static void
r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
			    struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
		if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
			r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
			list_del_init(&sh->lru);
			raid5_release_stripe(sh);
		}
}

/* if it matches return 0; otherwise return -EINVAL */
static int
r5l_recovery_verify_data_checksum(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx,
				  struct page *page,
				  sector_t log_offset, __le32 log_checksum)
{
	void *addr;
	u32 checksum;

	r5l_recovery_read_page(log, ctx, page, log_offset);
	addr = kmap_atomic(page);
	checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
	kunmap_atomic(addr);
	return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
}

/*
 * Before loading data to the stripe cache, we need to verify the checksums
 * of all data pages. If there is a mismatch for any data page, we drop all
 * data in the meta block.
 */
static int
r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_meta_block *mb = page_address(ctx->meta_page);
	sector_t mb_offset = sizeof(struct r5l_meta_block);
	sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
	struct page *page;
	struct r5l_payload_data_parity *payload;
	struct r5l_payload_flush *payload_flush;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	while (mb_offset < le32_to_cpu(mb->meta_size)) {
		payload = (void *)mb + mb_offset;
		payload_flush = (void *)mb + mb_offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			if (r5l_recovery_verify_data_checksum(
				    log, ctx, page, log_offset,
				    payload->checksum[0]) < 0)
				goto mismatch;
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
			if (r5l_recovery_verify_data_checksum(
				    log, ctx, page, log_offset,
				    payload->checksum[0]) < 0)
				goto mismatch;
			if (conf->max_degraded == 2 && /* q for RAID 6 */
			    r5l_recovery_verify_data_checksum(
				    log, ctx, page,
				    r5l_ring_add(log, log_offset,
						 BLOCK_SECTORS),
				    payload->checksum[1]) < 0)
				goto mismatch;
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			/* nothing to do for R5LOG_PAYLOAD_FLUSH here */
		} else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
			goto mismatch;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			mb_offset += sizeof(struct r5l_payload_flush) +
				le32_to_cpu(payload_flush->size);
		} else {
			/* DATA or PARITY payload */
			log_offset = r5l_ring_add(log, log_offset,
						  le32_to_cpu(payload->size));
			mb_offset += sizeof(struct r5l_payload_data_parity) +
				sizeof(__le32) *
				(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
		}
	}

	put_page(page);
	return 0;

mismatch:
	put_page(page);
	return -EINVAL;
}
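/*
 * Payload bookkeeping used by the loop above, and again in
 * r5c_recovery_analyze_meta_block() below: payload->size is in sectors, so
 * with 4kB pages a DATA/PARITY payload covers
 *
 *	pages = le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9);
 *
 * pages (i.e. size / 8), and its entry in the meta block occupies
 * sizeof(struct r5l_payload_data_parity) plus one __le32 checksum per page.
 * For example, a RAID6 parity payload (P and Q) has size == 16 sectors,
 * covers two pages in the log, and carries two checksums.
 */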
/*
 * Analyze all data/parity pages in one meta block
 * Returns:
 * 0 for success
 * -EINVAL for unknown payload type
 * -EAGAIN for checksum mismatch of data page
 * -ENOMEM for running out of memory (alloc_page failed or ran out of
 *  stripes)
 */
static int
r5c_recovery_analyze_meta_block(struct r5l_log *log,
				struct r5l_recovery_ctx *ctx,
				struct list_head *cached_stripe_list)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_meta_block *mb;
	struct r5l_payload_data_parity *payload;
	struct r5l_payload_flush *payload_flush;
	int mb_offset;
	sector_t log_offset;
	sector_t stripe_sect;
	struct stripe_head *sh;
	int ret;

	/*
	 * For a mismatch in data blocks, we will drop all data in this mb,
	 * but we will still read the next mb for other data with the FLUSH
	 * flag, as io_units could finish out of order.
	 */
	ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
	if (ret == -EINVAL)
		return -EAGAIN;
	else if (ret)
		return ret;	/* -ENOMEM due to alloc_page() failure */

	mb = page_address(ctx->meta_page);
	mb_offset = sizeof(struct r5l_meta_block);
	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

	while (mb_offset < le32_to_cpu(mb->meta_size)) {
		int dd;

		payload = (void *)mb + mb_offset;
		payload_flush = (void *)mb + mb_offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			int i, count;

			count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
			for (i = 0; i < count; ++i) {
				stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
				sh = r5c_recovery_lookup_stripe(cached_stripe_list,
								stripe_sect);
				if (sh) {
					WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
					r5l_recovery_reset_stripe(sh);
					list_del_init(&sh->lru);
					raid5_release_stripe(sh);
				}
			}

			mb_offset += sizeof(struct r5l_payload_flush) +
				le32_to_cpu(payload_flush->size);
			continue;
		}

		/* DATA or PARITY payload */
		stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
			raid5_compute_sector(
				conf, le64_to_cpu(payload->location), 0, &dd,
				NULL)
			: le64_to_cpu(payload->location);

		sh = r5c_recovery_lookup_stripe(cached_stripe_list,
						stripe_sect);

		if (!sh) {
			sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
			/*
			 * cannot get stripe from raid5_get_active_stripe
			 * try replaying some stripes
			 */
			if (!sh) {
				r5c_recovery_replay_stripes(
					cached_stripe_list, ctx);
				sh = r5c_recovery_alloc_stripe(
					conf, stripe_sect);
			}
			if (!sh) {
				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
					 mdname(mddev),
					 conf->min_nr_stripes * 2);
				raid5_set_cache_size(mddev,
						     conf->min_nr_stripes * 2);
				sh = r5c_recovery_alloc_stripe(conf,
							       stripe_sect);
			}
			if (!sh) {
				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
				       mdname(mddev));
				return -ENOMEM;
			}
			list_add_tail(&sh->lru, cached_stripe_list);
		}

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
			    test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
				r5l_recovery_replay_one_stripe(conf, sh, ctx);
				list_move_tail(&sh->lru, cached_stripe_list);
			}
			r5l_recovery_load_data(log, sh, ctx, payload,
					       log_offset);
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
			r5l_recovery_load_parity(log, sh, ctx, payload,
						 log_offset);
		else
			return -EINVAL;

		log_offset = r5l_ring_add(log, log_offset,
					  le32_to_cpu(payload->size));

		mb_offset += sizeof(struct r5l_payload_data_parity) +
			sizeof(__le32) *
			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
	}

	return 0;
}
/*
 * Load the stripe into cache. The stripe will be written out later by
 * the stripe cache state machine.
 */
static void r5c_recovery_load_one_stripe(struct r5l_log *log,
					 struct stripe_head *sh)
{
	struct r5dev *dev;
	int i;

	for (i = sh->disks; i--; ) {
		dev = sh->dev + i;
		if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
			set_bit(R5_InJournal, &dev->flags);
			set_bit(R5_UPTODATE, &dev->flags);
		}
	}
}

/*
 * Scan through the log for all to-be-flushed data
 *
 * For stripes with data and parity, namely Data-Parity stripes
 * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
 *
 * For stripes with only data, namely Data-Only stripes
 * (STRIPE_R5C_CACHING == 1), we load them to the stripe cache state machine.
 *
 * For a stripe, if we see data after parity, we should discard all previous
 * data and parity for this stripe, as those data are already flushed to
 * the array.
 *
 * At the end of the scan, we return the new journal_tail, which points to
 * the first data-only stripe on the journal device, or to the next invalid
 * meta block.
 */
static int r5c_recovery_flush_log(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh;
	int ret = 0;

	/* scan through the log */
	while (1) {
		if (r5l_recovery_read_meta_block(log, ctx))
			break;

		ret = r5c_recovery_analyze_meta_block(log, ctx,
						      &ctx->cached_list);
		/*
		 * -EAGAIN means a mismatch in a data block; in this case we
		 * still try to scan the next meta block
		 */
		if (ret && ret != -EAGAIN)
			break;	/* ret == -EINVAL or -ENOMEM */
		ctx->seq++;
		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
	}

	if (ret == -ENOMEM) {
		r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
		return ret;
	}

	/* replay data-parity stripes */
	r5c_recovery_replay_stripes(&ctx->cached_list, ctx);

	/* load data-only stripes to stripe cache */
	list_for_each_entry(sh, &ctx->cached_list, lru) {
		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
		r5c_recovery_load_one_stripe(log, sh);
		ctx->data_only_stripes++;
	}

	return 0;
}
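/*
 * Despite its name, ctx->meta_total_blocks is counted in sectors: it starts
 * at BLOCK_SECTORS for the meta block itself and grows by BLOCK_SECTORS per
 * data or parity page loaded. For example, a meta block describing one data
 * page plus RAID5 parity accounts for 3 * BLOCK_SECTORS == 24 sectors, so
 * the scan loop above steps
 *
 *	ctx->pos = r5l_ring_add(log, ctx->pos, 24);
 *
 * around the ring to reach the next meta block.
 */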
/*
 * We did a recovery. Now ctx.pos points to an invalid meta block. New
 * log will start here. But we can't let the superblock point to the last
 * valid meta block. The log might look like:
 * | meta 1| meta 2| meta 3|
 * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
 * superblock points to meta 1, we write a new valid meta 2n. If a crash
 * happens again, new recovery will start from meta 1. Since meta 2n is
 * valid now, recovery will think meta 3 is valid, which is wrong.
 * The solution is that we create a new meta in meta2 with its seq ==
 * meta 1's seq + 10000 and let the superblock point to meta2. The same
 * recovery will not think meta 3 is a valid meta, because its seq doesn't
 * match.
 */

/*
 * Before recovery, the log looks like the following
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^
 *   |- log->last_checkpoint
 *   |- log->last_cp_seq
 *
 * Now we scan through the log until we see invalid entry
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^                            ^
 *   |- log->last_checkpoint      |- ctx->pos
 *   |- log->last_cp_seq          |- ctx->seq
 *
 * From this point, we need to increase the seq number by 10000 to avoid
 * confusing the next recovery.
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^                            ^
 *   |- log->last_checkpoint      |- ctx->pos+1
 *   |- log->last_cp_seq          |- ctx->seq+10001
 *
 * However, it is not safe to start the state machine yet, because data only
 * stripes are not yet secured in RAID. To save these data only stripes, we
 * rewrite them starting from seq+10000.
 *
 *   -----------------------------------------------------------------
 *   |           valid log        | data only stripes | invalid log  |
 *   -----------------------------------------------------------------
 *   ^                                                ^
 *   |- log->last_checkpoint                          |- ctx->pos+n
 *   |- log->last_cp_seq                              |- ctx->seq+10000+n
 *
 * If failure happens again during this process, the recovery can safely
 * start again from log->last_checkpoint.
 *
 * Once the data only stripes are rewritten to journal, we move the log tail
 *
 *   -----------------------------------------------------------------
 *   |     old log       |    data only stripes    | invalid log     |
 *   -----------------------------------------------------------------
 *                       ^                         ^
 *                       |- log->last_checkpoint   |- ctx->pos+n
 *                       |- log->last_cp_seq       |- ctx->seq+10000+n
 *
 * Then we can safely start the state machine. If failure happens from this
 * point on, the recovery will start from the new log->last_checkpoint.
 */
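/*
 * Worked example of the sequence bump: suppose the scan stopped expecting
 * seq == 43 (the last valid meta block carried seq == 42). After
 * ctx->seq += 10000, everything written from here carries seq >= 10043.
 * If a stale meta block from before the crash still sits further along the
 * ring with seq == 44, the next recovery reads the rewritten block
 * (seq 10043), expects 10044 next, sees 44, and correctly stops there.
 */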
static int
r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh;
	struct mddev *mddev = log->rdev->mddev;
	struct page *page;
	sector_t next_checkpoint = MaxSector;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
		       mdname(mddev));
		return -ENOMEM;
	}

	WARN_ON(list_empty(&ctx->cached_list));

	list_for_each_entry(sh, &ctx->cached_list, lru) {
		struct r5l_meta_block *mb;
		int i;
		int offset;
		sector_t write_pos;

		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
		r5l_recovery_create_empty_meta_block(log, page,
						     ctx->pos, ctx->seq);
		mb = page_address(page);
		offset = le32_to_cpu(mb->meta_size);
		write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

		for (i = sh->disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			struct r5l_payload_data_parity *payload;
			void *addr;

			if (test_bit(R5_InJournal, &dev->flags)) {
				payload = (void *)mb + offset;
				payload->header.type = cpu_to_le16(
					R5LOG_PAYLOAD_DATA);
				payload->size = cpu_to_le32(BLOCK_SECTORS);
				payload->location = cpu_to_le64(
					raid5_compute_blocknr(sh, i, 0));
				addr = kmap_atomic(dev->page);
				payload->checksum[0] = cpu_to_le32(
					crc32c_le(log->uuid_checksum, addr,
						  PAGE_SIZE));
				kunmap_atomic(addr);
				sync_page_io(log->rdev, write_pos, PAGE_SIZE,
					     dev->page, REQ_OP_WRITE, 0, false);
				write_pos = r5l_ring_add(log, write_pos,
							 BLOCK_SECTORS);
				offset += sizeof(__le32) +
					sizeof(struct r5l_payload_data_parity);
			}
		}
		mb->meta_size = cpu_to_le32(offset);
		mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
						     mb, PAGE_SIZE));
		sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
			     REQ_OP_WRITE, REQ_FUA, false);
		sh->log_start = ctx->pos;
		list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
		atomic_inc(&log->stripe_in_journal_count);
		ctx->pos = write_pos;
		ctx->seq += 1;
		next_checkpoint = sh->log_start;
	}
	log->next_checkpoint = next_checkpoint;
	__free_page(page);
	return 0;
}
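/*
 * On-disk result of the loop above for one stripe that has, say, two pages
 * in the journal (offsets in blocks):
 *
 *	ctx->pos     : meta block, seq == ctx->seq, written with REQ_FUA
 *	ctx->pos + 1 : data page for one member disk
 *	ctx->pos + 2 : data page for another member disk
 *
 * The data pages are written before the meta block that describes them, and
 * only the meta block uses REQ_FUA. ctx->pos then advances past the last
 * data page and ctx->seq grows by one, so the next stripe's meta block
 * starts right there.
 */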
static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
						 struct r5l_recovery_ctx *ctx)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct stripe_head *sh, *next;

	if (ctx->data_only_stripes == 0)
		return;

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;

	list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
		r5c_make_stripe_write_out(sh);
		set_bit(STRIPE_HANDLE, &sh->state);
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}

	md_wakeup_thread(conf->mddev->thread);
	/* reuse conf->wait_for_quiescent in recovery */
	wait_event(conf->wait_for_quiescent,
		   atomic_read(&conf->active_stripes) == 0);

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
}

static int r5l_recovery_log(struct r5l_log *log)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5l_recovery_ctx *ctx;
	int ret;
	sector_t pos;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->pos = log->last_checkpoint;
	ctx->seq = log->last_cp_seq;
	INIT_LIST_HEAD(&ctx->cached_list);
	ctx->meta_page = alloc_page(GFP_KERNEL);
	if (!ctx->meta_page) {
		ret = -ENOMEM;
		goto meta_page;
	}

	if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
		ret = -ENOMEM;
		goto ra_pool;
	}

	ret = r5c_recovery_flush_log(log, ctx);
	if (ret)
		goto error;

	pos = ctx->pos;
	ctx->seq += 10000;

	if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
		pr_debug("md/raid:%s: starting from clean shutdown\n",
			 mdname(mddev));
	else
		pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
			 mdname(mddev), ctx->data_only_stripes,
			 ctx->data_parity_stripes);

	if (ctx->data_only_stripes == 0) {
		log->next_checkpoint = ctx->pos;
		r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
		ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
	} else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
		pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
		       mdname(mddev));
		ret = -EIO;
		goto error;
	}

	log->log_start = ctx->pos;
	log->seq = ctx->seq;
	log->last_checkpoint = pos;
	r5l_write_super(log, pos);

	r5c_recovery_flush_data_only_stripes(log, ctx);
	ret = 0;
error:
	r5l_recovery_free_ra_pool(log, ctx);
ra_pool:
	__free_page(ctx->meta_page);
meta_page:
	kfree(ctx);
	return ret;
}

static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}

static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	int ret;

	if (!conf->log)
		return 0;

	switch (conf->log->r5c_journal_mode) {
	case R5C_JOURNAL_MODE_WRITE_THROUGH:
		ret = snprintf(
			page, PAGE_SIZE, "[%s] %s\n",
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
		break;
	case R5C_JOURNAL_MODE_WRITE_BACK:
		ret = snprintf(
			page, PAGE_SIZE, "%s [%s]\n",
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
		break;
	default:
		ret = 0;
	}
	return ret;
}
2523 * 2524 */ 2525 int r5c_journal_mode_set(struct mddev *mddev, int mode) 2526 { 2527 struct r5conf *conf = mddev->private; 2528 struct r5l_log *log = conf->log; 2529 2530 if (!log) 2531 return -ENODEV; 2532 2533 if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || 2534 mode > R5C_JOURNAL_MODE_WRITE_BACK) 2535 return -EINVAL; 2536 2537 if (raid5_calc_degraded(conf) > 0 && 2538 mode == R5C_JOURNAL_MODE_WRITE_BACK) 2539 return -EINVAL; 2540 2541 mddev_suspend(mddev); 2542 conf->log->r5c_journal_mode = mode; 2543 mddev_resume(mddev); 2544 2545 pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", 2546 mdname(mddev), mode, r5c_journal_mode_str[mode]); 2547 return 0; 2548 } 2549 EXPORT_SYMBOL(r5c_journal_mode_set); 2550 2551 static ssize_t r5c_journal_mode_store(struct mddev *mddev, 2552 const char *page, size_t length) 2553 { 2554 int mode = ARRAY_SIZE(r5c_journal_mode_str); 2555 size_t len = length; 2556 2557 if (len < 2) 2558 return -EINVAL; 2559 2560 if (page[len - 1] == '\n') 2561 len--; 2562 2563 while (mode--) 2564 if (strlen(r5c_journal_mode_str[mode]) == len && 2565 !strncmp(page, r5c_journal_mode_str[mode], len)) 2566 break; 2567 2568 return r5c_journal_mode_set(mddev, mode) ?: length; 2569 } 2570 2571 struct md_sysfs_entry 2572 r5c_journal_mode = __ATTR(journal_mode, 0644, 2573 r5c_journal_mode_show, r5c_journal_mode_store); 2574 2575 /* 2576 * Try handle write operation in caching phase. This function should only 2577 * be called in write-back mode. 2578 * 2579 * If all outstanding writes can be handled in caching phase, returns 0 2580 * If writes requires write-out phase, call r5c_make_stripe_write_out() 2581 * and returns -EAGAIN 2582 */ 2583 int r5c_try_caching_write(struct r5conf *conf, 2584 struct stripe_head *sh, 2585 struct stripe_head_state *s, 2586 int disks) 2587 { 2588 struct r5l_log *log = conf->log; 2589 int i; 2590 struct r5dev *dev; 2591 int to_cache = 0; 2592 void **pslot; 2593 sector_t tree_index; 2594 int ret; 2595 uintptr_t refcount; 2596 2597 BUG_ON(!r5c_is_writeback(log)); 2598 2599 if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { 2600 /* 2601 * There are two different scenarios here: 2602 * 1. The stripe has some data cached, and it is sent to 2603 * write-out phase for reclaim 2604 * 2. The stripe is clean, and this is the first write 2605 * 2606 * For 1, return -EAGAIN, so we continue with 2607 * handle_stripe_dirtying(). 2608 * 2609 * For 2, set STRIPE_R5C_CACHING and continue with caching 2610 * write. 2611 */ 2612 2613 /* case 1: anything injournal or anything in written */ 2614 if (s->injournal > 0 || s->written > 0) 2615 return -EAGAIN; 2616 /* case 2 */ 2617 set_bit(STRIPE_R5C_CACHING, &sh->state); 2618 } 2619 2620 /* 2621 * When run in degraded mode, array is set to write-through mode. 2622 * This check helps drain pending write safely in the transition to 2623 * write-through mode. 
/*
 * Try to handle a write operation in the caching phase. This function
 * should only be called in write-back mode.
 *
 * If all outstanding writes can be handled in the caching phase, returns 0.
 * If a write requires the write-out phase, call r5c_make_stripe_write_out()
 * and return -EAGAIN.
 */
int r5c_try_caching_write(struct r5conf *conf,
			  struct stripe_head *sh,
			  struct stripe_head_state *s,
			  int disks)
{
	struct r5l_log *log = conf->log;
	int i;
	struct r5dev *dev;
	int to_cache = 0;
	void **pslot;
	sector_t tree_index;
	int ret;
	uintptr_t refcount;

	BUG_ON(!r5c_is_writeback(log));

	if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		/*
		 * There are two different scenarios here:
		 *  1. The stripe has some data cached, and it is sent to
		 *     the write-out phase for reclaim
		 *  2. The stripe is clean, and this is the first write
		 *
		 * For 1, return -EAGAIN, so we continue with
		 * handle_stripe_dirtying().
		 *
		 * For 2, set STRIPE_R5C_CACHING and continue with the
		 * caching write.
		 */

		/* case 1: anything in s->injournal or anything written */
		if (s->injournal > 0 || s->written > 0)
			return -EAGAIN;
		/* case 2 */
		set_bit(STRIPE_R5C_CACHING, &sh->state);
	}

	/*
	 * When run in degraded mode, the array is set to write-through mode.
	 * This check helps drain pending writes safely in the transition to
	 * write-through mode.
	 */
	if (s->failed) {
		r5c_make_stripe_write_out(sh);
		return -EAGAIN;
	}

	for (i = disks; i--; ) {
		dev = &sh->dev[i];
		/* if non-overwrite, use the writing-out phase */
		if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
		    !test_bit(R5_InJournal, &dev->flags)) {
			r5c_make_stripe_write_out(sh);
			return -EAGAIN;
		}
	}

	/* if the stripe is not counted in big_stripe_tree, add it now */
	if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
	    !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		tree_index = r5c_tree_index(conf, sh->sector);
		spin_lock(&log->tree_lock);
		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
					       tree_index);
		if (pslot) {
			refcount = (uintptr_t)radix_tree_deref_slot_protected(
				pslot, &log->tree_lock) >>
				R5C_RADIX_COUNT_SHIFT;
			radix_tree_replace_slot(
				&log->big_stripe_tree, pslot,
				(void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
		} else {
			/*
			 * this radix_tree_insert can fail safely, so no
			 * need to call radix_tree_preload()
			 */
			ret = radix_tree_insert(
				&log->big_stripe_tree, tree_index,
				(void *)(1 << R5C_RADIX_COUNT_SHIFT));
			if (ret) {
				spin_unlock(&log->tree_lock);
				r5c_make_stripe_write_out(sh);
				return -EAGAIN;
			}
		}
		spin_unlock(&log->tree_lock);

		/*
		 * set STRIPE_R5C_PARTIAL_STRIPE, this shows the stripe is
		 * counted in the radix tree
		 */
		set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
		atomic_inc(&conf->r5c_cached_partial_stripes);
	}

	for (i = disks; i--; ) {
		dev = &sh->dev[i];
		if (dev->towrite) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_Wantdrain, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			to_cache++;
		}
	}

	if (to_cache) {
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		/*
		 * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
		 * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
		 * r5c_handle_data_cached()
		 */
		set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	}

	return 0;
}

/*
 * free the extra pages (orig_page) we allocated for prexor
 */
void r5c_release_extra_page(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int i;
	bool using_disk_info_extra_page;

	using_disk_info_extra_page =
		sh->dev[0].orig_page == conf->disks[0].extra_page;

	for (i = sh->disks; i--; )
		if (sh->dev[i].page != sh->dev[i].orig_page) {
			struct page *p = sh->dev[i].orig_page;

			sh->dev[i].orig_page = sh->dev[i].page;
			clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);

			if (!using_disk_info_extra_page)
				put_page(p);
		}

	if (using_disk_info_extra_page) {
		clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
		md_wakeup_thread(conf->mddev->thread);
	}
}

void r5c_use_extra_page(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int i;
	struct r5dev *dev;

	for (i = sh->disks; i--; ) {
		dev = &sh->dev[i];
		if (dev->orig_page != dev->page)
			put_page(dev->orig_page);
		dev->orig_page = conf->disks[i].extra_page;
	}
}
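/*
 * How big_stripe_tree entries are encoded, as used by r5c_try_caching_write()
 * above and r5c_finish_stripe_write_out() below: the radix tree item pointer
 * itself is the counter, with the count kept in the bits above
 * R5C_RADIX_COUNT_SHIFT so the low pointer bits stay clear. A condensed
 * sketch of the encode/decode pair:
 *
 *	void *item = (void *)(count << R5C_RADIX_COUNT_SHIFT);	// encode
 *	count = (uintptr_t)item >> R5C_RADIX_COUNT_SHIFT;	// decode
 *
 * So the first stripe of a big_stripe inserts
 * (void *)(1 << R5C_RADIX_COUNT_SHIFT), later stripes replace the slot with
 * count + 1, and the entry is deleted when the count would drop to zero.
 */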
/*
 * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
 * stripe is committed to the RAID disks.
 */
void r5c_finish_stripe_write_out(struct r5conf *conf,
				 struct stripe_head *sh,
				 struct stripe_head_state *s)
{
	struct r5l_log *log = conf->log;
	int i;
	int do_wakeup = 0;
	sector_t tree_index;
	void **pslot;
	uintptr_t refcount;

	if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
		return;

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;

	for (i = sh->disks; i--; ) {
		clear_bit(R5_InJournal, &sh->dev[i].flags);
		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			do_wakeup = 1;
	}

	/*
	 * analyse_stripe() runs before r5c_finish_stripe_write_out().
	 * We updated R5_InJournal, so we also update s->injournal.
	 */
	s->injournal = 0;

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);

	if (do_wakeup)
		wake_up(&conf->wait_for_overlap);

	spin_lock_irq(&log->stripe_in_journal_lock);
	list_del_init(&sh->r5c);
	spin_unlock_irq(&log->stripe_in_journal_lock);
	sh->log_start = MaxSector;

	atomic_dec(&log->stripe_in_journal_count);
	r5c_update_log_state(log);

	/* stop counting this stripe in big_stripe_tree */
	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
	    test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		tree_index = r5c_tree_index(conf, sh->sector);
		spin_lock(&log->tree_lock);
		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
					       tree_index);
		BUG_ON(pslot == NULL);
		refcount = (uintptr_t)radix_tree_deref_slot_protected(
			pslot, &log->tree_lock) >>
			R5C_RADIX_COUNT_SHIFT;
		if (refcount == 1)
			radix_tree_delete(&log->big_stripe_tree, tree_index);
		else
			radix_tree_replace_slot(
				&log->big_stripe_tree, pslot,
				(void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
		spin_unlock(&log->tree_lock);
	}

	if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
		BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
		atomic_dec(&conf->r5c_flushing_partial_stripes);
		atomic_dec(&conf->r5c_cached_partial_stripes);
	}

	if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
		atomic_dec(&conf->r5c_flushing_full_stripes);
		atomic_dec(&conf->r5c_cached_full_stripes);
	}

	r5l_append_flush_payload(log, sh->sector);
}

int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int pages = 0;
	int reserve;
	int i;
	int ret = 0;

	BUG_ON(!log);

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
		pages++;
	}
	WARN_ON(pages == 0);

	/*
	 * The stripe must enter the state machine again to call endio, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/*
	 * meta block + data pages, in sectors: e.g. three data pages
	 * reserve (1 + 3) << 3 == 32 sectors
	 */
	reserve = (1 + pages) << (PAGE_SHIFT - 9);

	if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
	    sh->log_start == MaxSector)
		r5l_add_no_space_stripe(log, sh);
	else if (!r5l_has_free_space(log, reserve)) {
		if (sh->log_start == log->last_checkpoint)
			BUG();
		else
			r5l_add_no_space_stripe(log, sh);
	} else {
		ret = r5l_log_stripe(log, sh, pages, 0);
		if (ret) {
			spin_lock_irq(&log->io_list_lock);
			list_add_tail(&sh->log_list, &log->no_mem_stripes);
			spin_unlock_irq(&log->io_list_lock);
		}
	}

	mutex_unlock(&log->io_mutex);
	return 0;
}

/* check whether this big stripe is in the write back cache. */
bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
{
	struct r5l_log *log = conf->log;
	sector_t tree_index;
	void *slot;

	if (!log)
		return false;

	WARN_ON_ONCE(!rcu_read_lock_held());
	tree_index = r5c_tree_index(conf, sect);
	slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
	return slot != NULL;
}
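/*
 * A caller is expected to hold rcu_read_lock() across the lookup (the real
 * call site is chunk_aligned_read() in raid5.c). A minimal sketch, with
 * raid_sector standing in for the chunk-aligned read position:
 *
 *	rcu_read_lock();
 *	in_cache = r5c_big_stripe_cached(conf, raid_sector);
 *	rcu_read_unlock();
 *	if (in_cache)
 *		... fall back to reading through the stripe cache ...
 */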
static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret = 0;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = prandom_u32();
		cp = 0;
		r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
		/*
		 * Make sure the superblock points to the correct address.
		 * The log might get data very soon. If the superblock
		 * doesn't have the correct log tail address, recovery can't
		 * find the log.
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	if (create_super) {
		log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
		log->seq = log->last_cp_seq + 1;
		log->next_checkpoint = cp;
	} else
		ret = r5l_recovery_log(log);

	r5c_update_log_state(log);
	return ret;
ioerr:
	__free_page(page);
	return ret;
}

void r5c_update_on_rdev_error(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;

	if (raid5_calc_degraded(conf) > 0 &&
	    conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
		schedule_work(&log->disable_writeback_work);
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct request_queue *q = bdev_get_queue(rdev->bdev);
	struct r5l_log *log;
	char b[BDEVNAME_SIZE];

	pr_debug("md/raid:%s: using device %s as journal\n",
		 mdname(conf->mddev), bdevname(rdev->bdev, b));

	if (PAGE_SIZE != 4096)
		return -EINVAL;

	/*
	 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
	 * raid_disks r5l_payload_data_parity entries, each with one __le32
	 * checksum.
	 *
	 * The write journal and cache therefore do not work for very big
	 * arrays: roughly (4096 - 32) / 20 payload entries fit in one 4kB
	 * page, so raid_disks > 203 is rejected.
	 */
	if (sizeof(struct r5l_meta_block) +
	    ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
	     conf->raid_disks) > PAGE_SIZE) {
		pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
		       mdname(conf->mddev), conf->raid_disks);
		return -EINVAL;
	}

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;

	log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;

	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->finished_ios);
	bio_init(&log->flush_bio, NULL, 0);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
	if (!log->io_pool)
		goto io_pool;

	log->bs = bioset_create(R5L_POOL_SIZE, 0);
	if (!log->bs)
		goto io_bs;

	log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
	if (!log->meta_pool)
		goto out_mempool;

	spin_lock_init(&log->tree_lock);
	INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);

	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;

	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_mem_stripes);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
	INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
	INIT_LIST_HEAD(&log->stripe_in_journal_list);
	spin_lock_init(&log->stripe_in_journal_lock);
	atomic_set(&log->stripe_in_journal_count, 0);

	rcu_assign_pointer(conf->log, log);

	if (r5l_load_log(log))
		goto error;

	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	return 0;

error:
	rcu_assign_pointer(conf->log, NULL);
	md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
	mempool_destroy(log->meta_pool);
out_mempool:
	bioset_free(log->bs);
io_bs:
	mempool_destroy(log->io_pool);
io_pool:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}

void r5l_exit_log(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	conf->log = NULL;
	synchronize_rcu();

	flush_work(&log->disable_writeback_work);
	md_unregister_thread(&log->reclaim_thread);
	mempool_destroy(log->meta_pool);
	bioset_free(log->bs);
	mempool_destroy(log->io_pool);
	kmem_cache_destroy(log->io_kc);
	kfree(log);
}