/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* the compression algorithm for this bio */
	int compress_type;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};

static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		((disk_size + root->sectorsize - 1) / root->sectorsize) *
		csum_size;
}

static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	int nr_vecs;

	nr_vecs = bio_get_nr_vecs(bdev);
	return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags);
}
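/*
 * Verify the checksums of the compressed pages against the trailing
 * checksum array.
 *
 * cb->sums is the first element of an array co-allocated with the
 * compressed_bio itself; compressed_bio_size() reserves room for one
 * csum per on-disk sector after the struct, so the memory layout is:
 *
 *	[ struct compressed_bio ][ csum 0 ][ csum 1 ] ...
 *
 * The loop below consumes one u32 per compressed page, which matches
 * that reservation when the sectorsize equals PAGE_CACHE_SIZE.
 */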
static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page, KM_USER0);
		csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
		btrfs_csum_final(csum, (char *)&csum);
		kunmap_atomic(kaddr, KM_USER0);

		if (csum != *cb_sum) {
			printk(KERN_INFO "btrfs csum failed ino %lu "
			       "extent %llu csum %u "
			       "wanted %u mirror %d\n", inode->i_ino,
			       (unsigned long long)disk_start,
			       csum, *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio, int err)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_biovec(cb->compress_type,
				      cb->compressed_pages,
				      cb->start,
				      cb->orig_bio->bi_io_vec,
				      cb->orig_bio->bi_vcnt,
				      cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int bio_index = 0;
		struct bio_vec *bvec = cb->orig_bio->bi_io_vec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		while (bio_index < cb->orig_bio->bi_vcnt) {
			SetPageChecked(bvec->bv_page);
			bvec++;
			bio_index++;
		}
		bio_endio(cb->orig_bio, 0);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline int end_compressed_writeback(struct inode *inode, u64 start,
					     unsigned long ram_size)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			end_page_writeback(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
	return 0;
}
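/*
 * One subtlety in the write completion below: the compressed pages
 * were never inserted into the inode's address space, but
 * writepage_end_io_hook() expects page->mapping to be valid.  The
 * code borrows compressed_pages[0], points its mapping at the inode
 * for the duration of the call, and clears it again afterwards.
 */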
/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio, int err)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL, 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb->start, cb->len);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
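/*
 * A note on the bio accounting used below: cb->pending_bios is bumped
 * once for every bio submitted against this extent, and always before
 * the submission itself.  The end_io handlers above drop one count per
 * completed bio and only tear the cb down when atomic_dec_and_test()
 * reaches zero, so incrementing after submission would open a window
 * where a fast completion frees the cb while it is still being used.
 */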
/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				  unsigned long len, u64 disk_start,
				  unsigned long compressed_len,
				  struct page **compressed_pages,
				  unsigned long nr_pages)
{
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int page_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;

	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	if (!bio) {
		kfree(cb);
		return -ENOMEM;
	}
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (page_index = 0; page_index < cb->nr_pages; page_index++) {
		page = compressed_pages[page_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_size)
			ret = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_CACHE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
			BUG_ON(ret);

			ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
			BUG_ON(ret);

			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
			BUG_ON(ret);

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
		}
		if (bytes_left < PAGE_CACHE_SIZE) {
			printk(KERN_WARNING "bytes left %lu compress len %lu nr %lu\n",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_CACHE_SIZE;
		first_byte += PAGE_CACHE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
	BUG_ON(ret);

	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
	BUG_ON(ret);

	bio_put(bio);
	return 0;
}
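/*
 * Readahead helper for compressed extents: the whole extent has to be
 * read to decompress any of it, so while we are here we may as well
 * pull in the rest of the file pages it covers.  Starting just past
 * the last page already in cb->orig_bio, this allocates pages that are
 * missing from the page cache, adds them to the lru, and tacks them
 * onto cb->orig_bio so the decompress step will fill them too.  Pages
 * that are already cached count against a small miss budget; after a
 * few of them the readahead simply stops.
 */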
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long page_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	while (last_offset < compressed_end) {
		page_index = last_offset >> PAGE_CACHE_SHIFT;

		if (page_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_index);
		rcu_read_unlock();
		if (page) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_mask(mapping) &
					  ~__GFP_FS);
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, page_index,
					  GFP_NOFS)) {
			page_cache_release(page);
			goto next;
		}

		end = last_offset + PAGE_CACHE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end, GFP_NOFS);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_CACHE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_CACHE_SIZE - zero_offset;
				userpage = kmap_atomic(page, KM_USER0);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage, KM_USER0);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_CACHE_SIZE, 0);

		if (ret == PAGE_CACHE_SIZE) {
			nr_pages++;
			page_cache_release(page);
		} else {
			unlock_extent(tree, last_offset, end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
next:
		last_offset += PAGE_CACHE_SIZE;
	}
	return 0;
}
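/*
 * Note that add_ra_bio_pages() only ever grows cb->orig_bio, which is
 * why btrfs_submit_compressed_read() below recomputes cb->len from
 * bi_vcnt after running the readahead: the uncompressed length taken
 * from the original bio is just a starting point.
 */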
/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long page_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret = -ENOMEM;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_CACHE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return -EIO;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = uncompressed_len;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
		   PAGE_CACHE_SIZE;
	cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	for (page_index = 0; page_index < nr_pages; page_index++) {
		cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
		if (!cb->compressed_pages[page_index])
			goto fail2;
	}
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	cb->len = uncompressed_len;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	if (!comp_bio)
		goto fail2;
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);

	for (page_index = 0; page_index < nr_pages; page_index++) {
		page = cb->compressed_pages[page_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_CACHE_SHIFT;

		if (comp_bio->bi_size)
			ret = tree->ops->merge_bio_hook(page, 0,
							PAGE_CACHE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
			BUG_ON(ret);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(root, inode,
							    comp_bio, sums);
				BUG_ON(ret);
			}
			sums += (comp_bio->bi_size + root->sectorsize - 1) /
				root->sectorsize;

			ret = btrfs_map_bio(root, READ, comp_bio,
					    mirror_num, 0);
			BUG_ON(ret);

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
		}
		cur_disk_byte += PAGE_CACHE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
	BUG_ON(ret);

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
		BUG_ON(ret);
	}

	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
	BUG_ON(ret);

	bio_put(comp_bio);
	return 0;

fail2:
	/* these came from alloc_page(), so __free_page() is the right
	 * way to drop them; entries past the failure point are NULL
	 */
	for (page_index = 0; page_index < nr_pages; page_index++) {
		if (cb->compressed_pages[page_index])
			__free_page(cb->compressed_pages[page_index]);
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}
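/*
 * Per compression type workspace pools.  Compression needs big,
 * long-lived scratch buffers, so rather than allocating them for every
 * bio we keep a list of idle workspaces per algorithm (zlib, lzo),
 * bounded at roughly one live workspace per online CPU.
 * comp_alloc_workspace counts everything handed out and
 * comp_workspace_wait lets callers sleep once the bound is hit.  A
 * compress type of N indexes these arrays at N - 1, e.g.
 * BTRFS_COMPRESS_ZLIB (1) maps to slot 0.
 */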
static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES];
static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];

struct btrfs_compress_op *btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
};

int __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&comp_idle_workspace[i]);
		spin_lock_init(&comp_workspace_lock[i]);
		atomic_set(&comp_alloc_workspace[i], 0);
		init_waitqueue_head(&comp_workspace_wait[i]);
	}
	return 0;
}
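/*
 * Every compress/decompress entry point later in this file follows the
 * same pattern (sketched here with some_op standing in for
 * compress_pages, decompress_biovec or decompress):
 *
 *	workspace = find_workspace(type);
 *	if (IS_ERR(workspace))
 *		return an error;
 *	ret = btrfs_compress_op[type - 1]->some_op(workspace, ...);
 *	free_workspace(type, workspace);
 *	return ret;
 *
 * find_workspace() can sleep waiting for an idle workspace, so none of
 * those helpers may be called from atomic context.
 */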
/*
 * this finds an available workspace or allocates a new one
 * ERR_PTR is returned if things go bad.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;

	struct list_head *idle_workspace = &comp_idle_workspace[idx];
	spinlock_t *workspace_lock = &comp_workspace_lock[idx];
	atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
	wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
	int *num_workspace = &comp_num_workspace[idx];
again:
	spin_lock(workspace_lock);
	if (!list_empty(idle_workspace)) {
		workspace = idle_workspace->next;
		list_del(workspace);
		(*num_workspace)--;
		spin_unlock(workspace_lock);
		return workspace;
	}
	if (atomic_read(alloc_workspace) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(workspace_lock);
		prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(alloc_workspace) > cpus && !*num_workspace)
			schedule();
		finish_wait(workspace_wait, &wait);
		goto again;
	}
	atomic_inc(alloc_workspace);
	spin_unlock(workspace_lock);

	workspace = btrfs_compress_op[idx]->alloc_workspace();
	if (IS_ERR(workspace)) {
		atomic_dec(alloc_workspace);
		wake_up(workspace_wait);
	}
	return workspace;
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_workspace = &comp_idle_workspace[idx];
	spinlock_t *workspace_lock = &comp_workspace_lock[idx];
	atomic_t *alloc_workspace = &comp_alloc_workspace[idx];
	wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx];
	int *num_workspace = &comp_num_workspace[idx];

	spin_lock(workspace_lock);
	if (*num_workspace < num_online_cpus()) {
		list_add_tail(workspace, idle_workspace);
		(*num_workspace)++;
		spin_unlock(workspace_lock);
		goto wake;
	}
	spin_unlock(workspace_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(alloc_workspace);
wake:
	if (waitqueue_active(workspace_wait))
		wake_up(workspace_wait);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&comp_idle_workspace[i])) {
			workspace = comp_idle_workspace[i].next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&comp_alloc_workspace[i]);
		}
	}
}
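/*
 * A worked example of the contract documented below, with hypothetical
 * numbers: asking to compress 128k of file data with max_out set to
 * 112k might come back with total_in == 128k, total_out == 60k and
 * out_pages == 15.  If the data does not shrink well the operation
 * instead stops early with total_in < len, and the caller is expected
 * to fall back to writing that range uncompressed.
 */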
/*
 * given an address space and start/len, compress the bytes.
 *
 * pages are allocated to hold the compressed result and stored
 * in 'pages'
 *
 * out_pages is used to return the number of pages allocated.  There
 * may be pages allocated even if we return an error
 *
 * total_in is used to return the number of bytes actually read.  It
 * may be smaller than len if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * total_out is used to return the total number of compressed bytes
 *
 * max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, unsigned long len,
			 struct page **pages,
			 unsigned long nr_dest_pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out,
			 unsigned long max_out)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return -1;

	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
							start, len, pages,
							nr_dest_pages,
							out_pages, total_in,
							total_out, max_out);
	free_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * bvec is a bio_vec of pages from the file that we want to decompress into
 *
 * vcnt is the count of pages in the biovec
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start,
			    struct bio_vec *bvec, int vcnt, size_t srclen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return -ENOMEM;

	ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
							   disk_start,
							   bvec, vcnt, srclen);
	free_workspace(type, workspace);
	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return -ENOMEM;

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						    dest_page, start_byte,
						    srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void btrfs_exit_compress(void)
{
	free_workspaces();
}
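/*
 * The helper below is shared by the decompress implementations: they
 * inflate into a working buffer one chunk at a time and call it after
 * every chunk, with buf_start/total_out walking forward through the
 * uncompressed stream.  A return of 0 means every page in the biovec
 * has been filled and decompression can stop; 1 means the current
 * pages still need more data (or the data for them has not started
 * yet).
 */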
/*
 * Copy uncompressed data from the working buffer to the pages.
 *
 * buf_start is the byte offset of the start of the working buffer
 * within the uncompressed data.
 *
 * total_out is the offset of the last byte of the working buffer,
 * again within the uncompressed data.
 */
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio_vec *bvec, int vcnt,
			      unsigned long *page_index,
			      unsigned long *pg_offset)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct page *page_out = bvec[*page_index].bv_page;

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the uncompressed data.
	 */
	start_byte = page_offset(page_out) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min(PAGE_CACHE_SIZE - *pg_offset,
			    PAGE_CACHE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);
		kaddr = kmap_atomic(page_out, KM_USER0);
		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page_out);

		*pg_offset += bytes;
		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		if (*pg_offset == PAGE_CACHE_SIZE) {
			(*page_index)++;
			if (*page_index >= vcnt)
				return 0;

			page_out = bvec[*page_index].bv_page;
			*pg_offset = 0;
			start_byte = page_offset(page_out) - disk_start;

			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer.  bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}