// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "zoned.h"
32
33static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
34
35const char* btrfs_compress_type2str(enum btrfs_compression_type type)
36{
37 switch (type) {
38 case BTRFS_COMPRESS_ZLIB:
39 case BTRFS_COMPRESS_LZO:
40 case BTRFS_COMPRESS_ZSTD:
41 case BTRFS_COMPRESS_NONE:
42 return btrfs_compress_types[type];
43 default:
44 break;
45 }
46
47 return NULL;
48}
49
50bool btrfs_compress_is_valid_type(const char *str, size_t len)
51{
52 int i;
53
54 for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
55 size_t comp_len = strlen(btrfs_compress_types[i]);
56
57 if (len < comp_len)
58 continue;
59
60 if (!strncmp(btrfs_compress_types[i], str, comp_len))
61 return true;
62 }
63 return false;
64}
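
/*
 * Illustrative usage (not part of the original source): the prefix match
 * above also accepts a value that carries a level suffix, e.g.
 *
 *	btrfs_compress_is_valid_type("zstd:3", 6)	-> true
 *	btrfs_compress_is_valid_type("zlib", 4)		-> true
 *	btrfs_compress_is_valid_type("gzip", 4)		-> false
 */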
65
66static int compression_compress_pages(int type, struct list_head *ws,
67 struct address_space *mapping, u64 start, struct page **pages,
68 unsigned long *out_pages, unsigned long *total_in,
69 unsigned long *total_out)
70{
71 switch (type) {
72 case BTRFS_COMPRESS_ZLIB:
73 return zlib_compress_pages(ws, mapping, start, pages,
74 out_pages, total_in, total_out);
75 case BTRFS_COMPRESS_LZO:
76 return lzo_compress_pages(ws, mapping, start, pages,
77 out_pages, total_in, total_out);
78 case BTRFS_COMPRESS_ZSTD:
79 return zstd_compress_pages(ws, mapping, start, pages,
80 out_pages, total_in, total_out);
81 case BTRFS_COMPRESS_NONE:
82 default:
		/*
		 * BTRFS_COMPRESS_NONE (or an unknown type) can be seen here
		 * when compression races with a remount that switches the
		 * filesystem to 'compress=no'.  Report zero output pages and
		 * return -E2BIG, which callers treat as 'data did not
		 * compress', so the range gets written out uncompressed.
		 */
92 *out_pages = 0;
93 return -E2BIG;
94 }
95}
96
97static int compression_decompress_bio(int type, struct list_head *ws,
98 struct compressed_bio *cb)
99{
100 switch (type) {
101 case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
102 case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
103 case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
104 case BTRFS_COMPRESS_NONE:
105 default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
110 BUG();
111 }
112}
113
114static int compression_decompress(int type, struct list_head *ws,
115 unsigned char *data_in, struct page *dest_page,
116 unsigned long start_byte, size_t srclen, size_t destlen)
117{
118 switch (type) {
119 case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
120 start_byte, srclen, destlen);
121 case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
122 start_byte, srclen, destlen);
123 case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
124 start_byte, srclen, destlen);
125 case BTRFS_COMPRESS_NONE:
126 default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
131 BUG();
132 }
133}
134
135static int btrfs_decompress_bio(struct compressed_bio *cb);
136
137static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
138 unsigned long disk_size)
139{
140 return sizeof(struct compressed_bio) +
141 (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
142}
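
/*
 * Illustrative sizing example (values assumed, not from the original code):
 * with 4K sectors and the default crc32c checksum (csum_size == 4), a 128K
 * compressed extent needs
 *
 *	sizeof(struct compressed_bio) + DIV_ROUND_UP(SZ_128K, SZ_4K) * 4
 *	= sizeof(struct compressed_bio) + 128 bytes,
 *
 * the tail being the per-sector checksum array consumed by
 * check_compressed_csum() below.
 */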
143
144static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
145 u64 disk_start)
146{
147 struct btrfs_fs_info *fs_info = inode->root->fs_info;
148 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
149 const u32 csum_size = fs_info->csum_size;
150 const u32 sectorsize = fs_info->sectorsize;
151 struct page *page;
152 unsigned int i;
153 char *kaddr;
154 u8 csum[BTRFS_CSUM_SIZE];
155 struct compressed_bio *cb = bio->bi_private;
156 u8 *cb_sum = cb->sums;
157
158 if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
159 return 0;
160
161 shash->tfm = fs_info->csum_shash;
162
163 for (i = 0; i < cb->nr_pages; i++) {
164 u32 pg_offset;
165 u32 bytes_left = PAGE_SIZE;
166 page = cb->compressed_pages[i];

		/* The last compressed page may only be partially filled */
169 if (i == cb->nr_pages - 1)
170 bytes_left = cb->compressed_len - i * PAGE_SIZE;

		/* Checksum the compressed data one sector at a time */
173 for (pg_offset = 0; pg_offset < bytes_left;
174 pg_offset += sectorsize) {
175 kaddr = kmap_atomic(page);
176 crypto_shash_digest(shash, kaddr + pg_offset,
177 sectorsize, csum);
178 kunmap_atomic(kaddr);
179
180 if (memcmp(&csum, cb_sum, csum_size) != 0) {
181 btrfs_print_data_csum_error(inode, disk_start,
182 csum, cb_sum, cb->mirror_num);
183 if (btrfs_io_bio(bio)->device)
184 btrfs_dev_stat_inc_and_print(
185 btrfs_io_bio(bio)->device,
186 BTRFS_DEV_STAT_CORRUPTION_ERRS);
187 return -EIO;
188 }
189 cb_sum += csum_size;
190 disk_start += sectorsize;
191 }
192 }
193 return 0;
194}
195
/*
 * When we finish reading compressed pages from the disk, we decompress them
 * and then run the bio end_io routines on the decompressed pages (in the
 * inode address space).
 *
 * This allows the checksumming and other IO error handling routines to work
 * normally.
 *
 * The compressed pages are freed here, and it must be run in process context.
 */
206static void end_compressed_bio_read(struct bio *bio)
207{
208 struct compressed_bio *cb = bio->bi_private;
209 struct inode *inode;
210 struct page *page;
211 unsigned int index;
212 unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
213 int ret = 0;
214
215 if (bio->bi_status)
216 cb->errors = 1;

	/*
	 * If there are still pending bios for this compressed extent, we are
	 * not the last one, so just exit.
	 */
221 if (!refcount_dec_and_test(&cb->pending_bios))
222 goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that read-repair
	 * can work properly.
	 */
228 btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
229 cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed, just skip the checksum verification
	 * as there is no way it could be correct.
	 */
235 if (cb->errors == 1)
236 goto csum_failed;
237
238 inode = cb->inode;
239 ret = check_compressed_csum(BTRFS_I(inode), bio,
240 bio->bi_iter.bi_sector << 9);
241 if (ret)
242 goto csum_failed;

	/*
	 * We are the last bio for this extent and the checksums match, start
	 * the decompression of the whole compressed extent.
	 */
247 ret = btrfs_decompress_bio(cb);
248
249csum_failed:
250 if (ret)
251 cb->errors = 1;

	/* Release the compressed pages */
254 index = 0;
255 for (index = 0; index < cb->nr_pages; index++) {
256 page = cb->compressed_pages[index];
257 page->mapping = NULL;
258 put_page(page);
259 }

	/* Do io completion on the original bio */
262 if (cb->errors) {
263 bio_io_error(cb->orig_bio);
264 } else {
265 struct bio_vec *bvec;
266 struct bvec_iter_all iter_all;

		/*
		 * The checksum has been verified, set PageChecked so the
		 * end_io handlers know about it.
		 */
272 ASSERT(!bio_flagged(bio, BIO_CLONED));
273 bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
274 SetPageChecked(bvec->bv_page);
275
276 bio_endio(cb->orig_bio);
277 }

	/* Finally free the cb struct */
280 kfree(cb->compressed_pages);
281 kfree(cb);
282out:
283 bio_put(bio);
284}
285
/*
 * Clear the writeback bits on all of the file pages for a compressed write.
 */
290static noinline void end_compressed_writeback(struct inode *inode,
291 const struct compressed_bio *cb)
292{
293 unsigned long index = cb->start >> PAGE_SHIFT;
294 unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
295 struct page *pages[16];
296 unsigned long nr_pages = end_index - index + 1;
297 int i;
298 int ret;
299
300 if (cb->errors)
301 mapping_set_error(inode->i_mapping, -EIO);
302
303 while (nr_pages > 0) {
304 ret = find_get_pages_contig(inode->i_mapping, index,
305 min_t(unsigned long,
306 nr_pages, ARRAY_SIZE(pages)), pages);
307 if (ret == 0) {
308 nr_pages -= 1;
309 index += 1;
310 continue;
311 }
312 for (i = 0; i < ret; i++) {
313 if (cb->errors)
314 SetPageError(pages[i]);
315 end_page_writeback(pages[i]);
316 put_page(pages[i]);
317 }
318 nr_pages -= ret;
319 index += ret;
320 }
	/* The inode may be gone by now */
322}
323
/*
 * Do the cleanup once all the compressed pages hit the disk.  This will clear
 * writeback on the file pages and free the compressed pages.
 *
 * This also calls the writeback end hooks for the file pages so that metadata
 * and checksums can be updated in the file.
 */
332static void end_compressed_bio_write(struct bio *bio)
333{
334 struct compressed_bio *cb = bio->bi_private;
335 struct inode *inode;
336 struct page *page;
337 unsigned int index;
338
339 if (bio->bi_status)
340 cb->errors = 1;

	/*
	 * If there are still pending bios for this compressed write, we are
	 * not the last one, so just exit.
	 */
345 if (!refcount_dec_and_test(&cb->pending_bios))
346 goto out;

	/*
	 * We are the last bio for this extent, call back into the FS and do
	 * all the end_io operations.
	 */
351 inode = cb->inode;
352 btrfs_record_physical_zoned(inode, cb->start, bio);
353 btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
354 cb->start, cb->start + cb->len - 1,
355 !cb->errors);
356
357 end_compressed_writeback(inode, cb);
	/* Note, our inode could be gone now */

	/*
	 * Release the compressed pages, these came from alloc_page and are
	 * not attached to the inode at all.
	 */
364 index = 0;
365 for (index = 0; index < cb->nr_pages; index++) {
366 page = cb->compressed_pages[index];
367 page->mapping = NULL;
368 put_page(page);
369 }

	/* Finally free the cb struct */
372 kfree(cb->compressed_pages);
373 kfree(cb);
374out:
375 bio_put(bio);
376}
377
/*
 * Worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback and the
 * compressed pages should have a reference on them for dropping when the IO
 * is complete.
 *
 * This also checksums the file bytes and gets things ready for the end io
 * hooks.
 */
387blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
388 unsigned int len, u64 disk_start,
389 unsigned int compressed_len,
390 struct page **compressed_pages,
391 unsigned int nr_pages,
392 unsigned int write_flags,
393 struct cgroup_subsys_state *blkcg_css)
394{
395 struct btrfs_fs_info *fs_info = inode->root->fs_info;
396 struct bio *bio = NULL;
397 struct compressed_bio *cb;
398 unsigned long bytes_left;
399 int pg_index = 0;
400 struct page *page;
401 u64 first_byte = disk_start;
402 blk_status_t ret;
403 int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
404 const bool use_append = btrfs_use_zone_append(inode, disk_start);
405 const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
406
407 WARN_ON(!PAGE_ALIGNED(start));
408 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
409 if (!cb)
410 return BLK_STS_RESOURCE;
411 refcount_set(&cb->pending_bios, 0);
412 cb->errors = 0;
413 cb->inode = &inode->vfs_inode;
414 cb->start = start;
415 cb->len = len;
416 cb->mirror_num = 0;
417 cb->compressed_pages = compressed_pages;
418 cb->compressed_len = compressed_len;
419 cb->orig_bio = NULL;
420 cb->nr_pages = nr_pages;
421
422 bio = btrfs_bio_alloc(first_byte);
423 bio->bi_opf = bio_op | write_flags;
424 bio->bi_private = cb;
425 bio->bi_end_io = end_compressed_bio_write;
426
427 if (use_append) {
428 struct btrfs_device *device;
429
430 device = btrfs_zoned_get_device(fs_info, disk_start, PAGE_SIZE);
431 if (IS_ERR(device)) {
432 kfree(cb);
433 bio_put(bio);
434 return BLK_STS_NOTSUPP;
435 }
436
437 bio_set_dev(bio, device->bdev);
438 }
439
440 if (blkcg_css) {
441 bio->bi_opf |= REQ_CGROUP_PUNT;
442 kthread_associate_blkcg(blkcg_css);
443 }
444 refcount_set(&cb->pending_bios, 1);

	/* Create and submit bios for the compressed pages */
447 bytes_left = compressed_len;
448 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
449 int submit = 0;
450 int len = 0;
451
452 page = compressed_pages[pg_index];
453 page->mapping = inode->vfs_inode.i_mapping;
454 if (bio->bi_iter.bi_size)
455 submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
456 0);

		/*
		 * The page can only be added to the bio if the current bio
		 * fits in the stripe.
		 */
462 if (!submit) {
463 if (pg_index == 0 && use_append)
464 len = bio_add_zone_append_page(bio, page,
465 PAGE_SIZE, 0);
466 else
467 len = bio_add_page(bio, page, PAGE_SIZE, 0);
468 }
469
470 page->mapping = NULL;
471 if (submit || len < PAGE_SIZE) {
			/*
			 * Increment the count before we submit the bio so we
			 * know the end IO handler won't happen before we
			 * increment the count.  Otherwise, the cb might get
			 * freed before we're done setting it up.
			 */
478 refcount_inc(&cb->pending_bios);
479 ret = btrfs_bio_wq_end_io(fs_info, bio,
480 BTRFS_WQ_ENDIO_DATA);
481 BUG_ON(ret);
482
483 if (!skip_sum) {
484 ret = btrfs_csum_one_bio(inode, bio, start, 1);
485 BUG_ON(ret);
486 }
487
488 ret = btrfs_map_bio(fs_info, bio, 0);
489 if (ret) {
490 bio->bi_status = ret;
491 bio_endio(bio);
492 }
493
494 bio = btrfs_bio_alloc(first_byte);
495 bio->bi_opf = bio_op | write_flags;
496 bio->bi_private = cb;
497 bio->bi_end_io = end_compressed_bio_write;
498 if (blkcg_css)
499 bio->bi_opf |= REQ_CGROUP_PUNT;

			/*
			 * Add the page that did not fit into the previous bio
			 * to the freshly allocated one.
			 */
504 bio_add_page(bio, page, PAGE_SIZE, 0);
505 }
506 if (bytes_left < PAGE_SIZE) {
507 btrfs_info(fs_info,
508 "bytes left %lu compress len %u nr %u",
509 bytes_left, cb->compressed_len, cb->nr_pages);
510 }
511 bytes_left -= PAGE_SIZE;
512 first_byte += PAGE_SIZE;
513 cond_resched();
514 }
515
516 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
517 BUG_ON(ret);
518
519 if (!skip_sum) {
520 ret = btrfs_csum_one_bio(inode, bio, start, 1);
521 BUG_ON(ret);
522 }
523
524 ret = btrfs_map_bio(fs_info, bio, 0);
525 if (ret) {
526 bio->bi_status = ret;
527 bio_endio(bio);
528 }
529
530 if (blkcg_css)
531 kthread_associate_blkcg(NULL);
532
533 return 0;
534}
535
536static u64 bio_end_offset(struct bio *bio)
537{
538 struct bio_vec *last = bio_last_bvec_all(bio);
539
540 return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
541}
542
543static noinline int add_ra_bio_pages(struct inode *inode,
544 u64 compressed_end,
545 struct compressed_bio *cb)
546{
547 unsigned long end_index;
548 unsigned long pg_index;
549 u64 last_offset;
550 u64 isize = i_size_read(inode);
551 int ret;
552 struct page *page;
553 unsigned long nr_pages = 0;
554 struct extent_map *em;
555 struct address_space *mapping = inode->i_mapping;
556 struct extent_map_tree *em_tree;
557 struct extent_io_tree *tree;
558 u64 end;
559 int misses = 0;
560
561 last_offset = bio_end_offset(cb->orig_bio);
562 em_tree = &BTRFS_I(inode)->extent_tree;
563 tree = &BTRFS_I(inode)->io_tree;
564
565 if (isize == 0)
566 return 0;
567
568 end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
569
570 while (last_offset < compressed_end) {
571 pg_index = last_offset >> PAGE_SHIFT;
572
573 if (pg_index > end_index)
574 break;
575
576 page = xa_load(&mapping->i_pages, pg_index);
577 if (page && !xa_is_value(page)) {
578 misses++;
579 if (misses > 4)
580 break;
581 goto next;
582 }
583
584 page = __page_cache_alloc(mapping_gfp_constraint(mapping,
585 ~__GFP_FS));
586 if (!page)
587 break;
588
589 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
590 put_page(page);
591 goto next;
592 }
593
		/*
		 * At this point, we have a locked page in the page cache for
		 * these bytes in the file.  But, we have to make sure they
		 * map to this compressed extent on disk.
		 */
599 ret = set_page_extent_mapped(page);
600 if (ret < 0) {
601 unlock_page(page);
602 put_page(page);
603 break;
604 }
605
606 end = last_offset + PAGE_SIZE - 1;
607 lock_extent(tree, last_offset, end);
608 read_lock(&em_tree->lock);
609 em = lookup_extent_mapping(em_tree, last_offset,
610 PAGE_SIZE);
611 read_unlock(&em_tree->lock);
612
613 if (!em || last_offset < em->start ||
614 (last_offset + PAGE_SIZE > extent_map_end(em)) ||
615 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
616 free_extent_map(em);
617 unlock_extent(tree, last_offset, end);
618 unlock_page(page);
619 put_page(page);
620 break;
621 }
622 free_extent_map(em);
623
624 if (page->index == end_index) {
625 size_t zero_offset = offset_in_page(isize);
626
627 if (zero_offset) {
628 int zeros;
629 zeros = PAGE_SIZE - zero_offset;
630 memzero_page(page, zero_offset, zeros);
631 flush_dcache_page(page);
632 }
633 }
634
635 ret = bio_add_page(cb->orig_bio, page,
636 PAGE_SIZE, 0);
637
638 if (ret == PAGE_SIZE) {
639 nr_pages++;
640 put_page(page);
641 } else {
642 unlock_extent(tree, last_offset, end);
643 unlock_page(page);
644 put_page(page);
645 break;
646 }
647next:
648 last_offset += PAGE_SIZE;
649 }
650 return 0;
651}

/*
 * For a compressed read, the bio we get passed has all the inode pages in it.
 * We don't actually do IO on those pages but allocate new ones to hold the
 * compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the bio we were
 * passed and then call the bio end_io calls.
 */
664blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
665 int mirror_num, unsigned long bio_flags)
666{
667 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
668 struct extent_map_tree *em_tree;
669 struct compressed_bio *cb;
670 unsigned int compressed_len;
671 unsigned int nr_pages;
672 unsigned int pg_index;
673 struct page *page;
674 struct bio *comp_bio;
675 u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
676 u64 em_len;
677 u64 em_start;
678 struct extent_map *em;
679 blk_status_t ret = BLK_STS_RESOURCE;
680 int faili = 0;
681 u8 *sums;
682
683 em_tree = &BTRFS_I(inode)->extent_tree;

	/* We need the actual starting offset of this extent in the file */
686 read_lock(&em_tree->lock);
687 em = lookup_extent_mapping(em_tree,
688 page_offset(bio_first_page_all(bio)),
689 fs_info->sectorsize);
690 read_unlock(&em_tree->lock);
691 if (!em)
692 return BLK_STS_IOERR;
693
694 compressed_len = em->block_len;
695 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
696 if (!cb)
697 goto out;
698
699 refcount_set(&cb->pending_bios, 0);
700 cb->errors = 0;
701 cb->inode = inode;
702 cb->mirror_num = mirror_num;
703 sums = cb->sums;
704
705 cb->start = em->orig_start;
706 em_len = em->len;
707 em_start = em->start;
708
709 free_extent_map(em);
710 em = NULL;
711
712 cb->len = bio->bi_iter.bi_size;
713 cb->compressed_len = compressed_len;
714 cb->compress_type = extent_compress_type(bio_flags);
715 cb->orig_bio = bio;
716
717 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
718 cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
719 GFP_NOFS);
720 if (!cb->compressed_pages)
721 goto fail1;
722
723 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
724 cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
725 __GFP_HIGHMEM);
726 if (!cb->compressed_pages[pg_index]) {
727 faili = pg_index - 1;
728 ret = BLK_STS_RESOURCE;
729 goto fail2;
730 }
731 }
732 faili = nr_pages - 1;
733 cb->nr_pages = nr_pages;
734
735 add_ra_bio_pages(inode, em_start + em_len, cb);

	/* Include any pages we added in add_ra_bio_pages() */
738 cb->len = bio->bi_iter.bi_size;
739
740 comp_bio = btrfs_bio_alloc(cur_disk_byte);
741 comp_bio->bi_opf = REQ_OP_READ;
742 comp_bio->bi_private = cb;
743 comp_bio->bi_end_io = end_compressed_bio_read;
744 refcount_set(&cb->pending_bios, 1);
745
746 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
747 u32 pg_len = PAGE_SIZE;
748 int submit = 0;
749

		/*
		 * To handle subpage cases the bio must only cover the range
		 * we need: if this is the last page, truncate the length to
		 * the remaining part of the compressed data.
		 */
757 if (pg_index == nr_pages - 1)
758 pg_len = min_t(u32, PAGE_SIZE,
759 compressed_len - pg_index * PAGE_SIZE);
760
761 page = cb->compressed_pages[pg_index];
762 page->mapping = inode->i_mapping;
763 page->index = em_start >> PAGE_SHIFT;
764
765 if (comp_bio->bi_iter.bi_size)
766 submit = btrfs_bio_fits_in_stripe(page, pg_len,
767 comp_bio, 0);
768
769 page->mapping = NULL;
770 if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
771 unsigned int nr_sectors;
772
773 ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
774 BTRFS_WQ_ENDIO_DATA);
775 BUG_ON(ret);

			/*
			 * Increment the count before we submit the bio so we
			 * know the end IO handler won't happen before we
			 * increment the count.  Otherwise, the cb might get
			 * freed before we're done setting it up.
			 */
783 refcount_inc(&cb->pending_bios);
784
785 ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
786 BUG_ON(ret);
787
788 nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
789 fs_info->sectorsize);
790 sums += fs_info->csum_size * nr_sectors;
791
792 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
793 if (ret) {
794 comp_bio->bi_status = ret;
795 bio_endio(comp_bio);
796 }
797
798 comp_bio = btrfs_bio_alloc(cur_disk_byte);
799 comp_bio->bi_opf = REQ_OP_READ;
800 comp_bio->bi_private = cb;
801 comp_bio->bi_end_io = end_compressed_bio_read;
802
803 bio_add_page(comp_bio, page, pg_len, 0);
804 }
805 cur_disk_byte += pg_len;
806 }
807
808 ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
809 BUG_ON(ret);
810
811 ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
812 BUG_ON(ret);
813
814 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
815 if (ret) {
816 comp_bio->bi_status = ret;
817 bio_endio(comp_bio);
818 }
819
820 return 0;
821
822fail2:
823 while (faili >= 0) {
824 __free_page(cb->compressed_pages[faili]);
825 faili--;
826 }
827
828 kfree(cb->compressed_pages);
829fail1:
830 kfree(cb);
831out:
832 free_extent_map(em);
833 return ret;
834}
835
/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from each sample
 * @SAMPLING_INTERVAL  - range from which the sample is taken
 */
843#define SAMPLING_READ_SIZE (16)
844#define SAMPLING_INTERVAL (256)
845
/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects.  Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
851#define BUCKET_SIZE (256)
852
/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain
 * a sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
865#define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \
866 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
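
/*
 * Worked out (illustrative, assuming BTRFS_MAX_UNCOMPRESSED == 128K):
 *
 *	MAX_SAMPLE_SIZE = 128K * 16 / 256 = 8192 bytes
 *
 * i.e. at most 8K of sample data is copied out of each 128K input range.
 */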
867
868struct bucket_item {
869 u32 count;
870};
871
872struct heuristic_ws {
	/* Partial copy of input data */
874 u8 *sample;
875 u32 sample_size;
	/* Buckets store counters for each byte value */
877 struct bucket_item *bucket;
	/* Sorting buffer */
879 struct bucket_item *bucket_b;
880 struct list_head list;
881};
882
883static struct workspace_manager heuristic_wsm;
884
885static void free_heuristic_ws(struct list_head *ws)
886{
887 struct heuristic_ws *workspace;
888
889 workspace = list_entry(ws, struct heuristic_ws, list);
890
891 kvfree(workspace->sample);
892 kfree(workspace->bucket);
893 kfree(workspace->bucket_b);
894 kfree(workspace);
895}
896
897static struct list_head *alloc_heuristic_ws(unsigned int level)
898{
899 struct heuristic_ws *ws;
900
901 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
902 if (!ws)
903 return ERR_PTR(-ENOMEM);
904
905 ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
906 if (!ws->sample)
907 goto fail;
908
909 ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
910 if (!ws->bucket)
911 goto fail;
912
913 ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
914 if (!ws->bucket_b)
915 goto fail;
916
917 INIT_LIST_HEAD(&ws->list);
918 return &ws->list;
919fail:
920 free_heuristic_ws(&ws->list);
921 return ERR_PTR(-ENOMEM);
922}
923
924const struct btrfs_compress_op btrfs_heuristic_compress = {
925 .workspace_manager = &heuristic_wsm,
926};
927
928static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
930 &btrfs_heuristic_compress,
931 &btrfs_zlib_compress,
932 &btrfs_lzo_compress,
933 &btrfs_zstd_compress,
934};
935
936static struct list_head *alloc_workspace(int type, unsigned int level)
937{
938 switch (type) {
939 case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
940 case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
941 case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
942 case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
943 default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
948 BUG();
949 }
950}
951
952static void free_workspace(int type, struct list_head *ws)
953{
954 switch (type) {
955 case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
956 case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
957 case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
958 case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
959 default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
964 BUG();
965 }
966}
967
968static void btrfs_init_workspace_manager(int type)
969{
970 struct workspace_manager *wsm;
971 struct list_head *workspace;
972
973 wsm = btrfs_compress_op[type]->workspace_manager;
974 INIT_LIST_HEAD(&wsm->idle_ws);
975 spin_lock_init(&wsm->ws_lock);
976 atomic_set(&wsm->total_ws, 0);
977 init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case.
	 */
983 workspace = alloc_workspace(type, 0);
984 if (IS_ERR(workspace)) {
985 pr_warn(
986 "BTRFS: cannot preallocate compression workspace, will try later\n");
987 } else {
988 atomic_set(&wsm->total_ws, 1);
989 wsm->free_ws = 1;
990 list_add(workspace, &wsm->idle_ws);
991 }
992}
993
994static void btrfs_cleanup_workspace_manager(int type)
995{
996 struct workspace_manager *wsman;
997 struct list_head *ws;
998
999 wsman = btrfs_compress_op[type]->workspace_manager;
1000 while (!list_empty(&wsman->idle_ws)) {
1001 ws = wsman->idle_ws.next;
1002 list_del(ws);
1003 free_workspace(type, ws);
1004 atomic_dec(&wsman->total_ws);
1005 }
1006}
1007
/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantee and we do not return
 * errors.
 */
1014struct list_head *btrfs_get_workspace(int type, unsigned int level)
1015{
1016 struct workspace_manager *wsm;
1017 struct list_head *workspace;
1018 int cpus = num_online_cpus();
1019 unsigned nofs_flag;
1020 struct list_head *idle_ws;
1021 spinlock_t *ws_lock;
1022 atomic_t *total_ws;
1023 wait_queue_head_t *ws_wait;
1024 int *free_ws;
1025
1026 wsm = btrfs_compress_op[type]->workspace_manager;
1027 idle_ws = &wsm->idle_ws;
1028 ws_lock = &wsm->ws_lock;
1029 total_ws = &wsm->total_ws;
1030 ws_wait = &wsm->ws_wait;
1031 free_ws = &wsm->free_ws;
1032
1033again:
1034 spin_lock(ws_lock);
1035 if (!list_empty(idle_ws)) {
1036 workspace = idle_ws->next;
1037 list_del(workspace);
1038 (*free_ws)--;
1039 spin_unlock(ws_lock);
1040 return workspace;
1041
1042 }
1043 if (atomic_read(total_ws) > cpus) {
1044 DEFINE_WAIT(wait);
1045
1046 spin_unlock(ws_lock);
1047 prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1048 if (atomic_read(total_ws) > cpus && !*free_ws)
1049 schedule();
1050 finish_wait(ws_wait, &wait);
1051 goto again;
1052 }
1053 atomic_inc(total_ws);
1054 spin_unlock(ws_lock);
1055
	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs internals.
	 */
1061 nofs_flag = memalloc_nofs_save();
1062 workspace = alloc_workspace(type, level);
1063 memalloc_nofs_restore(nofs_flag);
1064
1065 if (IS_ERR(workspace)) {
1066 atomic_dec(total_ws);
1067 wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting.  There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually.  This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
1079 if (atomic_read(total_ws) == 0) {
1080 static DEFINE_RATELIMIT_STATE(_rs,
1081 60 * HZ,
1082 1);
1083
1084 if (__ratelimit(&_rs)) {
1085 pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
1086 }
1087 }
1088 goto again;
1089 }
1090 return workspace;
1091}
1092
1093static struct list_head *get_workspace(int type, int level)
1094{
1095 switch (type) {
1096 case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
1097 case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
1098 case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level);
1099 case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
1100 default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
1105 BUG();
1106 }
1107}
1108
/*
 * Put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around.
 */
1113void btrfs_put_workspace(int type, struct list_head *ws)
1114{
1115 struct workspace_manager *wsm;
1116 struct list_head *idle_ws;
1117 spinlock_t *ws_lock;
1118 atomic_t *total_ws;
1119 wait_queue_head_t *ws_wait;
1120 int *free_ws;
1121
1122 wsm = btrfs_compress_op[type]->workspace_manager;
1123 idle_ws = &wsm->idle_ws;
1124 ws_lock = &wsm->ws_lock;
1125 total_ws = &wsm->total_ws;
1126 ws_wait = &wsm->ws_wait;
1127 free_ws = &wsm->free_ws;
1128
1129 spin_lock(ws_lock);
1130 if (*free_ws <= num_online_cpus()) {
1131 list_add(ws, idle_ws);
1132 (*free_ws)++;
1133 spin_unlock(ws_lock);
1134 goto wake;
1135 }
1136 spin_unlock(ws_lock);
1137
1138 free_workspace(type, ws);
1139 atomic_dec(total_ws);
1140wake:
1141 cond_wake_up(ws_wait);
1142}
1143
1144static void put_workspace(int type, struct list_head *ws)
1145{
1146 switch (type) {
1147 case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
1148 case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
1149 case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
1150 case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
1151 default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
1156 BUG();
1157 }
1158}
1159
/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to the default.
 */
1164static unsigned int btrfs_compress_set_level(int type, unsigned level)
1165{
1166 const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1167
1168 if (level == 0)
1169 level = ops->default_level;
1170 else
1171 level = min(level, ops->max_level);
1172
1173 return level;
1174}
1175
/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - compression algo are 0-3
 * - the level are bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to
 * allocate and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It may be
 * smaller than the input length if we had to exit early because we ran out of
 * room in the pages array or because we cross the max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes.
 */
1196int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1197 u64 start, struct page **pages,
1198 unsigned long *out_pages,
1199 unsigned long *total_in,
1200 unsigned long *total_out)
1201{
1202 int type = btrfs_compress_type(type_level);
1203 int level = btrfs_compress_level(type_level);
1204 struct list_head *workspace;
1205 int ret;
1206
1207 level = btrfs_compress_set_level(type, level);
1208 workspace = get_workspace(type, level);
1209 ret = compression_compress_pages(type, workspace, mapping, start, pages,
1210 out_pages, total_in, total_out);
1211 put_workspace(type, workspace);
1212 return ret;
1213}
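
/*
 * Illustrative example (not part of the original source): callers encode the
 * pair as (type | level << 4), e.g. zstd (type 3) at level 5 is passed in as
 * type_level == 0x53 and split back into type 3 / level 5 above.
 */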
1214
1215static int btrfs_decompress_bio(struct compressed_bio *cb)
1216{
1217 struct list_head *workspace;
1218 int ret;
1219 int type = cb->compress_type;
1220
1221 workspace = get_workspace(type, 0);
1222 ret = compression_decompress_bio(type, workspace, cb);
1223 put_workspace(type, workspace);
1224
1225 return ret;
1226}
1227
/*
 * A less complex decompression routine.  Our compressed data fits in a single
 * page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in.
 */
1233int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1234 unsigned long start_byte, size_t srclen, size_t destlen)
1235{
1236 struct list_head *workspace;
1237 int ret;
1238
1239 workspace = get_workspace(type, 0);
1240 ret = compression_decompress(type, workspace, data_in, dest_page,
1241 start_byte, srclen, destlen);
1242 put_workspace(type, workspace);
1243
1244 return ret;
1245}
1246
1247void __init btrfs_init_compress(void)
1248{
1249 btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1250 btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1251 btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1252 zstd_init_workspace_manager();
1253}
1254
1255void __cold btrfs_exit_compress(void)
1256{
1257 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1258 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1259 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1260 zstd_cleanup_workspace_manager();
1261}
1262
/*
 * Copy uncompressed data from the working buffer into the pages of the
 * original bio.
 *
 * @buf_start is the byte offset of the working buffer within the uncompressed
 * data, @total_out is the uncompressed offset one past the end of the buffer.
 */
1270int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1271 unsigned long total_out, u64 disk_start,
1272 struct bio *bio)
1273{
1274 unsigned long buf_offset;
1275 unsigned long current_buf_start;
1276 unsigned long start_byte;
1277 unsigned long prev_start_byte;
1278 unsigned long working_bytes = total_out - buf_start;
1279 unsigned long bytes;
1280 struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1281
	/*
	 * start_byte is the first byte of the page we're currently copying
	 * into, relative to the start of the uncompressed data.
	 */
1286 start_byte = page_offset(bvec.bv_page) - disk_start;

	/* We haven't yet hit data corresponding to this page */
1289 if (total_out <= start_byte)
1290 return 1;

	/*
	 * The start of the data we care about is offset into the middle of
	 * our working buffer.
	 */
1296 if (total_out > start_byte && buf_start < start_byte) {
1297 buf_offset = start_byte - buf_start;
1298 working_bytes -= buf_offset;
1299 } else {
1300 buf_offset = 0;
1301 }
1302 current_buf_start = buf_start;

	/* Copy bytes from the working buffer into the pages */
1305 while (working_bytes > 0) {
1306 bytes = min_t(unsigned long, bvec.bv_len,
1307 PAGE_SIZE - (buf_offset % PAGE_SIZE));
1308 bytes = min(bytes, working_bytes);
1309
1310 memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + buf_offset,
1311 bytes);
1312 flush_dcache_page(bvec.bv_page);
1313
1314 buf_offset += bytes;
1315 working_bytes -= bytes;
1316 current_buf_start += bytes;

		/* Check if we need to pick another page */
1319 bio_advance(bio, bytes);
1320 if (!bio->bi_iter.bi_size)
1321 return 0;
1322 bvec = bio_iter_iovec(bio, bio->bi_iter);
1323 prev_start_byte = start_byte;
1324 start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * Make sure we're only adjusting our offset into the
		 * compression working buffer when we're switching pages.
		 * Otherwise we can incorrectly keep copying when we were
		 * actually done.
		 */
1332 if (start_byte != prev_start_byte) {
			/*
			 * Make sure our new page is covered by this working
			 * buffer.
			 */
1337 if (total_out <= start_byte)
1338 return 1;

			/*
			 * The next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer; bump our offsets.
			 */
1345 if (total_out > start_byte &&
1346 current_buf_start < start_byte) {
1347 buf_offset = start_byte - buf_start;
1348 working_bytes = total_out - start_byte;
1349 current_buf_start = buf_start + buf_offset;
1350 }
1351 }
1352 }
1353
1354 return 1;
1355}
1356
/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 */
1374#define ENTROPY_LVL_ACEPTABLE (65)
1375#define ENTROPY_LVL_HIGH (80)
1376
/*
 * For increased precision in shannon_entropy calculation, let's do pow(n, M)
 * to save more digits after the comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE) -> 13
 * - 13 * 4 = 52 < 64       -> M = 4
 *
 * So use pow(n, 4).
 */
1387static inline u32 ilog2_w(u64 n)
1388{
1389 return ilog2(n * n * n * n);
1390}
1391
1392static u32 shannon_entropy(struct heuristic_ws *ws)
1393{
1394 const u32 entropy_max = 8 * ilog2_w(2);
1395 u32 entropy_sum = 0;
1396 u32 p, p_base, sz_base;
1397 u32 i;
1398
1399 sz_base = ilog2_w(ws->sample_size);
1400 for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1401 p = ws->bucket[i].count;
1402 p_base = ilog2_w(p);
1403 entropy_sum += p * (sz_base - p_base);
1404 }
1405
1406 entropy_sum /= ws->sample_size;
1407 return entropy_sum * 100 / entropy_max;
1408}
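
/*
 * Illustrative check (not from the original source): for a perfectly uniform
 * 8K sample every bucket holds 32 bytes, so each of the 256 buckets adds
 * 32 * (ilog2_w(8192) - ilog2_w(32)) = 32 * (52 - 20) to entropy_sum; after
 * dividing by the sample size that is 32 == entropy_max, i.e. 100%.  A sample
 * consisting of a single byte value reports 0%.
 */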
1409
1410#define RADIX_BASE 4U
1411#define COUNTERS_SIZE (1U << RADIX_BASE)
1412
1413static u8 get4bits(u64 num, int shift) {
1414 u8 low4bits;
1415
1416 num >>= shift;
1417
1418 low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1419 return low4bits;
1420}
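
/*
 * Illustrative example (not part of the original source): get4bits(0x3a, 0)
 * takes the low nibble 0xa and returns 15 - 10 = 5.  Complementing the digit
 * makes radix_sort() below order the buckets by descending count, which is
 * what byte_core_set_size() relies on.
 */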
1421
/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
1431static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1432 int num)
1433{
1434 u64 max_num;
1435 u64 buf_num;
1436 u32 counters[COUNTERS_SIZE];
1437 u32 new_addr;
1438 u32 addr;
1439 int bitlen;
1440 int shift;
1441 int i;
1442
	/*
	 * Try to avoid useless loop iterations for small numbers stored in
	 * big counters.  Example: 48 33 4 ... in a 64bit array.
	 */
1447 max_num = array[0].count;
1448 for (i = 1; i < num; i++) {
1449 buf_num = array[i].count;
1450 if (buf_num > max_num)
1451 max_num = buf_num;
1452 }
1453
1454 buf_num = ilog2(max_num);
1455 bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1456
1457 shift = 0;
1458 while (shift < bitlen) {
1459 memset(counters, 0, sizeof(counters));
1460
1461 for (i = 0; i < num; i++) {
1462 buf_num = array[i].count;
1463 addr = get4bits(buf_num, shift);
1464 counters[addr]++;
1465 }
1466
1467 for (i = 1; i < COUNTERS_SIZE; i++)
1468 counters[i] += counters[i - 1];
1469
1470 for (i = num - 1; i >= 0; i--) {
1471 buf_num = array[i].count;
1472 addr = get4bits(buf_num, shift);
1473 counters[addr]--;
1474 new_addr = counters[addr];
1475 array_buf[new_addr] = array[i];
1476 }
1477
1478 shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array to
		 * the main one, but that requires some CPU time.  Avoid that
		 * by doing another sort iteration into the original array
		 * instead of a memcpy().
		 */
1486 memset(counters, 0, sizeof(counters));
1487
1488 for (i = 0; i < num; i ++) {
1489 buf_num = array_buf[i].count;
1490 addr = get4bits(buf_num, shift);
1491 counters[addr]++;
1492 }
1493
1494 for (i = 1; i < COUNTERS_SIZE; i++)
1495 counters[i] += counters[i - 1];
1496
1497 for (i = num - 1; i >= 0; i--) {
1498 buf_num = array_buf[i].count;
1499 addr = get4bits(buf_num, shift);
1500 counters[addr]--;
1501 new_addr = counters[addr];
1502 array[new_addr] = array_buf[i];
1503 }
1504
1505 shift += RADIX_BASE;
1506 }
1507}
1508
/*
 * Size of the core byte set - how many bytes cover 90% of the sample.
 *
 * There are several types of structured binary data that use nearly all byte
 * values.  The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data).  Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *			 compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *			 probability is not compressible
 */
1525#define BYTE_CORE_SET_LOW (64)
1526#define BYTE_CORE_SET_HIGH (200)
1527
1528static int byte_core_set_size(struct heuristic_ws *ws)
1529{
1530 u32 i;
1531 u32 coreset_sum = 0;
1532 const u32 core_set_threshold = ws->sample_size * 90 / 100;
1533 struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
1536 radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1537
1538 for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1539 coreset_sum += bucket[i].count;
1540
1541 if (coreset_sum > core_set_threshold)
1542 return i;
1543
1544 for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1545 coreset_sum += bucket[i].count;
1546 if (coreset_sum > core_set_threshold)
1547 break;
1548 }
1549
1550 return i;
1551}
1552
/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data byte set is restricted to a limited number
 * of possible characters, and that restriction in most cases makes data easy
 * to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
1564#define BYTE_SET_THRESHOLD (64)
1565
1566static u32 byte_set_size(const struct heuristic_ws *ws)
1567{
1568 u32 i;
1569 u32 byte_set_size = 0;
1570
1571 for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1572 if (ws->bucket[i].count > 0)
1573 byte_set_size++;
1574 }
1575
	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
1581 for (; i < BUCKET_SIZE; i++) {
1582 if (ws->bucket[i].count > 0) {
1583 byte_set_size++;
1584 if (byte_set_size > BYTE_SET_THRESHOLD)
1585 return byte_set_size;
1586 }
1587 }
1588
1589 return byte_set_size;
1590}
1591
1592static bool sample_repeated_patterns(struct heuristic_ws *ws)
1593{
1594 const u32 half_of_sample = ws->sample_size / 2;
1595 const u8 *data = ws->sample;
1596
1597 return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1598}
1599
1600static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1601 struct heuristic_ws *ws)
1602{
1603 struct page *page;
1604 u64 index, index_end;
1605 u32 i, curr_sample_pos;
1606 u8 *in_data;
1607
	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED).
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
1617 if (end - start > BTRFS_MAX_UNCOMPRESSED)
1618 end = start + BTRFS_MAX_UNCOMPRESSED;
1619
1620 index = start >> PAGE_SHIFT;
1621 index_end = end >> PAGE_SHIFT;

	/* Don't miss an unaligned end */
1624 if (!IS_ALIGNED(end, PAGE_SIZE))
1625 index_end++;
1626
1627 curr_sample_pos = 0;
1628 while (index < index_end) {
1629 page = find_get_page(inode->i_mapping, index);
1630 in_data = kmap_local_page(page);
1631
1632 i = start % PAGE_SIZE;
1633 while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
1635 if (start > end - SAMPLING_READ_SIZE)
1636 break;
1637 memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1638 SAMPLING_READ_SIZE);
1639 i += SAMPLING_INTERVAL;
1640 start += SAMPLING_INTERVAL;
1641 curr_sample_pos += SAMPLING_READ_SIZE;
1642 }
1643 kunmap_local(in_data);
1644 put_page(page);
1645
1646 index++;
1647 }
1648
1649 ws->sample_size = curr_sample_pos;
1650}
1651
/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic
 * to quickly (compared to direct compression) detect data characteristics
 * (compressible/incompressible) to avoid wasting CPU time on incompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
1667int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1668{
1669 struct list_head *ws_list = get_workspace(0, 0);
1670 struct heuristic_ws *ws;
1671 u32 i;
1672 u8 byte;
1673 int ret = 0;
1674
1675 ws = list_entry(ws_list, struct heuristic_ws, list);
1676
1677 heuristic_collect_sample(inode, start, end, ws);
1678
1679 if (sample_repeated_patterns(ws)) {
1680 ret = 1;
1681 goto out;
1682 }
1683
1684 memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1685
1686 for (i = 0; i < ws->sample_size; i++) {
1687 byte = ws->sample[i];
1688 ws->bucket[byte].count++;
1689 }
1690
1691 i = byte_set_size(ws);
1692 if (i < BYTE_SET_THRESHOLD) {
1693 ret = 2;
1694 goto out;
1695 }
1696
1697 i = byte_core_set_size(ws);
1698 if (i <= BYTE_CORE_SET_LOW) {
1699 ret = 3;
1700 goto out;
1701 }
1702
1703 if (i >= BYTE_CORE_SET_HIGH) {
1704 ret = 0;
1705 goto out;
1706 }
1707
1708 i = shannon_entropy(ws);
1709 if (i <= ENTROPY_LVL_ACEPTABLE) {
1710 ret = 4;
1711 goto out;
1712 }
1713
	/*
	 * For entropy levels below ENTROPY_LVL_HIGH, additional analysis
	 * would be needed to give a green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 *    values, every bucket has a counter at level ~54.  The heuristic
	 *    would be confused.  This can happen when data have some internal
	 *    repeated patterns like "abbacbbc...".  This can be detected by
	 *    analyzing pairs of bytes, which is too costly.
	 */
1729 if (i < ENTROPY_LVL_HIGH) {
1730 ret = 5;
1731 goto out;
1732 } else {
1733 ret = 0;
1734 goto out;
1735 }
1736
1737out:
1738 put_workspace(0, ws_list);
1739 return ret;
1740}
1741
/*
 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 * level, an unrecognized string will set the default level.
 */
1746unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1747{
1748 unsigned int level = 0;
1749 int ret;
1750
1751 if (!type)
1752 return 0;
1753
1754 if (str[0] == ':') {
1755 ret = kstrtouint(str + 1, 10, &level);
1756 if (ret)
1757 level = 0;
1758 }
1759
1760 level = btrfs_compress_set_level(type, level);
1761
1762 return level;
1763}
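
/*
 * Illustrative examples (not part of the original source), assuming the zstd
 * limits used by btrfs (default level 3, maximum level 15):
 *
 *	btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, ":7")	-> 7
 *	btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, ":999")	-> 15 (clamped)
 *	btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, "")	-> 3 (default)
 */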
1764