linux/fs/crypto/bio.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

/**
 * fscrypt_decrypt_bio() - decrypt the contents of a bio
 * @bio: the bio to decrypt
 *
 * Decrypt the pagecache blocks covered by @bio in-place.  Pages containing a
 * block that fails to decrypt are marked with PG_error.
 */
void fscrypt_decrypt_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
							   bv->bv_offset);
		if (ret)
			SetPageError(page);
	}
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
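
/*
 * Illustrative sketch, not part of this file: one plausible way a filesystem's
 * read path could use fscrypt_decrypt_bio() after a READ bio completes.  The
 * example_* names are hypothetical; real callers (e.g. ext4, f2fs) punt this
 * work to a workqueue because decryption may sleep.
 */
#if 0	/* example only, never compiled */
static void example_read_work(struct work_struct *work)
{
	struct bio *bio = example_work_to_bio(work);	/* hypothetical helper */

	/* Decrypt every pagecache block covered by the completed READ bio. */
	fscrypt_decrypt_bio(bio);

	/* Pages left with PG_error set must not be marked uptodate. */
	example_finish_read(bio);			/* hypothetical helper */
	bio_put(bio);
}
#endif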

/*
 * Zero out blocks using inline encryption: write the zero page with the bio's
 * encryption context set, so the data is encrypted by the inline crypto
 * hardware (or the blk-crypto fallback) on its way to disk.
 */
static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
					      pgoff_t lblk, sector_t pblk,
					      unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
	struct bio *bio;
	int ret, err = 0;
	int num_pages = 0;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);

	while (len) {
		unsigned int blocks_this_page = min(len, blocks_per_page);
		unsigned int bytes_this_page = blocks_this_page << blockbits;

		if (num_pages == 0) {
			fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
			bio_set_dev(bio, inode->i_sb->s_bdev);
			bio->bi_iter.bi_sector =
					pblk << (blockbits - SECTOR_SHIFT);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		}
		ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
		if (WARN_ON(ret != bytes_this_page)) {
			err = -EIO;
			goto out;
		}
		num_pages++;
		len -= blocks_this_page;
		lblk += blocks_this_page;
		pblk += blocks_this_page;
		if (num_pages == BIO_MAX_VECS || !len ||
		    !fscrypt_mergeable_bio(bio, inode, lblk)) {
			err = submit_bio_wait(bio);
			if (err)
				goto out;
			bio_reset(bio);
			num_pages = 0;
		}
	}
out:
	bio_put(bio);
	return err;
}

/**
 * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file
 * @inode: the file's inode
 * @lblk: the first file logical block to zero out
 * @pblk: the first filesystem physical block to zero out
 * @len: number of blocks to zero out
 *
 * Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write
 * ciphertext blocks which decrypt to the all-zeroes block.  The blocks must be
 * both logically and physically contiguous.  It's also assumed that the
 * filesystem only uses a single block device, ->s_bdev.
 *
 * Note that since each block uses a different IV, this involves writing a
 * different ciphertext to each block; we can't simply reuse the same one.
 *
 * Return: 0 on success; -errno on failure.
 */
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
			  sector_t pblk, unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
	const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
	struct page *pages[16]; /* write up to 16 pages at a time */
	unsigned int nr_pages;
	unsigned int i;
	unsigned int offset;
	struct bio *bio;
	int ret, err;

	if (len == 0)
		return 0;

	if (fscrypt_inode_uses_inline_crypto(inode))
		return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk,
							  len);

	BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);
	nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
			 (len + blocks_per_page - 1) >> blocks_per_page_bits);

	/*
	 * We need at least one page for ciphertext.  Allocate the first one
	 * from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail.
	 *
	 * Any additional page allocations are allowed to fail, as they only
	 * help performance, and waiting on the mempool for them could deadlock.
	 */
	for (i = 0; i < nr_pages; i++) {
		pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS :
						     GFP_NOWAIT | __GFP_NOWARN);
		if (!pages[i])
			break;
	}
	nr_pages = i;
	if (WARN_ON(nr_pages <= 0))
		return -EINVAL;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(GFP_NOFS, nr_pages);

	do {
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector = pblk << (blockbits - SECTOR_SHIFT);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		i = 0;
		offset = 0;
		do {
			err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
						  ZERO_PAGE(0), pages[i],
						  blocksize, offset, GFP_NOFS);
			if (err)
				goto out;
			lblk++;
			pblk++;
			len--;
			offset += blocksize;
			if (offset == PAGE_SIZE || len == 0) {
				ret = bio_add_page(bio, pages[i++], offset, 0);
				if (WARN_ON(ret != offset)) {
					err = -EIO;
					goto out;
				}
				offset = 0;
			}
		} while (i != nr_pages && len != 0);

		err = submit_bio_wait(bio);
		if (err)
			goto out;
		bio_reset(bio);
	} while (len != 0);
	err = 0;
out:
	bio_put(bio);
	for (i = 0; i < nr_pages; i++)
		fscrypt_free_bounce_page(pages[i]);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
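
/*
 * Illustrative sketch, not part of this file: roughly how a filesystem might
 * choose between fscrypt_zeroout_range() and an ordinary zeroout when it needs
 * to zero a mapped, contiguous range of blocks on-disk (compare
 * ext4_issue_zeroout()).  The example_* name is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_zero_mapped_blocks(struct inode *inode, pgoff_t lblk,
				      sector_t pblk, unsigned int nr_blocks)
{
	/* Encrypted regular files need per-block ciphertext, not raw zeroes. */
	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		return fscrypt_zeroout_range(inode, lblk, pblk, nr_blocks);

	/* Unencrypted case: the block layer can zero the range directly. */
	return sb_issue_zeroout(inode->i_sb, pblk, nr_blocks, GFP_NOFS);
}
#endif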