linux/block/blk-crypto-fallback.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

#include "blk-cgroup.h"
#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
		 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
		 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
	struct bio_crypt_ctx crypt_ctx;
	/*
	 * Copy of the bvec_iter when this bio was submitted.
	 * We only want to en/decrypt the part of the bio described by the
	 * bvec_iter at submission time, because the bio might be split before
	 * being resubmitted.
	 */
	struct bvec_iter crypt_iter;
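	/*
	 * The two arms of this union are used at different stages of a READ:
	 * bi_private_orig/bi_end_io_orig save the caller's fields from bio
	 * prep until the bio completes, after which work/bio are used to
	 * queue the bio for decryption on blk_crypto_wq.
	 */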
	union {
		struct {
			struct work_struct work;
			struct bio *bio;
		};
		struct {
			void *bi_private_orig;
			bio_end_io_t *bi_end_io_orig;
		};
	};
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes.  However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
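/*
 * tfms_inited[mode] is true once every keyslot has a tfm allocated for that
 * mode.  Writes are serialized by tfms_init_lock; lockless readers pair
 * smp_load_acquire() with the smp_store_release() that sets the flag.
 */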
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_fallback_keyslot {
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_crypto_profile *blk_crypto_fallback_profile;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set crypto_bio_split;

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

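/*
 * Clear the key material programmed into @slot and mark the slot invalid.
 * Used both when a slot is reprogrammed for a different mode and when
 * blk-crypto evicts a key.
 */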
static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
	int err;

	WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

	/* Clear the key in the skcipher */
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
				     blk_crypto_modes[crypto_mode].keysize);
	WARN_ON(err);
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

static int
blk_crypto_fallback_keyslot_program(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
	const enum blk_crypto_mode_num crypto_mode =
						key->crypto_cfg.crypto_mode;
	int err;

	if (crypto_mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
		blk_crypto_fallback_evict_keyslot(slot);

	slotp->crypto_mode = crypto_mode;
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
	if (err) {
		blk_crypto_fallback_evict_keyslot(slot);
		return err;
	}
	return 0;
}

static int blk_crypto_fallback_keyslot_evict(struct blk_crypto_profile *profile,
					     const struct blk_crypto_key *key,
					     unsigned int slot)
{
	blk_crypto_fallback_evict_keyslot(slot);
	return 0;
}

static const struct blk_crypto_ll_ops blk_crypto_fallback_ll_ops = {
	.keyslot_program	= blk_crypto_fallback_keyslot_program,
	.keyslot_evict		= blk_crypto_fallback_keyslot_evict,
};

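/*
 * Completion handler for the bounce bio of a WRITE: free the bounce pages,
 * copy the I/O status to the original bio, and complete the original bio.
 */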
static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
	struct bio *src_bio = enc_bio->bi_private;
	int i;

	for (i = 0; i < enc_bio->bi_vcnt; i++)
		mempool_free(enc_bio->bi_io_vec[i].bv_page,
			     blk_crypto_bounce_page_pool);

	src_bio->bi_status = enc_bio->bi_status;

	bio_uninit(enc_bio);
	kfree(enc_bio);
	bio_endio(src_bio);
}

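/*
 * Clone bio_src's bvecs into a newly allocated bio covering the same sectors.
 * The encryption context is not cloned: the bounce bio will carry ciphertext
 * and must look like a regular bio to the rest of the stack.
 */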
static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
{
	unsigned int nr_segs = bio_segments(bio_src);
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_kmalloc(nr_segs, GFP_NOIO);
	if (!bio)
		return NULL;
	bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,
		 bio_src->bi_opf);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

	bio_clone_blkg_association(bio, bio_src);

	return bio;
}

static bool
blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
				     struct skcipher_request **ciph_req_ret,
				     struct crypto_wait *wait)
{
	struct skcipher_request *ciph_req;
	const struct blk_crypto_fallback_keyslot *slotp;
	int keyslot_idx = blk_crypto_keyslot_index(slot);

	slotp = &blk_crypto_keyslots[keyslot_idx];
	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
	if (!ciph_req)
		return false;

	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	*ciph_req_ret = ciph_req;

	return true;
}

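/*
 * The bounce bio needs one single-page bvec (and one bounce page) per segment
 * of the source bio, so a bio with more than BIO_MAX_VECS segments is split
 * here before encryption; the remainder is chained and resubmitted.
 */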
static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	unsigned int i = 0;
	unsigned int num_sectors = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		num_sectors += bv.bv_len >> SECTOR_SHIFT;
		if (++i == BIO_MAX_VECS)
			break;
	}
	if (num_sectors < bio_sectors(bio)) {
		struct bio *split_bio;

		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
				      &crypto_bio_split);
		if (!split_bio) {
			bio->bi_status = BLK_STS_RESOURCE;
			return false;
		}
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);
		*bio_ptr = split_bio;
	}

	return true;
}

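/*
 * The IV passed to the skcipher is simply the DUN words converted to
 * little-endian and laid out contiguously.
 */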
union blk_crypto_iv {
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				 union blk_crypto_iv *iv)
{
	int i;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
		iv->dun[i] = cpu_to_le64(dun[i]);
}

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
 * and replace *bio_ptr with the bounce bio. May split input bio if it's too
 * large. Returns true on success. Returns false and sets bio->bi_status on
 * error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
	struct bio *src_bio, *enc_bio;
	struct bio_crypt_ctx *bc;
	struct blk_crypto_keyslot *slot;
	int data_unit_size;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
	unsigned int i, j;
	bool ret = false;
	blk_status_t blk_st;

	/* Split the bio if it has too many segments for a single bounce bio */
	if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
		return false;

	src_bio = *bio_ptr;
	bc = src_bio->bi_crypt_context;
	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

	/* Allocate bounce bio for encryption */
	enc_bio = blk_crypto_fallback_clone_bio(src_bio);
	if (!enc_bio) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return false;
	}

	/*
	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
	 * this bio's algorithm and key.
	 */
	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
					bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		src_bio->bi_status = blk_st;
		goto out_put_enc_bio;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		goto out_release_keyslot;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&src, 1);
	sg_init_table(&dst, 1);

	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
				   iv.bytes);

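	/*
	 * The same request and single-entry scatterlists are reused for every
	 * data unit; only the pages/offsets and the IV change between calls.
	 */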
	/* Encrypt each page in the bounce bio */
	for (i = 0; i < enc_bio->bi_vcnt; i++) {
		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
		struct page *plaintext_page = enc_bvec->bv_page;
		struct page *ciphertext_page =
			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

		enc_bvec->bv_page = ciphertext_page;

		if (!ciphertext_page) {
			src_bio->bi_status = BLK_STS_RESOURCE;
			goto out_free_bounce_pages;
		}

		sg_set_page(&src, plaintext_page, data_unit_size,
			    enc_bvec->bv_offset);
		sg_set_page(&dst, ciphertext_page, data_unit_size,
			    enc_bvec->bv_offset);

		/* Encrypt each data unit in this page */
		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
					    &wait)) {
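				/*
				 * Count the current bounce page as allocated
				 * so the error path below frees it too.
				 */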
				i++;
				src_bio->bi_status = BLK_STS_IOERR;
				goto out_free_bounce_pages;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
		}
	}

	enc_bio->bi_private = src_bio;
	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
	*bio_ptr = enc_bio;
	ret = true;

	enc_bio = NULL;
	goto out_free_ciph_req;

out_free_bounce_pages:
	while (i > 0)
		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
			     blk_crypto_bounce_page_pool);
out_free_ciph_req:
	skcipher_request_free(ciph_req);
out_release_keyslot:
	blk_crypto_put_keyslot(slot);
out_put_enc_bio:
	if (enc_bio)
		bio_uninit(enc_bio);
	kfree(enc_bio);
	return ret;
}

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
	struct bio_fallback_crypt_ctx *f_ctx =
		container_of(work, struct bio_fallback_crypt_ctx, work);
	struct bio *bio = f_ctx->bio;
	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
	struct blk_crypto_keyslot *slot;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist sg;
	struct bio_vec bv;
	struct bvec_iter iter;
	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
	unsigned int i;
	blk_status_t blk_st;

	/*
	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
	 * this bio's algorithm and key.
	 */
	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
					bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		bio->bi_status = blk_st;
		goto out_no_keyslot;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&sg, 1);
	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);

	/* Decrypt each segment in the bio */
	__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
		struct page *page = bv.bv_page;

		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

		/* Decrypt each data unit in the segment */
		for (i = 0; i < bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
					    &wait)) {
				bio->bi_status = BLK_STS_IOERR;
				goto out;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			sg.offset += data_unit_size;
		}
	}

out:
	skcipher_request_free(ciph_req);
	blk_crypto_put_keyslot(slot);
out_no_keyslot:
	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
	bio_endio(bio);
}

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption on a
 * workqueue, since this function may be called from atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

	bio->bi_private = f_ctx->bi_private_orig;
	bio->bi_end_io = f_ctx->bi_end_io_orig;

	/* If there was an IO error, don't queue for decrypt. */
	if (bio->bi_status) {
		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
		bio_endio(bio);
		return;
	}

	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
	f_ctx->bio = bio;
	queue_work(blk_crypto_wq, &f_ctx->work);
}

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If the bio is doing a WRITE operation, this splits the bio into two parts if
 * it's too big (see blk_crypto_fallback_split_bio_if_needed()), then allocates
 * a bounce bio for the first part, encrypts it, and updates *bio_ptr to point
 * to the bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio (i.e.
 * as if no encryption context was ever specified) for the purposes of the rest
 * of the stack except for blk-integrity (blk-integrity and blk-crypto are not
 * currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx;

	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
		/* User didn't call blk_crypto_start_using_key() first */
		bio->bi_status = BLK_STS_IOERR;
		return false;
	}

	if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
					&bc->bc_key->crypto_cfg)) {
		bio->bi_status = BLK_STS_NOTSUPP;
		return false;
	}

	if (bio_data_dir(bio) == WRITE)
		return blk_crypto_fallback_encrypt_bio(bio_ptr);

	/*
	 * bio READ case: Set up a f_ctx in the bio's bi_private and set the
	 * bi_end_io appropriately to trigger decryption when the bio is ended.
	 */
	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
	f_ctx->crypt_ctx = *bc;
	f_ctx->crypt_iter = bio->bi_iter;
	f_ctx->bi_private_orig = bio->bi_private;
	f_ctx->bi_end_io_orig = bio->bi_end_io;
	bio->bi_private = (void *)f_ctx;
	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
	bio_crypt_free_ctx(bio);

	return true;
}

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
}

static bool blk_crypto_fallback_inited;
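/*
 * Set up the resources shared by all modes: the bio split bioset, the keyslot
 * array and blk_crypto_profile, the decryption workqueue, the bounce page
 * mempool, and the bio_fallback_crypt_ctx cache/mempool.  Called (under
 * tfms_init_lock) the first time any mode starts being used.
 */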
static int blk_crypto_fallback_init(void)
{
	int i;
	int err;

	if (blk_crypto_fallback_inited)
		return 0;

	get_random_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

	err = bioset_init(&crypto_bio_split, 64, 0, 0);
	if (err)
		goto out;

	/* Dynamic allocation is needed because of lockdep_register_key(). */
	blk_crypto_fallback_profile =
		kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
	if (!blk_crypto_fallback_profile) {
		err = -ENOMEM;
		goto fail_free_bioset;
	}

	err = blk_crypto_profile_init(blk_crypto_fallback_profile,
				      blk_crypto_num_keyslots);
	if (err)
		goto fail_free_profile;
	err = -ENOMEM;

	blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
	blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;

	/* All blk-crypto modes have a crypto API fallback. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
		blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
	blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
					WQ_MEM_RECLAIM, num_online_cpus());
	if (!blk_crypto_wq)
		goto fail_destroy_profile;

	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
				      sizeof(blk_crypto_keyslots[0]),
				      GFP_KERNEL);
	if (!blk_crypto_keyslots)
		goto fail_free_wq;

	blk_crypto_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
	if (!blk_crypto_bounce_page_pool)
		goto fail_free_keyslots;

	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
	if (!bio_fallback_crypt_ctx_cache)
		goto fail_free_bounce_page_pool;

	bio_fallback_crypt_ctx_pool =
		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
					 bio_fallback_crypt_ctx_cache);
	if (!bio_fallback_crypt_ctx_pool)
		goto fail_free_crypt_ctx_cache;

	blk_crypto_fallback_inited = true;

	return 0;
fail_free_crypt_ctx_cache:
	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
	mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
	kfree(blk_crypto_keyslots);
fail_free_wq:
	destroy_workqueue(blk_crypto_wq);
fail_destroy_profile:
	blk_crypto_profile_destroy(blk_crypto_fallback_profile);
fail_free_profile:
	kfree(blk_crypto_fallback_profile);
fail_free_bioset:
	bioset_exit(&crypto_bio_split);
out:
	return err;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
	struct blk_crypto_fallback_keyslot *slotp;
	unsigned int i;
	int err = 0;

	/*
	 * Fast path: the mode is already initialized.  The smp_load_acquire()
	 * pairs with the smp_store_release() below, ensuring that the updates
	 * to blk_crypto_keyslots[i].tfms[mode_num] for each i are visible
	 * before we try to access them.
	 */
	if (likely(smp_load_acquire(&tfms_inited[mode_num])))
		return 0;

	mutex_lock(&tfms_init_lock);
	if (tfms_inited[mode_num])
		goto out;

	err = blk_crypto_fallback_init();
	if (err)
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			if (err == -ENOENT) {
				pr_warn_once("Missing crypto API support for \"%s\"\n",
					     cipher_str);
				err = -ENOPKG;
			}
			slotp->tfms[mode_num] = NULL;
			goto out_free_tfms;
		}

		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we set tfms_inited[mode_num].
	 */
	smp_store_release(&tfms_inited[mode_num], true);
	goto out;

out_free_tfms:
	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		crypto_free_skcipher(slotp->tfms[mode_num]);
		slotp->tfms[mode_num] = NULL;
	}
out:
	mutex_unlock(&tfms_init_lock);
	return err;
}