// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};
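
/*
 * Editorial example (not part of the original file): when a device has no
 * inline encryption hardware, blk-crypto-fallback uses .cipher_str and
 * .keysize with the kernel crypto API, along these lines. Note that
 * "xts(aes)" takes a 64-byte key because XTS uses two independent AES-256
 * keys.
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, raw_key, 64);
 */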

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on allocating (struct bio_crypt_ctx) from the mempool. But this
 * shouldn't be an issue, as most processes will have at most 2 bios at a time.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		"Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no mode exceeds the defined key and IV limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
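
/*
 * Example usage (illustrative sketch; "inode_key" and "lblk_num" are
 * hypothetical): an upper layer such as a filesystem attaches the context
 * just before submission, with a DUN typically derived from the logical
 * block number. GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the mempool
 * allocation above cannot fail.
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk_num };
 *
 *	bio_crypt_set_ctx(bio, &inode_key, dun, GFP_NOIO);
 *	submit_bio(bio);
 */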

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}
EXPORT_SYMBOL_GPL(__bio_crypt_clone);

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
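
/*
 * Worked example: the DUN is a little-endian multi-limb integer, so an
 * increment that overflows dun[0] carries into dun[1]:
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { U64_MAX, 0 };
 *
 *	bio_crypt_dun_increment(dun, 1);
 *	// now dun[0] == 0 and dun[1] == 1
 */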

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}
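
/*
 * Worked example (illustrative): with 512-byte data units
 * (data_unit_size_bits == 9), a bio covering 4096 bytes spans eight data
 * units, so a bio with bc_dun == { 100, 0, ... } is contiguous with a
 * following bio whose DUN is { 108, 0, ... }. The final "carry == 0" check
 * rejects the case where the addition wraps the whole multi-limb DUN past
 * zero.
 */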

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also that their
 * data_unit_nums are continuous (and can hence be merged) in the order
 * @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}
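
/*
 * Worked example: OR-ing bv_len and bv_offset lets a single IS_ALIGNED()
 * test both at once. With a 4096-byte data unit size:
 *
 *	IS_ALIGNED(2048 | 0, 4096)	== false  (partial data unit)
 *	IS_ALIGNED(4096 | 512, 4096)	== false  (misaligned offset)
 *	IS_ALIGNED(8192 | 0, 4096)	== true
 */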

/* Acquire a keyslot for the request's key from its queue's keyslot manager. */
blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
					&rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request
 *
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Completely uninitializes the crypto fields of a request. If a keyslot has
 * been programmed into some inline encryption hardware, that keyslot is
 * released. The rq->crypt_ctx is also freed.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_ksm_put_slot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare a bio for inline encryption
 *
 * @bio_ptr: pointer to the original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into two - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has a bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if the device supports the encryption context, or if we
	 * succeeded in falling back to the crypto API.
	 */
	if (blk_ksm_crypto_cfg_supported(bio->bi_bdev->bd_disk->queue->ksm,
					 &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

/* Copy the bio's crypt context into the request, allocating one if needed. */
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}
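
/*
 * Example usage (illustrative only; the 8-byte DUNs and 4096-byte data
 * units are just one plausible configuration, and "raw_key" must be
 * 64 bytes for AES-256-XTS):
 *
 *	struct blk_crypto_key key;
 *	int err;
 *
 *	err = blk_crypto_init_key(&key, raw_key,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8, 4096);
 *	if (err)
 *		return err;
 */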

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       blk_ksm_crypto_cfg_supported(q->ksm, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
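
/*
 * For orientation, the typical key lifecycle seen by an upper layer looks
 * roughly like this (illustrative sketch; error handling omitted and all
 * variables assumed to exist):
 *
 *	blk_crypto_init_key(&key, raw_key, mode, dun_bytes, data_unit_size);
 *	blk_crypto_start_using_key(&key, q);
 *	...
 *	bio_crypt_set_ctx(bio, &key, dun, GFP_NOIO);
 *	submit_bio(bio);
 *	...
 *	blk_crypto_evict_key(q, &key);	// see blk_crypto_evict_key() below
 */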

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into.  The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success, -errno on error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return blk_ksm_evict_key(q->ksm, key);

	/*
	 * If the request queue's associated inline encryption hardware didn't
	 * have support for the key, then the key might have been programmed
	 * into the fallback keyslot manager, so try to evict from there.
	 */
	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);