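// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * With "inline encryption", the block layer handles the encryption and
 * decryption of file contents as part of bio processing, instead of the
 * filesystem doing the crypto itself via the crypto API.  fscrypt still
 * provides the keys and IVs to use.
 */
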
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

#include "fscrypt_private.h"

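/*
 * An fscrypt key prepared for use with blk-crypto: the blk_crypto_key itself
 * plus a reference to the request_queue of each block device the filesystem
 * uses, needed later to evict the key from those devices.
 */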
struct fscrypt_blk_crypto_key {
	struct blk_crypto_key base;
	int num_devs;
	struct request_queue *devs[];
};

static int fscrypt_get_num_devices(struct super_block *sb)
{
	if (sb->s_cop->get_num_devices)
		return sb->s_cop->get_num_devices(sb);
	return 1;
}

static void fscrypt_get_devices(struct super_block *sb, int num_devs,
				struct request_queue **devs)
{
	if (num_devs == 1)
		devs[0] = bdev_get_queue(sb->s_bdev);
	else
		sb->s_cop->get_devices(sb, devs);
}

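/* Return the number of DUN (data unit number) bytes the inode's policy needs */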
static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
{
	struct super_block *sb = ci->ci_inode->i_sb;
	unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
	int ino_bits = 64, lblk_bits = 64;

	if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
		return offsetofend(union fscrypt_iv, nonce);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
		return sizeof(__le64);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
		return sizeof(__le32);

	/* Default case: IVs are just the file logical block number */
	if (sb->s_cop->get_ino_and_lblk_bits)
		sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
	return DIV_ROUND_UP(lblk_bits, 8);
}
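
/*
 * Decide whether the inode's contents encryption should use blk-crypto inline
 * encryption rather than filesystem-layer crypto.  Inline encryption is used
 * only if the filesystem is mounted with the inlinecrypt option and all its
 * block device(s) support the needed crypto configuration; in that case
 * ci->ci_inlinecrypt is set.
 */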
int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	struct blk_crypto_config crypto_cfg;
	int num_devs;
	struct request_queue **devs;
	int i;

	/* The file must need contents encryption, not filenames encryption */
	if (!S_ISREG(inode->i_mode))
		return 0;

	/* The crypto mode must have a blk-crypto counterpart */
	if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
		return 0;

	/* The filesystem must be mounted with -o inlinecrypt */
	if (!(sb->s_flags & SB_INLINECRYPT))
		return 0;
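
	/*
	 * When a page contains multiple logically contiguous filesystem
	 * blocks, some filesystem code only calls fscrypt_mergeable_bio() for
	 * the first block in the page.  This is fine for most of fscrypt's IV
	 * generation strategies, where contiguous blocks imply contiguous IVs.
	 * But it doesn't work with IV_INO_LBLK_32.  For now, simply exclude
	 * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption.
	 */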
	if ((fscrypt_policy_flags(&ci->ci_policy) &
	     FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
	    sb->s_blocksize != PAGE_SIZE)
		return 0;
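
	/*
	 * On all the filesystem's block device(s), blk-crypto must support the
	 * crypto configuration that the file would use.
	 */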
	crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
	crypto_cfg.data_unit_size = sb->s_blocksize;
	crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
	num_devs = fscrypt_get_num_devices(sb);
	devs = kmalloc_array(num_devs, sizeof(*devs), GFP_KERNEL);
	if (!devs)
		return -ENOMEM;
	fscrypt_get_devices(sb, num_devs, devs);

	for (i = 0; i < num_devs; i++) {
		if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
			goto out_free_devs;
	}

	ci->ci_inlinecrypt = true;
out_free_devs:
	kfree(devs);

	return 0;
}

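/*
 * Prepare the inode's raw encryption key for use with blk-crypto: initialize
 * a blk_crypto_key with it and start using it on all the filesystem's block
 * device(s), taking a reference to each request_queue so that the key can
 * later be evicted from the devices.
 */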
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
				     const u8 *raw_key,
				     const struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
	int num_devs = fscrypt_get_num_devices(sb);
	int queue_refs = 0;
	struct fscrypt_blk_crypto_key *blk_key;
	int err;
	int i;

	blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_KERNEL);
	if (!blk_key)
		return -ENOMEM;

	blk_key->num_devs = num_devs;
	fscrypt_get_devices(sb, num_devs, blk_key->devs);

	err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode,
				  fscrypt_get_dun_bytes(ci), sb->s_blocksize);
	if (err) {
		fscrypt_err(inode, "error %d initializing blk-crypto key", err);
		goto fail;
	}
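
	/*
	 * We have to start using blk-crypto on all the filesystem's block
	 * device(s).  We also have to save all the request_queue's for later
	 * so that the key can be evicted from them.  This is needed because
	 * some keys (e.g. the per-mode keys) aren't destroyed until after the
	 * filesystem is unmounted.
	 */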
	for (i = 0; i < num_devs; i++) {
		if (!blk_get_queue(blk_key->devs[i])) {
			fscrypt_err(inode, "couldn't get request_queue");
			err = -EAGAIN;
			goto fail;
		}
		queue_refs++;

		err = blk_crypto_start_using_key(&blk_key->base,
						 blk_key->devs[i]);
		if (err) {
			fscrypt_err(inode,
				    "error %d starting to use blk-crypto", err);
			goto fail;
		}
	}
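
	/*
	 * Publish ->blk_key with a RELEASE barrier, pairing with the
	 * smp_load_acquire() used when checking whether the key has been
	 * prepared, so that concurrent readers see a fully initialized key.
	 */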
	smp_store_release(&prep_key->blk_key, blk_key);
	return 0;

fail:
	for (i = 0; i < queue_refs; i++)
		blk_put_queue(blk_key->devs[i]);
	kfree_sensitive(blk_key);
	return err;
}

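/*
 * Evict the key from all the block device(s) it was used on, drop the
 * request_queue references taken when the key was prepared, and then zeroize
 * and free the key.
 */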
void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
	struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
	int i;

	if (blk_key) {
		for (i = 0; i < blk_key->num_devs; i++) {
			blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
			blk_put_queue(blk_key->devs[i]);
		}
		kfree_sensitive(blk_key);
	}
}

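/*
 * Return true if the inode's file contents use blk-crypto inline encryption.
 * The inode's ->i_crypt_info must already be set up.
 */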
bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
	return inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);

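/*
 * Generate the blk-crypto data unit number (DUN) for a file logical block:
 * compute the fscrypt IV for the block and convert it into the u64 array
 * format that blk-crypto expects.
 */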
static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
				 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	union fscrypt_iv iv;
	int i;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
	memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
	for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
		dun[i] = le64_to_cpu(iv.dun[i]);
}

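/**
 * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the I/O
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *	      bio_crypt_set_ctx() can't fail
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 */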
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
			       u64 first_lblk, gfp_t gfp_mask)
{
	const struct fscrypt_info *ci;
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return;
	ci = inode->i_crypt_info;

	fscrypt_generate_dun(ci, first_lblk, dun);
	bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);

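/* Extract the inode and logical block number from a buffer_head. */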
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
				      const struct inode **inode_ret,
				      u64 *lblk_num_ret)
{
	struct page *page = bh->b_page;
	const struct address_space *mapping;
	const struct inode *inode;

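	/*
	 * Buffer_heads can be submitted for non-pagecache pages (e.g. by the
	 * ext4 journal, jbd2), in which case there is no mapping and fscrypt
	 * doesn't apply.
	 */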
	mapping = page_mapping(page);
	if (!mapping)
		return false;
	inode = mapping->host;

	*inode_ret = inode;
	*lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
			(bh_offset(bh) >> inode->i_blkbits);
	return true;
}

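/**
 * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline crypto
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
 * of an inode and block number directly.
 */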
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
				  const struct buffer_head *first_bh,
				  gfp_t gfp_mask)
{
	const struct inode *inode;
	u64 first_lblk;

	if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);

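/**
 * fscrypt_mergeable_bio() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @inode: the inode for the next part of the I/O
 * @next_lblk: the next file logical block number in the I/O
 *
 * When building a bio which may contain data which should undergo inline
 * encryption (or decryption) via fscrypt, filesystems should call this
 * function to ensure that the resulting bio contains only contiguous data
 * unit numbers encrypted with the same key.  This returns false if the next
 * part of the I/O can't be merged with the bio, either because the encryption
 * keys differ or because the data unit numbers would be discontiguous.
 *
 * Return: true iff the I/O is mergeable
 */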
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
			   u64 next_lblk)
{
	const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
		return false;
	if (!bc)
		return true;

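	/*
	 * The bio and the next part of the I/O must use the same encryption
	 * key.  Comparing the key pointers suffices, since all users of a
	 * given prepared key share the same blk_crypto_key object.
	 */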
	if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base)
		return false;

	fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
	return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);

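/**
 * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
 * an inode and block number directly.
 *
 * Return: true iff the I/O is mergeable
 */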
bool fscrypt_mergeable_bio_bh(struct bio *bio,
			      const struct buffer_head *next_bh)
{
	const struct inode *inode;
	u64 next_lblk;

	if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
		return !bio->bi_crypt_context;

	return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);