// SPDX-License-Identifier: GPL-2.0
/*
 * blk-integrity.c - Block layer data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}
EXPORT_SYMBOL(blk_rq_count_integrity_sg);
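
/*
 * Example usage (illustrative sketch, not part of this file): a driver
 * sizing its own integrity scatterlist before mapping might do:
 *
 *	int nsegs = blk_rq_count_integrity_sg(q, rq->bio);
 *	struct scatterlist *sgl;
 *
 *	sgl = kmalloc_array(nsegs, sizeof(*sgl), GFP_NOIO);
 *	if (!sgl)
 *		return -ENOMEM;
 *	sg_init_table(sgl, nsegs);
 */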

/**
 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 * @sglist:	target scatterlist
 *
 * Description: Map the integrity vectors in request into a
 * scatterlist.  The scatterlist must be big enough to hold all
 * elements.  I.e. sized using blk_rq_count_integrity_sg().
 */
int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
			    struct scatterlist *sglist)
{
	struct bio_vec iv, ivprv = { NULL };
	struct scatterlist *sg = NULL;
	unsigned int segments = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (sg->length + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			sg->length += iv.bv_len;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				sg_unmark_end(sg);
				sg = sg_next(sg);
			}

			sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
			segments++;
		}

		prev = 1;
		ivprv = iv;
	}

	if (sg)
		sg_mark_end(sg);

	return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
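
/*
 * Example usage (illustrative sketch): pairing the two helpers above the
 * way a request-based driver might when building descriptors for
 * protection information ("prot_sgl" is a hypothetical, suitably sized
 * scatterlist):
 *
 *	int nsegs;
 *
 *	sg_init_table(prot_sgl, blk_rq_count_integrity_sg(q, rq->bio));
 *	nsegs = blk_rq_map_integrity_sg(q, rq->bio, prot_sgl);
 *
 * after which prot_sgl[0..nsegs - 1] describes the bio's integrity buffers.
 */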

/**
 * blk_integrity_compare - Compare integrity profile of two disks
 * @gd1:	Disk to compare
 * @gd2:	Disk to compare
 *
 * Description: Meta-devices like DM and MD need to verify that all
 * sub-devices use the same integrity format before advertising to
 * upper layers that they can send/receive integrity metadata.  This
 * function can be used to check whether two gendisk devices have
 * compatible integrity formats.
 */
int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
{
	struct blk_integrity *b1 = &gd1->queue->integrity;
	struct blk_integrity *b2 = &gd2->queue->integrity;

	if (!b1->profile && !b2->profile)
		return 0;

	if (!b1->profile || !b2->profile)
		return -1;

	if (b1->interval_exp != b2->interval_exp) {
		pr_err("%s: %s/%s protection interval %u != %u\n",
		       __func__, gd1->disk_name, gd2->disk_name,
		       1 << b1->interval_exp, 1 << b2->interval_exp);
		return -1;
	}

	if (b1->tuple_size != b2->tuple_size) {
		pr_err("%s: %s/%s tuple sz %u != %u\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->tuple_size, b2->tuple_size);
		return -1;
	}

	if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
		pr_err("%s: %s/%s tag sz %u != %u\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->tag_size, b2->tag_size);
		return -1;
	}

	if (b1->profile != b2->profile) {
		pr_err("%s: %s/%s type %s != %s\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->profile->name, b2->profile->name);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(blk_integrity_compare);
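
/*
 * Example usage (illustrative sketch): a stacking driver such as DM or MD
 * might reject a new member whose integrity profile differs from an
 * existing one ("reference" and "candidate" are hypothetical gendisks):
 *
 *	if (blk_integrity_compare(reference, candidate) < 0)
 *		return -EINVAL;
 */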

bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
{
	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
		return true;

	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
		return false;

	if (bio_integrity(req->bio)->bip_flags !=
	    bio_integrity(next->bio)->bip_flags)
		return false;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
	    q->limits.max_integrity_segments)
		return false;

	if (integrity_req_gap_back_merge(req, next->bio))
		return false;

	return true;
}

bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	int nr_integrity_segs;
	struct bio *next = bio->bi_next;

	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
		return true;

	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
		return false;

	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
		return false;

	bio->bi_next = NULL;
	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
	bio->bi_next = next;

	if (req->nr_integrity_segments + nr_integrity_segs >
	    q->limits.max_integrity_segments)
		return false;

	req->nr_integrity_segments += nr_integrity_segs;

	return true;
}
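
/*
 * Both merge helpers above are conservative: a merge is refused when only
 * one side carries integrity metadata, when the bip_flags of the two sides
 * differ, or when the combined segment count would exceed
 * q->limits.max_integrity_segments.  blk_integrity_merge_bio() temporarily
 * clears bio->bi_next so that blk_rq_count_integrity_sg() counts only the
 * bio being merged rather than the whole chain.
 */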

static inline struct blk_integrity *dev_to_bi(struct device *dev)
{
	return &dev_to_disk(dev)->queue->integrity;
}

static ssize_t format_show(struct device *dev, struct device_attribute *attr,
			   char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	if (bi->profile && bi->profile->name)
		return sysfs_emit(page, "%s\n", bi->profile->name);
	return sysfs_emit(page, "none\n");
}

static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
			     char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n", bi->tag_size);
}

static ssize_t protection_interval_bytes_show(struct device *dev,
					      struct device_attribute *attr,
					      char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  bi->interval_exp ? 1 << bi->interval_exp : 0);
}

static ssize_t read_verify_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *page, size_t count)
{
	struct blk_integrity *bi = dev_to_bi(dev);
	char *p = (char *) page;
	unsigned long val = simple_strtoul(p, &p, 10);

	if (val)
		bi->flags |= BLK_INTEGRITY_VERIFY;
	else
		bi->flags &= ~BLK_INTEGRITY_VERIFY;

	return count;
}

static ssize_t read_verify_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%d\n", !!(bi->flags & BLK_INTEGRITY_VERIFY));
}

static ssize_t write_generate_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t count)
{
	struct blk_integrity *bi = dev_to_bi(dev);
	char *p = (char *) page;
	unsigned long val = simple_strtoul(p, &p, 10);

	if (val)
		bi->flags |= BLK_INTEGRITY_GENERATE;
	else
		bi->flags &= ~BLK_INTEGRITY_GENERATE;

	return count;
}

static ssize_t write_generate_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%d\n", !!(bi->flags & BLK_INTEGRITY_GENERATE));
}

static ssize_t device_is_integrity_capable_show(struct device *dev,
						struct device_attribute *attr,
						char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  !!(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE));
}

static DEVICE_ATTR_RO(format);
static DEVICE_ATTR_RO(tag_size);
static DEVICE_ATTR_RO(protection_interval_bytes);
static DEVICE_ATTR_RW(read_verify);
static DEVICE_ATTR_RW(write_generate);
static DEVICE_ATTR_RO(device_is_integrity_capable);

static struct attribute *integrity_attrs[] = {
	&dev_attr_format.attr,
	&dev_attr_tag_size.attr,
	&dev_attr_protection_interval_bytes.attr,
	&dev_attr_read_verify.attr,
	&dev_attr_write_generate.attr,
	&dev_attr_device_is_integrity_capable.attr,
	NULL
};

const struct attribute_group blk_integrity_attr_group = {
	.name = "integrity",
	.attrs = integrity_attrs,
};

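/*
 * Example (illustrative, not part of this file): once a disk has an
 * integrity profile registered, the attributes above surface under
 * /sys/block/<disk>/integrity/, e.g.:
 *
 *	$ cat /sys/block/sda/integrity/format
 *	T10-DIF-TYPE1-CRC
 *	$ echo 0 > /sys/block/sda/integrity/write_generate
 */
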
/*
 * The nop profile is used when a template is registered without a profile
 * of its own: integrity buffers are still allocated and passed through,
 * but the block layer neither generates nor verifies their contents.
 */
static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
{
	return BLK_STS_OK;
}

static void blk_integrity_nop_prepare(struct request *rq)
{
}

static void blk_integrity_nop_complete(struct request *rq,
				       unsigned int nr_bytes)
{
}

static const struct blk_integrity_profile nop_profile = {
	.name = "nop",
	.generate_fn = blk_integrity_nop_fn,
	.verify_fn = blk_integrity_nop_fn,
	.prepare_fn = blk_integrity_nop_prepare,
	.complete_fn = blk_integrity_nop_complete,
};

/**
 * blk_integrity_register - Register a gendisk as being integrity-capable
 * @disk:	struct gendisk pointer to make integrity-aware
 * @template:	block integrity profile to register
 *
 * Description: When a device needs to advertise itself as being able to
 * send/receive integrity metadata it must use this function to register
 * the capability with the block layer. The template is a blk_integrity
 * struct with values appropriate for the underlying hardware. See
 * Documentation/block/data-integrity.rst.
 */
void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
		template->flags;
	bi->interval_exp = template->interval_exp ? :
		ilog2(queue_logical_block_size(disk->queue));
	bi->profile = template->profile ? template->profile : &nop_profile;
	bi->tuple_size = template->tuple_size;
	bi->tag_size = template->tag_size;

	blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (disk->queue->crypto_profile) {
		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together. Disabling hardware inline encryption.\n");
		disk->queue->crypto_profile = NULL;
	}
#endif
}
EXPORT_SYMBOL(blk_integrity_register);
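
/*
 * Example usage (illustrative sketch, not part of this file): a driver
 * advertising T10 Type 1 protection might fill in a template and register
 * it roughly as follows ("disk" is the driver's gendisk):
 *
 *	struct blk_integrity bi = {
 *		.profile	= &t10_pi_type1_crc,
 *		.tuple_size	= sizeof(struct t10_pi_tuple),
 *		.flags		= BLK_INTEGRITY_DEVICE_CAPABLE,
 *	};
 *
 *	blk_integrity_register(disk, &bi);
 */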

/**
 * blk_integrity_unregister - Unregister block integrity profile
 * @disk:	disk whose integrity profile to unregister
 *
 * Description: This function unregisters the integrity capability from
 * a block device.
 */
void blk_integrity_unregister(struct gendisk *disk)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	if (!bi->profile)
		return;

	/* ensure all bios are off the integrity workqueue */
	blk_flush_integrity();
	blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
	memset(bi, 0, sizeof(*bi));
}
EXPORT_SYMBOL(blk_integrity_unregister);
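
/*
 * Example (illustrative): a driver typically calls blk_integrity_unregister()
 * when the protection format goes away, e.g. after a device is reformatted
 * without protection information, and may follow it with a fresh
 * blk_integrity_register() if a new format applies.
 */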