/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

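/*
 * Append a bio to a request, either as the first bio or by merging it
 * onto the tail; returns -EINVAL if the queue's merge constraints do
 * not allow the bio to be added.
 */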
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

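/*
 * Undo the work of __blk_rq_map_user(): unmap a directly mapped bio,
 * or copy back (as needed) and free a copied one.
 */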
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

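/*
 * Map a single chunk of a user buffer into @rq.  Returns the number of
 * bytes added to the request on success, or a negative errno on failure.
 */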
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

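	/*
	 * If the alignment requirement is satisfied, map in the user pages
	 * for direct DMA; otherwise set up kernel bounce buffers.
	 */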
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, ubuf, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

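	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */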
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

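	/* if it was bounced we must call the end io function */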
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

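/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where the request should be inserted
 * @rq:		request structure to fill
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context, and the original rq->bio must be passed back
 *    in for proper unmapping, since I/O completion may have changed rq->bio.
 */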
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

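		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit.
		 */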
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);

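/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where the request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */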
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
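		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path.
		 */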
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

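/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */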
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

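/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where the request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */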
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	int do_copy = 0;
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);

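/*
 * Example usage (an illustrative sketch with error handling elided;
 * @q, @bd_disk, @buffer and @bufflen stand in for the caller's own
 * variables):
 *
 *	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_KERNEL);
 *	if (!ret)
 *		blk_execute_rq(q, bd_disk, rq, 0);
 *	blk_put_request(rq);
 */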