/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

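/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the queue's limits.
 */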
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

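/*
 * Undo a single user mapping: directly mapped pages are unpinned via
 * bio_unmap_user(), while a copied (bounced) bio has its data written
 * back to user space and its pages freed via bio_uncopy_user().
 */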
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

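/*
 * Map or copy a single user-space segment into @rq.  Returns the number
 * of bytes added to the request on success, or a negative errno.
 */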
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	unsigned int alignment;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * If the alignment requirement is satisfied, map in the user pages
	 * for direct DMA.  Otherwise set up kernel bounce buffers.
	 */
	uaddr = (unsigned long) ubuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	if (!(uaddr & alignment) && !(len & alignment))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

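	/*
	 * Map the data in BIO_MAX_SIZE chunks, appending one bio to the
	 * request per chunk.
	 */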
	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit.
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

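	/*
	 * If a bounce copy was used instead of a direct mapping, note it
	 * on the request.
	 */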
	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

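	/*
	 * Zero-length segments are invalid; a misaligned base address in
	 * any segment forces the copy path below.
	 */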
	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
		if (!iov[i].iov_len)
			return -EINVAL;
	}

	if (unaligned || (q->dma_pad_mask & len))
		bio = bio_copy_user_iov(q, iov, iov_count, read);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path.
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

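	/*
	 * Walk the whole bio chain; a bounced bio keeps a pointer to the
	 * original bio in ->bi_private, and it is that original mapping
	 * which must be undone.
	 */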
	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr;
	unsigned int alignment;
	int reading = rq_data_dir(rq) == READ;
	int do_copy = 0;
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

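	/*
	 * A buffer that is misaligned for DMA, oddly sized, or located on
	 * the stack cannot be mapped directly; copy it through bounce
	 * pages instead.
	 */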
	kaddr = (unsigned long)kbuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	do_copy = ((kaddr & alignment) || (len & alignment) ||
		   object_is_on_stack(kbuf));

	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);