/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    No locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
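
/*
 * Example (illustrative sketch, not part of this file): a driver's
 * interrupt handler can map a hardware-reported tag back to the request
 * it issued.  "mydev", its fields and mydev_finish() are hypothetical
 * names used only for this sketch.
 *
 *	static void mydev_irq_complete(struct mydev *md, int hw_tag, int error)
 *	{
 *		struct request *rq = blk_queue_find_tag(md->queue, hw_tag);
 *
 *		if (unlikely(!rq))
 *			return;
 *		mydev_finish(md, rq, error);
 *	}
 */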

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Tries to free the specified @bqt.  Returns true if it was
 * actually freed and false if there are still references using it.
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	int retval;

	retval = atomic_dec_and_test(&bqt->refcnt);
	if (retval) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}

	return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * For an externally managed @bqt, frees the map.  Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing to a device, yet leave
 *	the queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

/*
 * Allocate the tag_index array (one request pointer per tag) and the tag
 * bitmap for @depth tags, clamping @depth to twice the queue's request
 * pool so tags cannot outnumber the available requests.
 */
static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}
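
/*
 * Worked example of the bitmap sizing above (illustrative, assuming a
 * 64-bit BITS_PER_LONG): for depth == 70, ALIGN(70, 64) == 128, so
 * nr_ulongs == 128 / 64 == 2 unsigned longs, i.e. 128 bits covering the
 * 70 tags.  The bits beyond depth - 1 are never set, since searches are
 * bounded by max_depth.
 */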

/*
 * Allocate and initialize a tag structure, with a single reference held.
 */
static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);
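
/*
 * Example of an externally managed (shared) tag map (illustrative sketch;
 * "q0", "q1" and MYDEV_DEPTH are hypothetical): a host driving several
 * queues from one pool of hardware tags can share a single map.
 *
 *	struct blk_queue_tag *shared = blk_init_tags(MYDEV_DEPTH);
 *
 *	if (!shared)
 *		return -ENOMEM;
 *	blk_queue_init_tags(q0, MYDEV_DEPTH, shared);
 *	blk_queue_init_tags(q1, MYDEV_DEPTH, shared);
 *
 * Each queue takes its own reference on the map; once every queue has
 * been cleaned up, the creator drops the last reference with
 * blk_free_tags(shared).
 */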

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);
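
/*
 * Typical single-queue setup (illustrative sketch; MYDEV_DEPTH is a
 * hypothetical constant): enable tagged queuing with a private map by
 * passing a NULL @tags.
 *
 *	if (blk_queue_init_tags(q, MYDEV_DEPTH, NULL))
 *		goto out_cleanup_queue;
 *
 * blk_cleanup_queue() later releases the map via __blk_queue_free_tags().
 */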

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * If we already have a large enough real_max_depth, just adjust
	 * max_depth.  *NOTE* as requests with tag values between new_depth
	 * and real_max_depth can be in-flight, the tag map cannot be
	 * shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case.
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * Save the old state info, so we can copy it back.
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
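
/*
 * Example of changing the depth at run time (illustrative sketch): since
 * this must run under the queue lock, a driver reacting to a device
 * reporting a full queue might do:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	ret = blk_queue_resize_tags(q, new_depth);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * Shrinking only lowers max_depth; the backing arrays are never shrunk,
 * because higher-numbered tags may still be in flight.
 */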

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when the request has completed, to release its tag
 *    and clear its slot in the tag map.  It must be called before the
 *    request is returned to the free list, or the internal tag list is
 *    corrupted.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag; /* a negative tag wraps and trips the BUG_ON */

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}

	/*
	 * The tag_map bit acts as a lock for tag_index[tag], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);
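
/*
 * Example completion path (illustrative sketch; "error" is the request's
 * completion status): release the tag before completing the request, with
 * the queue lock held across both calls.
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_queue_end_tag(q, rq);
 *	__blk_end_request_all(rq, error);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */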

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d\n",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		max_depth -= 2;
		if (!max_depth)
			max_depth = 1;
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));

	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */
	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
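
/*
 * Example dispatch loop (illustrative sketch; mydev_issue() is a
 * hypothetical hardware-submit helper): a request_fn can peek at the
 * queue head and let blk_queue_start_tag() tag, start and dequeue the
 * request.  A non-zero return means no tag is free right now; the
 * request stays on the queue and dispatch resumes on a later run.
 *
 *	static void mydev_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_peek_request(q)) != NULL) {
 *			if (blk_queue_start_tag(q, rq))
 *				break;
 *			mydev_issue(q->queuedata, rq);
 *		}
 *	}
 */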

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
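
/*
 * Example recovery path (illustrative sketch): after a controller or bus
 * reset, a driver can push everything the hardware had back onto the
 * request queue, under the queue lock.
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_queue_invalidate_tags(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * blk_requeue_request() ends each request's tag, so the tags are free
 * again by the time the requests are re-issued.
 */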