/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

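/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */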
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

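/* RCU callback that frees an icq once the grace period has elapsed */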
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

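/* Exit an icq.  Called with both ioc and q locked. */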
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

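/* Release an icq.  Called with both ioc and q locked. */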
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting and clearing of the lookup hint are done under
	 * queue_lock.  If the hint isn't pointing to @icq now, it never
	 * will; the hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might be gone by the time the RCU callback runs, making
	 * it impossible to determine icq_cache then.  Record it now.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

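/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */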
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting an icq may call back into put_io_context() through the
	 * elevator, which would trigger a lockdep warning on ioc->lock.
	 * The ioc's are guaranteed to be different, so use a separate
	 * locking subclass.  Use the irqsave variant as there is no
	 * spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

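/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */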
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			schedule_work(&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

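/* Called by the exiting task */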
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;
	struct io_cq *icq;
	struct hlist_node *n;
	unsigned long flags;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need the ioc lock to walk icq_list and the q lock to exit each
	 * icq.  Perform reverse double locking; see ioc_release_fn() for
	 * the explanation of the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;
		if (spin_trylock(icq->q->queue_lock)) {
			ioc_exit_icq(icq);
			spin_unlock(icq->q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

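/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */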
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}

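/*
 * Allocate and initialize a new io_context and try to install it as
 * @task->io_context.  See create_io_context() for the fast-path wrapper.
 */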
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
				int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  Don't install if someone else already did, or
	 * if @task, which isn't %current, is exiting.  An exiting %current
	 * is still allowed to create an ioc because the exit path itself
	 * may issue IO; it is responsible for not doing so after
	 * exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);
	task_unlock(task);
}

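/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.  Returns %NULL only if the io_context doesn't exist and
 * can't be created.
 *
 * This function always goes through task_lock(); for %current it is
 * cheaper to use %current->io_context with get_io_context() directly.
 */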
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (create_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

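/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the @ioc - @q pair.  Must be called
 * with @q->queue_lock held.
 */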
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using a radix tree and a hint
	 * pointer, both of which are protected with RCU.  All removals are
	 * done holding both the q and ioc locks, and we're holding the q
	 * lock - if we find an icq which points to us, it's guaranteed to
	 * be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

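/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure an io_cq linking %current->io_context and @q exists.  If
 * either the io_context or the icq doesn't exist, it is created with
 * @gfp_mask.  The caller is responsible for ensuring @q stays alive
 * until this function returns.
 */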
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc;
	struct io_cq *icq;

	/* allocate stuff */
	ioc = create_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;

	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		/* someone else already linked an icq for this pair, use it */
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

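/* Set @flags on all icq's of @ioc.  The caller must hold @ioc->lock. */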
void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
{
	struct io_cq *icq;
	struct hlist_node *n;

	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
		icq->flags |= flags;
}

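/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
 * icq's; the io scheduler is responsible for checking the bit and
 * applying the change on the request issue path.
 */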
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc->ioprio = ioprio;
	ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

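/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's; the
 * io scheduler is responsible for checking the bit and applying the
 * change on the request issue path.
 */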
void ioc_cgroup_changed(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
EXPORT_SYMBOL(ioc_cgroup_changed);

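/**
 * icq_get_changed - fetch and clear icq changed mask
 * @icq: icq of interest
 *
 * Fetch and clear the %ICQ_*_CHANGED bits of @icq.  Grabs and releases
 * @icq->ioc->lock.
 */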
unsigned icq_get_changed(struct io_cq *icq)
{
	unsigned int changed = 0;
	unsigned long flags;

	if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
		spin_lock_irqsave(&icq->ioc->lock, flags);
		changed = icq->flags & ICQ_CHANGED_MASK;
		icq->flags &= ~ICQ_CHANGED_MASK;
		spin_unlock_irqrestore(&icq->ioc->lock, flags);
	}
	return changed;
}
EXPORT_SYMBOL(icq_get_changed);

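/* Create the slab cache used for io_context allocations at boot */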
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);