// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisted of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have writeback cache, PREFLUSH and FUA don't make any
 * difference.  The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
 * If the device has writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending are proceeded to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there are continuous stream of
 *     FUA (without PREFLUSH) requests which can block the whole machine.
 *
 * For devices which support FUA, it isn't clear whether DATA+POSTFLUSH
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/part_stat.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, blk_opf_t flags);

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

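/*
 * Work out which steps of the flush sequence a request needs: a data
 * write if the request carries sectors, plus PREFLUSH/POSTFLUSH steps
 * depending on the queue's writeback-cache and FUA capabilities.
 */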
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

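/*
 * Return the current step of @rq's flush sequence: the lowest bit not
 * yet set in rq->flush.seq.
 */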
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}

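/* Account a completed flush in the whole-disk STAT_FLUSH counters. */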
static void blk_account_io_flush(struct request *rq)
{
	struct block_device *part = rq->q->disk->part0;

	part_stat_lock();
	part_stat_inc(part, ios[STAT_FLUSH]);
	part_stat_add(part, nsecs[STAT_FLUSH],
		      ktime_get_ns() - rq->start_time_ns);
	part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 *  spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	blk_opf_t cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->queuelist, pending);
		break;

	case REQ_FSEQ_DATA:
		fq->flush_data_in_flight++;
		spin_lock(&q->requeue_lock);
		list_move(&rq->queuelist, &q->requeue_list);
		spin_unlock(&q->requeue_lock);
		blk_mq_kick_requeue_list(q);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		list_del_init(&rq->queuelist);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}

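/*
 * Completion handler for the flush request itself.  Advances every request
 * that was waiting on this flush to the next step of its sequence.
 */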
static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
				       blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!req_ref_put_and_test(flush_rq)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return RQ_END_IO_NONE;
	}

	blk_account_io_flush(flush_rq);
	/*
	 * Flush request has to be marked as IDLE when it is really ended
	 * because its .end_io() is called from timeout code path too for
	 * avoiding use-after-free.
	 */
	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
	if (fq->rq_status != BLK_STS_OK) {
		error = fq->rq_status;
		fq->rq_status = BLK_STS_OK;
	}

	if (!q->elevator) {
		flush_rq->tag = BLK_MQ_NO_TAG;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = BLK_MQ_NO_TAG;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, queuelist) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
	return RQ_END_IO_NONE;
}

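/* A flush request is identified by its distinctive ->end_io handler. */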
bool is_flush_rq(struct request *rq)
{
	return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 *  spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   blk_opf_t flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, queuelist);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3 */
	if (fq->flush_data_in_flight &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * In case of none scheduler, borrow tag from the first request
	 * since they can't be in flight at the same time. And acquire
	 * the tag's ownership for flush req.
	 *
	 * In case of IO scheduler, flush rq need to borrow scheduler tag
	 * just for cheating put/get driver tag.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator)
		flush_rq->tag = first_rq->tag;
	else
		flush_rq->internal_tag = first_rq->internal_tag;

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->end_io = flush_end_io;
	/*
	 * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
	 * implied in refcount_inc_not_zero() called from
	 * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
	 * and READ flush_rq->end_io
	 */
	smp_wmb();
	req_ref_set(flush_rq, 1);

	spin_lock(&q->requeue_lock);
	list_add_tail(&flush_rq->queuelist, &q->flush_list);
	spin_unlock(&q->requeue_lock);

	blk_mq_kick_requeue_list(q);
}

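/*
 * Completion handler installed on the DATA step of a sequenced PREFLUSH/FUA
 * request: record the step's completion and advance the state machine.
 */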
static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
					       blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	fq->flush_data_in_flight--;
	/*
	 * May have been corrupted by rq->rq_next reuse, we need to
	 * re-initialize rq->queuelist before reusing it here.
	 */
	INIT_LIST_HEAD(&rq->queuelist);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
	return RQ_END_IO_NONE;
}

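/* Prepare @rq for flush sequencing and divert its completion handler. */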
static void blk_rq_init_flush(struct request *rq)
{
	rq->flush.seq = 0;
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io;
	rq->end_io = mq_flush_data_end_io;
}

/*
 * Insert a PREFLUSH/FUA request into the flush state machine.
 * Returns true if the request has been consumed by the flush state machine,
 * or false if the caller should continue to process it.
 */
bool blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/* FLUSH/FUA request must never be merged */
	WARN_ON_ONCE(rq->bio != rq->biotail);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	switch (policy) {
	case 0:
		/*
		 * An empty flush handed down from a stacking driver may
		 * translate into nothing if the underlying device does not
		 * advertise a write-back cache.  In this case, simply
		 * complete the request.
		 */
		blk_mq_end_request(rq, 0);
		return true;
	case REQ_FSEQ_DATA:
		/*
		 * If there's data, but no flush is necessary, the request can
		 * be processed directly without going through flush machinery.
		 * Queue for normal execution.
		 */
		return false;
	case REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH:
		/*
		 * Initialize the flush fields and completion handler to
		 * trigger the post flush, and then just pass the command on.
		 */
		blk_rq_init_flush(rq);
		rq->flush.seq |= REQ_FSEQ_PREFLUSH;
		spin_lock_irq(&fq->mq_flush_lock);
		fq->flush_data_in_flight++;
		spin_unlock_irq(&fq->mq_flush_lock);
		return false;
	default:
		/*
		 * Mark the request as part of a flush sequence and submit it
		 * for further processing to the flush state machine.
		 */
		blk_rq_init_flush(rq);
		spin_lock_irq(&fq->mq_flush_lock);
		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&fq->mq_flush_lock);
		return true;
	}
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
	return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);

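/*
 * Allocate a flush queue and its pre-allocated flush request, with room
 * for @cmd_size bytes of driver payload behind the request, rounded up to
 * a cache line.
 */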
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* bio based request queue hasn't flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}

/*
 * Allow driver to set its own lock class to fq->mq_flush_lock for
 * avoiding lockdep complaint.
 *
 * flush_end_io() may be called recursively from some driver, such as
 * nvme-loop, so lockdep may complain 'possible recursive locking' because
 * all 'struct blk_flush_queue' instances share the same lock class key.
 * The recursive calling is safe because we never grab a nested lock of
 * the same 'struct blk_flush_queue' instance.
 *
 * Setting a per-hw-queue lock class key lets lockdep distinguish the
 * flush queue locks of stacked devices from each other.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
				   struct lock_class_key *key)
{
	lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);