linux/block/blk-sysfs.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Functions related to sysfs handling
   4 */
   5#include <linux/kernel.h>
   6#include <linux/slab.h>
   7#include <linux/module.h>
   8#include <linux/bio.h>
   9#include <linux/blkdev.h>
  10#include <linux/backing-dev.h>
  11#include <linux/blktrace_api.h>
  12#include <linux/debugfs.h>
  13
  14#include "blk.h"
  15#include "blk-mq.h"
  16#include "blk-mq-debugfs.h"
  17#include "blk-mq-sched.h"
  18#include "blk-rq-qos.h"
  19#include "blk-wbt.h"
  20#include "blk-cgroup.h"
  21#include "blk-throttle.h"
  22
  23struct queue_sysfs_entry {
  24        struct attribute attr;
  25        ssize_t (*show)(struct request_queue *, char *);
  26        ssize_t (*store)(struct request_queue *, const char *, size_t);
  27};
  28
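/*
 * Helpers shared by most queue attributes: values are shown and parsed as
 * decimal unsigned longs.  The store helper rejects anything above UINT_MAX
 * because the queue limits behind these attributes are 32-bit.
 */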
  29static ssize_t
  30queue_var_show(unsigned long var, char *page)
  31{
  32        return sprintf(page, "%lu\n", var);
  33}
  34
  35static ssize_t
  36queue_var_store(unsigned long *var, const char *page, size_t count)
  37{
  38        int err;
  39        unsigned long v;
  40
  41        err = kstrtoul(page, 10, &v);
  42        if (err || v > UINT_MAX)
  43                return -EINVAL;
  44
  45        *var = v;
  46
  47        return count;
  48}
  49
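/*
 * nr_requests is only meaningful for request-based (blk-mq) queues.  Writes
 * below BLKDEV_MIN_RQ are rounded up, and the new depth is applied to the
 * tag sets through blk_mq_update_nr_requests().
 */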
  50static ssize_t queue_requests_show(struct request_queue *q, char *page)
  51{
  52        return queue_var_show(q->nr_requests, page);
  53}
  54
  55static ssize_t
  56queue_requests_store(struct request_queue *q, const char *page, size_t count)
  57{
  58        unsigned long nr;
  59        int ret, err;
  60
  61        if (!queue_is_mq(q))
  62                return -EINVAL;
  63
  64        ret = queue_var_store(&nr, page, count);
  65        if (ret < 0)
  66                return ret;
  67
  68        if (nr < BLKDEV_MIN_RQ)
  69                nr = BLKDEV_MIN_RQ;
  70
  71        err = blk_mq_update_nr_requests(q, nr);
  72        if (err)
  73                return err;
  74
  75        return ret;
  76}
  77
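/*
 * read_ahead_kb is backed by bdi->ra_pages, which is a page count;
 * PAGE_SHIFT - 10 converts between pages and kilobytes.
 */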
  78static ssize_t queue_ra_show(struct request_queue *q, char *page)
  79{
  80        unsigned long ra_kb;
  81
  82        if (!q->disk)
  83                return -EINVAL;
  84        ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
  85        return queue_var_show(ra_kb, page);
  86}
  87
  88static ssize_t
  89queue_ra_store(struct request_queue *q, const char *page, size_t count)
  90{
  91        unsigned long ra_kb;
  92        ssize_t ret;
  93
  94        if (!q->disk)
  95                return -EINVAL;
  96        ret = queue_var_store(&ra_kb, page, count);
  97        if (ret < 0)
  98                return ret;
  99        q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 100        return ret;
 101}
 102
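/*
 * Queue limits are kept in 512-byte sectors internally; the ">> 1" below
 * converts sectors to kilobytes for the *_kb attributes.
 */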
 103static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 104{
 105        int max_sectors_kb = queue_max_sectors(q) >> 1;
 106
 107        return queue_var_show(max_sectors_kb, page);
 108}
 109
 110static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
 111{
 112        return queue_var_show(queue_max_segments(q), page);
 113}
 114
 115static ssize_t queue_max_discard_segments_show(struct request_queue *q,
 116                char *page)
 117{
 118        return queue_var_show(queue_max_discard_segments(q), page);
 119}
 120
 121static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
 122{
 123        return queue_var_show(q->limits.max_integrity_segments, page);
 124}
 125
 126static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 127{
 128        return queue_var_show(queue_max_segment_size(q), page);
 129}
 130
 131static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
 132{
 133        return queue_var_show(queue_logical_block_size(q), page);
 134}
 135
 136static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
 137{
 138        return queue_var_show(queue_physical_block_size(q), page);
 139}
 140
 141static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
 142{
 143        return queue_var_show(q->limits.chunk_sectors, page);
 144}
 145
 146static ssize_t queue_io_min_show(struct request_queue *q, char *page)
 147{
 148        return queue_var_show(queue_io_min(q), page);
 149}
 150
 151static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
 152{
 153        return queue_var_show(queue_io_opt(q), page);
 154}
 155
 156static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
 157{
 158        return queue_var_show(q->limits.discard_granularity, page);
 159}
 160
 161static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
 162{
 163
 164        return sprintf(page, "%llu\n",
 165                (unsigned long long)q->limits.max_hw_discard_sectors << 9);
 166}
 167
 168static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
 169{
 170        return sprintf(page, "%llu\n",
 171                       (unsigned long long)q->limits.max_discard_sectors << 9);
 172}
 173
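/*
 * discard_max_bytes can be lowered by the user.  The value is given in
 * bytes, must be aligned to the discard granularity, and is capped at the
 * hardware limit reported by discard_max_hw_bytes.
 */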
 174static ssize_t queue_discard_max_store(struct request_queue *q,
 175                                       const char *page, size_t count)
 176{
 177        unsigned long max_discard;
 178        ssize_t ret = queue_var_store(&max_discard, page, count);
 179
 180        if (ret < 0)
 181                return ret;
 182
 183        if (max_discard & (q->limits.discard_granularity - 1))
 184                return -EINVAL;
 185
 186        max_discard >>= 9;
 187        if (max_discard > UINT_MAX)
 188                return -EINVAL;
 189
 190        if (max_discard > q->limits.max_hw_discard_sectors)
 191                max_discard = q->limits.max_hw_discard_sectors;
 192
 193        q->limits.max_discard_sectors = max_discard;
 194        return ret;
 195}
 196
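/*
 * discard_zeroes_data and write_same_max_bytes describe functionality the
 * block layer no longer provides; the attributes are kept so existing user
 * space keeps working and always report 0.
 */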
 197static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
 198{
 199        return queue_var_show(0, page);
 200}
 201
 202static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
 203{
 204        return queue_var_show(0, page);
 205}
 206
 207static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
 208{
 209        return sprintf(page, "%llu\n",
 210                (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
 211}
 212
 213static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
 214                                                 char *page)
 215{
 216        return queue_var_show(queue_zone_write_granularity(q), page);
 217}
 218
 219static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
 220{
 221        unsigned long long max_sectors = q->limits.max_zone_append_sectors;
 222
 223        return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
 224}
 225
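/*
 * Writing max_sectors_kb sets the user-requested cap (max_user_sectors).
 * Zero resets it to the default (the hardware limit capped at
 * BLK_DEF_MAX_SECTORS); any other value must lie between one page and
 * max_hw_sectors_kb.  bdi->io_pages is updated to match.
 */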
 226static ssize_t
 227queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 228{
 229        unsigned long var;
 230        unsigned int max_sectors_kb,
 231                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
 232                        page_kb = 1 << (PAGE_SHIFT - 10);
 233        ssize_t ret = queue_var_store(&var, page, count);
 234
 235        if (ret < 0)
 236                return ret;
 237
 238        max_sectors_kb = (unsigned int)var;
 239        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb,
 240                                         q->limits.max_dev_sectors >> 1);
 241        if (max_sectors_kb == 0) {
 242                q->limits.max_user_sectors = 0;
 243                max_sectors_kb = min(max_hw_sectors_kb,
 244                                     BLK_DEF_MAX_SECTORS >> 1);
 245        } else {
 246                if (max_sectors_kb > max_hw_sectors_kb ||
 247                    max_sectors_kb < page_kb)
 248                        return -EINVAL;
 249                q->limits.max_user_sectors = max_sectors_kb << 1;
 250        }
 251
 252        spin_lock_irq(&q->queue_lock);
 253        q->limits.max_sectors = max_sectors_kb << 1;
 254        if (q->disk)
 255                q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
 256        spin_unlock_irq(&q->queue_lock);
 257
 258        return ret;
 259}
 260
 261static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 262{
 263        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
 264
 265        return queue_var_show(max_hw_sectors_kb, page);
 266}
 267
 268static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
 269{
 270        return queue_var_show(q->limits.virt_boundary_mask, page);
 271}
 272
 273static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
 274{
 275        return queue_var_show(queue_dma_alignment(q), page);
 276}
 277
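/*
 * QUEUE_SYSFS_BIT_FNS() generates a show/store pair that maps an attribute
 * onto a single queue flag.  "neg" inverts the value, so e.g. "rotational"
 * is the negation of QUEUE_FLAG_NONROT.
 */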
 278#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
 279static ssize_t                                                          \
 280queue_##name##_show(struct request_queue *q, char *page)                \
 281{                                                                       \
 282        int bit;                                                        \
 283        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
 284        return queue_var_show(neg ? !bit : bit, page);                  \
 285}                                                                       \
 286static ssize_t                                                          \
 287queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
 288{                                                                       \
 289        unsigned long val;                                              \
 290        ssize_t ret;                                                    \
 291        ret = queue_var_store(&val, page, count);                       \
 292        if (ret < 0)                                                    \
  293                return ret;                                             \
 294        if (neg)                                                        \
 295                val = !val;                                             \
 296                                                                        \
 297        if (val)                                                        \
 298                blk_queue_flag_set(QUEUE_FLAG_##flag, q);               \
 299        else                                                            \
 300                blk_queue_flag_clear(QUEUE_FLAG_##flag, q);             \
 301        return ret;                                                     \
 302}
 303
 304QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
 305QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
 306QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
 307QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
 308#undef QUEUE_SYSFS_BIT_FNS
 309
 310static ssize_t queue_zoned_show(struct request_queue *q, char *page)
 311{
 312        switch (blk_queue_zoned_model(q)) {
 313        case BLK_ZONED_HA:
 314                return sprintf(page, "host-aware\n");
 315        case BLK_ZONED_HM:
 316                return sprintf(page, "host-managed\n");
 317        default:
 318                return sprintf(page, "none\n");
 319        }
 320}
 321
 322static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
 323{
 324        return queue_var_show(disk_nr_zones(q->disk), page);
 325}
 326
 327static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
 328{
 329        return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
 330}
 331
 332static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
 333{
 334        return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
 335}
 336
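/*
 * nomerges encodes two flags: 0 allows all merging, 1 disables only the
 * more expensive extended merge lookups, and 2 disables request merging
 * entirely.
 */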
 337static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
 338{
 339        return queue_var_show((blk_queue_nomerges(q) << 1) |
 340                               blk_queue_noxmerges(q), page);
 341}
 342
 343static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 344                                    size_t count)
 345{
 346        unsigned long nm;
 347        ssize_t ret = queue_var_store(&nm, page, count);
 348
 349        if (ret < 0)
 350                return ret;
 351
 352        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
 353        blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
 354        if (nm == 2)
 355                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 356        else if (nm)
 357                blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 358
 359        return ret;
 360}
 361
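/*
 * rq_affinity: 1 (SAME_COMP) completes a request on a CPU in the same group
 * as the submitter, 2 (SAME_FORCE) forces completion onto the exact
 * submitting CPU, and 0 disables completion affinity.  The store side is
 * only built on SMP kernels.
 */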
 362static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
 363{
 364        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
 365        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
 366
 367        return queue_var_show(set << force, page);
 368}
 369
 370static ssize_t
 371queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 372{
 373        ssize_t ret = -EINVAL;
 374#ifdef CONFIG_SMP
 375        unsigned long val;
 376
 377        ret = queue_var_store(&val, page, count);
 378        if (ret < 0)
 379                return ret;
 380
 381        if (val == 2) {
 382                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
 383                blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
 384        } else if (val == 1) {
 385                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
 386                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
 387        } else if (val == 0) {
 388                blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
 389                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
 390        }
 391#endif
 392        return ret;
 393}
 394
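/*
 * io_poll_delay and io_poll can no longer be tuned through sysfs: the delay
 * attribute always reads as -1 and writes to either file are accepted (with
 * a ratelimited notice for io_poll) but have no effect.
 */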
 395static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
 396{
 397        return sprintf(page, "%d\n", -1);
 398}
 399
 400static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
 401                                size_t count)
 402{
 403        return count;
 404}
 405
 406static ssize_t queue_poll_show(struct request_queue *q, char *page)
 407{
 408        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
 409}
 410
 411static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 412                                size_t count)
 413{
 414        if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 415                return -EINVAL;
 416        pr_info_ratelimited("writes to the poll attribute are ignored.\n");
 417        pr_info_ratelimited("please use driver specific parameters instead.\n");
 418        return count;
 419}
 420
 421static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
 422{
 423        return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
 424}
 425
 426static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
 427                                  size_t count)
 428{
 429        unsigned int val;
 430        int err;
 431
 432        err = kstrtou32(page, 10, &val);
 433        if (err || val == 0)
 434                return -EINVAL;
 435
 436        blk_queue_rq_timeout(q, msecs_to_jiffies(val));
 437
 438        return count;
 439}
 440
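/*
 * write_cache reports whether the queue runs in write back or write through
 * mode.  Switching to "write back" is only allowed when the device actually
 * has a volatile write cache (QUEUE_FLAG_HW_WC); "write through" or the
 * legacy "none" clears the flag.
 */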
 441static ssize_t queue_wc_show(struct request_queue *q, char *page)
 442{
 443        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 444                return sprintf(page, "write back\n");
 445
 446        return sprintf(page, "write through\n");
 447}
 448
 449static ssize_t queue_wc_store(struct request_queue *q, const char *page,
 450                              size_t count)
 451{
 452        if (!strncmp(page, "write back", 10)) {
 453                if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
 454                        return -EINVAL;
 455                blk_queue_flag_set(QUEUE_FLAG_WC, q);
 456        } else if (!strncmp(page, "write through", 13) ||
 457                 !strncmp(page, "none", 4)) {
 458                blk_queue_flag_clear(QUEUE_FLAG_WC, q);
 459        } else {
 460                return -EINVAL;
 461        }
 462
 463        return count;
 464}
 465
 466static ssize_t queue_fua_show(struct request_queue *q, char *page)
 467{
 468        return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
 469}
 470
 471static ssize_t queue_dax_show(struct request_queue *q, char *page)
 472{
 473        return queue_var_show(blk_queue_dax(q), page);
 474}
 475
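/*
 * QUEUE_RO_ENTRY()/QUEUE_RW_ENTRY() declare the sysfs attributes, wiring a
 * file name to the matching _show (and _store) helper with 0444 or 0644
 * permissions.
 */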
 476#define QUEUE_RO_ENTRY(_prefix, _name)                  \
 477static struct queue_sysfs_entry _prefix##_entry = {     \
 478        .attr   = { .name = _name, .mode = 0444 },      \
 479        .show   = _prefix##_show,                       \
 480};
 481
 482#define QUEUE_RW_ENTRY(_prefix, _name)                  \
 483static struct queue_sysfs_entry _prefix##_entry = {     \
 484        .attr   = { .name = _name, .mode = 0644 },      \
 485        .show   = _prefix##_show,                       \
 486        .store  = _prefix##_store,                      \
 487};
 488
 489QUEUE_RW_ENTRY(queue_requests, "nr_requests");
 490QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
 491QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
 492QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
 493QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
 494QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
 495QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
 496QUEUE_RW_ENTRY(elv_iosched, "scheduler");
 497
 498QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
 499QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
 500QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
 501QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
 502QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");
 503
 504QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
 505QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
 506QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
 507QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
 508QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
 509
 510QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
 511QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
 512QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
 513QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
 514
 515QUEUE_RO_ENTRY(queue_zoned, "zoned");
 516QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
 517QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
 518QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
 519
 520QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
 521QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
 522QUEUE_RW_ENTRY(queue_poll, "io_poll");
 523QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
 524QUEUE_RW_ENTRY(queue_wc, "write_cache");
 525QUEUE_RO_ENTRY(queue_fua, "fua");
 526QUEUE_RO_ENTRY(queue_dax, "dax");
 527QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
 528QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
 529QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
 530
 531#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 532QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
 533#endif
 534
 535/* legacy alias for logical_block_size: */
 536static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 537        .attr = {.name = "hw_sector_size", .mode = 0444 },
 538        .show = queue_logical_block_size_show,
 539};
 540
 541QUEUE_RW_ENTRY(queue_nonrot, "rotational");
 542QUEUE_RW_ENTRY(queue_iostats, "iostats");
 543QUEUE_RW_ENTRY(queue_random, "add_random");
 544QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
 545
 546#ifdef CONFIG_BLK_WBT
 547static ssize_t queue_var_store64(s64 *var, const char *page)
 548{
 549        int err;
 550        s64 v;
 551
 552        err = kstrtos64(page, 10, &v);
 553        if (err < 0)
 554                return err;
 555
 556        *var = v;
 557        return 0;
 558}
 559
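/*
 * wbt_lat_usec is the writeback throttling latency target in microseconds.
 * It reads as 0 while wbt is disabled; writing -1 restores the default
 * target.  The queue is frozen and quiesced around the update because a new
 * target may enable or disable wbt entirely.
 */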
 560static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
 561{
 562        if (!wbt_rq_qos(q))
 563                return -EINVAL;
 564
 565        if (wbt_disabled(q))
 566                return sprintf(page, "0\n");
 567
 568        return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
 569}
 570
 571static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
 572                                  size_t count)
 573{
 574        struct rq_qos *rqos;
 575        ssize_t ret;
 576        s64 val;
 577
 578        ret = queue_var_store64(&val, page);
 579        if (ret < 0)
 580                return ret;
 581        if (val < -1)
 582                return -EINVAL;
 583
 584        rqos = wbt_rq_qos(q);
 585        if (!rqos) {
 586                ret = wbt_init(q->disk);
 587                if (ret)
 588                        return ret;
 589        }
 590
 591        if (val == -1)
 592                val = wbt_default_latency_nsec(q);
 593        else if (val >= 0)
 594                val *= 1000ULL;
 595
 596        if (wbt_get_min_lat(q) == val)
 597                return count;
 598
 599        /*
 600         * Ensure that the queue is idled, in case the latency update
 601         * ends up either enabling or disabling wbt completely. We can't
 602         * have IO inflight if that happens.
 603         */
 604        blk_mq_freeze_queue(q);
 605        blk_mq_quiesce_queue(q);
 606
 607        wbt_set_min_lat(q, val);
 608
 609        blk_mq_unquiesce_queue(q);
 610        blk_mq_unfreeze_queue(q);
 611
 612        return count;
 613}
 614
 615QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
 616#endif
 617
 618/* Common attributes for bio-based and request-based queues. */
 619static struct attribute *queue_attrs[] = {
 620        &queue_ra_entry.attr,
 621        &queue_max_hw_sectors_entry.attr,
 622        &queue_max_sectors_entry.attr,
 623        &queue_max_segments_entry.attr,
 624        &queue_max_discard_segments_entry.attr,
 625        &queue_max_integrity_segments_entry.attr,
 626        &queue_max_segment_size_entry.attr,
 627        &queue_hw_sector_size_entry.attr,
 628        &queue_logical_block_size_entry.attr,
 629        &queue_physical_block_size_entry.attr,
 630        &queue_chunk_sectors_entry.attr,
 631        &queue_io_min_entry.attr,
 632        &queue_io_opt_entry.attr,
 633        &queue_discard_granularity_entry.attr,
 634        &queue_discard_max_entry.attr,
 635        &queue_discard_max_hw_entry.attr,
 636        &queue_discard_zeroes_data_entry.attr,
 637        &queue_write_same_max_entry.attr,
 638        &queue_write_zeroes_max_entry.attr,
 639        &queue_zone_append_max_entry.attr,
 640        &queue_zone_write_granularity_entry.attr,
 641        &queue_nonrot_entry.attr,
 642        &queue_zoned_entry.attr,
 643        &queue_nr_zones_entry.attr,
 644        &queue_max_open_zones_entry.attr,
 645        &queue_max_active_zones_entry.attr,
 646        &queue_nomerges_entry.attr,
 647        &queue_iostats_entry.attr,
 648        &queue_stable_writes_entry.attr,
 649        &queue_random_entry.attr,
 650        &queue_poll_entry.attr,
 651        &queue_wc_entry.attr,
 652        &queue_fua_entry.attr,
 653        &queue_dax_entry.attr,
 654        &queue_poll_delay_entry.attr,
 655#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 656        &blk_throtl_sample_time_entry.attr,
 657#endif
 658        &queue_virt_boundary_mask_entry.attr,
 659        &queue_dma_alignment_entry.attr,
 660        NULL,
 661};
 662
 663/* Request-based queue attributes that are not relevant for bio-based queues. */
 664static struct attribute *blk_mq_queue_attrs[] = {
 665        &queue_requests_entry.attr,
 666        &elv_iosched_entry.attr,
 667        &queue_rq_affinity_entry.attr,
 668        &queue_io_timeout_entry.attr,
 669#ifdef CONFIG_BLK_WBT
 670        &queue_wb_lat_entry.attr,
 671#endif
 672        NULL,
 673};
 674
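/*
 * The is_visible callbacks trim the attribute lists per queue: the zone
 * limits are hidden on non-zoned devices, and the blk-mq group (including
 * io_timeout when the driver has no ->timeout handler) is hidden for
 * bio-based queues.
 */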
 675static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
 676                                int n)
 677{
 678        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
 679        struct request_queue *q = disk->queue;
 680
 681        if ((attr == &queue_max_open_zones_entry.attr ||
 682             attr == &queue_max_active_zones_entry.attr) &&
 683            !blk_queue_is_zoned(q))
 684                return 0;
 685
 686        return attr->mode;
 687}
 688
 689static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
 690                                         struct attribute *attr, int n)
 691{
 692        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
 693        struct request_queue *q = disk->queue;
 694
 695        if (!queue_is_mq(q))
 696                return 0;
 697
 698        if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
 699                return 0;
 700
 701        return attr->mode;
 702}
 703
 704static struct attribute_group queue_attr_group = {
 705        .attrs = queue_attrs,
 706        .is_visible = queue_attr_visible,
 707};
 708
 709static struct attribute_group blk_mq_queue_attr_group = {
 710        .attrs = blk_mq_queue_attrs,
 711        .is_visible = blk_mq_queue_attr_visible,
 712};
 713
 714#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
 715
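/*
 * All show/store callbacks run with q->sysfs_lock held, serializing them
 * against each other and against queue registration and unregistration.
 */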
 716static ssize_t
 717queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 718{
 719        struct queue_sysfs_entry *entry = to_queue(attr);
 720        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
 721        struct request_queue *q = disk->queue;
 722        ssize_t res;
 723
 724        if (!entry->show)
 725                return -EIO;
 726        mutex_lock(&q->sysfs_lock);
 727        res = entry->show(q, page);
 728        mutex_unlock(&q->sysfs_lock);
 729        return res;
 730}
 731
 732static ssize_t
 733queue_attr_store(struct kobject *kobj, struct attribute *attr,
 734                    const char *page, size_t length)
 735{
 736        struct queue_sysfs_entry *entry = to_queue(attr);
 737        struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
 738        struct request_queue *q = disk->queue;
 739        ssize_t res;
 740
 741        if (!entry->store)
 742                return -EIO;
 743
 744        mutex_lock(&q->sysfs_lock);
 745        res = entry->store(q, page, length);
 746        mutex_unlock(&q->sysfs_lock);
 747        return res;
 748}
 749
 750static const struct sysfs_ops queue_sysfs_ops = {
 751        .show   = queue_attr_show,
 752        .store  = queue_attr_store,
 753};
 754
 755static const struct attribute_group *blk_queue_attr_groups[] = {
 756        &queue_attr_group,
 757        &blk_mq_queue_attr_group,
 758        NULL
 759};
 760
 761static void blk_queue_release(struct kobject *kobj)
 762{
 763        /* nothing to do here, all data is associated with the parent gendisk */
 764}
 765
 766static const struct kobj_type blk_queue_ktype = {
 767        .default_groups = blk_queue_attr_groups,
 768        .sysfs_ops      = &queue_sysfs_ops,
 769        .release        = blk_queue_release,
 770};
 771
 772static void blk_debugfs_remove(struct gendisk *disk)
 773{
 774        struct request_queue *q = disk->queue;
 775
 776        mutex_lock(&q->debugfs_mutex);
 777        blk_trace_shutdown(q);
 778        debugfs_remove_recursive(q->debugfs_dir);
 779        q->debugfs_dir = NULL;
 780        q->sched_debugfs_dir = NULL;
 781        q->rqos_debugfs_dir = NULL;
 782        mutex_unlock(&q->debugfs_mutex);
 783}
 784
 785/**
 786 * blk_register_queue - register a block layer queue with sysfs
 787 * @disk: Disk of which the request queue should be registered with sysfs.
 788 */
 789int blk_register_queue(struct gendisk *disk)
 790{
 791        struct request_queue *q = disk->queue;
 792        int ret;
 793
 794        mutex_lock(&q->sysfs_dir_lock);
 795        kobject_init(&disk->queue_kobj, &blk_queue_ktype);
 796        ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
 797        if (ret < 0)
 798                goto out_put_queue_kobj;
 799
 800        if (queue_is_mq(q)) {
 801                ret = blk_mq_sysfs_register(disk);
 802                if (ret)
 803                        goto out_put_queue_kobj;
 804        }
 805        mutex_lock(&q->sysfs_lock);
 806
 807        mutex_lock(&q->debugfs_mutex);
 808        q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
 809        if (queue_is_mq(q))
 810                blk_mq_debugfs_register(q);
 811        mutex_unlock(&q->debugfs_mutex);
 812
 813        ret = disk_register_independent_access_ranges(disk);
 814        if (ret)
 815                goto out_debugfs_remove;
 816
 817        if (q->elevator) {
 818                ret = elv_register_queue(q, false);
 819                if (ret)
 820                        goto out_unregister_ia_ranges;
 821        }
 822
 823        ret = blk_crypto_sysfs_register(disk);
 824        if (ret)
 825                goto out_elv_unregister;
 826
 827        blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
 828        wbt_enable_default(disk);
 829        blk_throtl_register(disk);
 830
  831        /* Now everything is ready; send out the KOBJ_ADD uevent. */
 832        kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
 833        if (q->elevator)
 834                kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
 835        mutex_unlock(&q->sysfs_lock);
 836        mutex_unlock(&q->sysfs_dir_lock);
 837
 838        /*
 839         * SCSI probing may synchronously create and destroy a lot of
 840         * request_queues for non-existent devices.  Shutting down a fully
  841         * functional queue takes measurable wallclock time as RCU grace
 842         * periods are involved.  To avoid excessive latency in these
 843         * cases, a request_queue starts out in a degraded mode which is
 844         * faster to shut down and is made fully functional here as
 845         * request_queues for non-existent devices never get registered.
 846         */
 847        if (!blk_queue_init_done(q)) {
 848                blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
 849                percpu_ref_switch_to_percpu(&q->q_usage_counter);
 850        }
 851
 852        return ret;
 853
 854out_elv_unregister:
 855        elv_unregister_queue(q);
 856out_unregister_ia_ranges:
 857        disk_unregister_independent_access_ranges(disk);
 858out_debugfs_remove:
 859        blk_debugfs_remove(disk);
 860        mutex_unlock(&q->sysfs_lock);
 861out_put_queue_kobj:
 862        kobject_put(&disk->queue_kobj);
 863        mutex_unlock(&q->sysfs_dir_lock);
 864        return ret;
 865}
 866
 867/**
 868 * blk_unregister_queue - counterpart of blk_register_queue()
 869 * @disk: Disk of which the request queue should be unregistered from sysfs.
 870 *
 871 * Note: the caller is responsible for guaranteeing that this function is called
 872 * after blk_register_queue() has finished.
 873 */
 874void blk_unregister_queue(struct gendisk *disk)
 875{
 876        struct request_queue *q = disk->queue;
 877
 878        if (WARN_ON(!q))
 879                return;
 880
 881        /* Return early if disk->queue was never registered. */
 882        if (!blk_queue_registered(q))
 883                return;
 884
 885        /*
 886         * Since sysfs_remove_dir() prevents adding new directory entries
 887         * before removal of existing entries starts, protect against
 888         * concurrent elv_iosched_store() calls.
 889         */
 890        mutex_lock(&q->sysfs_lock);
 891        blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
 892        mutex_unlock(&q->sysfs_lock);
 893
 894        mutex_lock(&q->sysfs_dir_lock);
 895        /*
 896         * Remove the sysfs attributes before unregistering the queue data
 897         * structures that can be modified through sysfs.
 898         */
 899        if (queue_is_mq(q))
 900                blk_mq_sysfs_unregister(disk);
 901        blk_crypto_sysfs_unregister(disk);
 902
 903        mutex_lock(&q->sysfs_lock);
 904        elv_unregister_queue(q);
 905        disk_unregister_independent_access_ranges(disk);
 906        mutex_unlock(&q->sysfs_lock);
 907
 908        /* Now that we've deleted all child objects, we can delete the queue. */
 909        kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
 910        kobject_del(&disk->queue_kobj);
 911        mutex_unlock(&q->sysfs_dir_lock);
 912
 913        blk_debugfs_remove(disk);
 914}
 915