linux/drivers/cxl/core.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include "cxl.h"
#include "mem.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a sysfs hierarchy for control devices and a rendezvous
 * point for cross-device interleave coordination through cxl ports.
 */
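
/*
 * An illustrative sketch of the resulting device hierarchy (the exact
 * set of ports, dports, and decoders depends on the platform topology
 * that callers of devm_cxl_add_port() and devm_cxl_add_decoder()
 * enumerate; the symlink targets below are hypothetical):
 *
 *	root0
 *	├── decoder0.0
 *	├── dport0 -> [firmware/PCI dport device]
 *	├── uport -> [firmware "host bridge" device]
 *	└── port1
 *	    └── decoder1.0
 */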

static DEFINE_IDA(cxl_port_ida);

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static struct attribute *cxl_base_attributes[] = {
        &dev_attr_devtype.attr,
        NULL,
};

static struct attribute_group cxl_base_attribute_group = {
        .attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%#llx\n", cxld->range.start);
}
static DEVICE_ATTR_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
                           struct device_attribute *attr, char *buf) \
{                                                                    \
        struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
                                                                     \
        return sysfs_emit(buf, "%s\n",                               \
                          (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
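
/*
 * Each CXL_DECODER_FLAG_ATTR() invocation above expands to one
 * read-only sysfs attribute; e.g. the cap_pmem instance emits a
 * cap_pmem_show() that prints "1" when CXL_DECODER_F_PMEM is set in
 * cxld->flags and "0" otherwise.
 */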

static ssize_t target_type_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);

        switch (cxld->target_type) {
        case CXL_DECODER_ACCELERATOR:
                return sysfs_emit(buf, "accelerator\n");
        case CXL_DECODER_EXPANDER:
                return sysfs_emit(buf, "expander\n");
        }
        return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);

static ssize_t target_list_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);
        ssize_t offset = 0;
        int i, rc = 0;

        device_lock(dev);
        for (i = 0; i < cxld->interleave_ways; i++) {
                struct cxl_dport *dport = cxld->target[i];
                struct cxl_dport *next = NULL;

                if (!dport)
                        break;

                if (i + 1 < cxld->interleave_ways)
                        next = cxld->target[i + 1];
                rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
                                   next ? "," : "");
                if (rc < 0)
                        break;
                offset += rc;
        }
        device_unlock(dev);

        if (rc < 0)
                return rc;

        rc = sysfs_emit_at(buf, offset, "\n");
        if (rc < 0)
                return rc;

        return offset + rc;
}
static DEVICE_ATTR_RO(target_list);

static struct attribute *cxl_decoder_base_attrs[] = {
        &dev_attr_start.attr,
        &dev_attr_size.attr,
        &dev_attr_locked.attr,
        &dev_attr_target_list.attr,
        NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
        .attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
        &dev_attr_cap_pmem.attr,
        &dev_attr_cap_ram.attr,
        &dev_attr_cap_type2.attr,
        &dev_attr_cap_type3.attr,
        NULL,
};

static struct attribute_group cxl_decoder_root_attribute_group = {
        .attrs = cxl_decoder_root_attrs,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
        &cxl_decoder_root_attribute_group,
        &cxl_decoder_base_attribute_group,
        &cxl_base_attribute_group,
        NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
        &dev_attr_target_type.attr,
        NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
        .attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
        &cxl_decoder_switch_attribute_group,
        &cxl_decoder_base_attribute_group,
        &cxl_base_attribute_group,
        NULL,
};

static void cxl_decoder_release(struct device *dev)
{
        struct cxl_decoder *cxld = to_cxl_decoder(dev);
        struct cxl_port *port = to_cxl_port(dev->parent);

        ida_free(&port->decoder_ida, cxld->id);
        kfree(cxld);
}

static const struct device_type cxl_decoder_switch_type = {
        .name = "cxl_decoder_switch",
        .release = cxl_decoder_release,
        .groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
        .name = "cxl_decoder_root",
        .release = cxl_decoder_release,
        .groups = cxl_decoder_root_attribute_groups,
};

bool is_root_decoder(struct device *dev)
{
        return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_GPL(is_root_decoder);

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
        if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
                          "not a cxl_decoder device\n"))
                return NULL;
        return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_decoder);

static void cxl_dport_release(struct cxl_dport *dport)
{
        list_del(&dport->list);
        put_device(dport->dport);
        kfree(dport);
}

static void cxl_port_release(struct device *dev)
{
        struct cxl_port *port = to_cxl_port(dev);
        struct cxl_dport *dport, *_d;

        device_lock(dev);
        list_for_each_entry_safe(dport, _d, &port->dports, list)
                cxl_dport_release(dport);
        device_unlock(dev);
        ida_free(&cxl_port_ida, port->id);
        kfree(port);
}

static const struct attribute_group *cxl_port_attribute_groups[] = {
        &cxl_base_attribute_group,
        NULL,
};

static const struct device_type cxl_port_type = {
        .name = "cxl_port",
        .release = cxl_port_release,
        .groups = cxl_port_attribute_groups,
};

struct cxl_port *to_cxl_port(struct device *dev)
{
        if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
                          "not a cxl_port device\n"))
                return NULL;
        return container_of(dev, struct cxl_port, dev);
}

static void unregister_port(void *_port)
{
        struct cxl_port *port = _port;
        struct cxl_dport *dport;

        device_lock(&port->dev);
        list_for_each_entry(dport, &port->dports, list) {
                char link_name[CXL_TARGET_STRLEN];

                if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d",
                             dport->port_id) >= CXL_TARGET_STRLEN)
                        continue;
                sysfs_remove_link(&port->dev.kobj, link_name);
        }
        device_unlock(&port->dev);
        device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
        struct cxl_port *port = _port;

        sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
        int rc;

        rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport");
        if (rc)
                return rc;
        return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static struct cxl_port *cxl_port_alloc(struct device *uport,
                                       resource_size_t component_reg_phys,
                                       struct cxl_port *parent_port)
{
        struct cxl_port *port;
        struct device *dev;
        int rc;

        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
                return ERR_PTR(-ENOMEM);

        rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
        if (rc < 0)
                goto err;
        port->id = rc;

        /*
         * The top-level cxl_port "cxl_root" does not have a cxl_port as
         * its parent and it does not have any corresponding component
         * registers as its decode is described by a fixed platform
         * description.
         */
        dev = &port->dev;
        if (parent_port)
                dev->parent = &parent_port->dev;
        else
                dev->parent = uport;

        port->uport = uport;
        port->component_reg_phys = component_reg_phys;
        ida_init(&port->decoder_ida);
        INIT_LIST_HEAD(&port->dports);

        device_initialize(dev);
        device_set_pm_not_required(dev);
        dev->bus = &cxl_bus_type;
        dev->type = &cxl_port_type;

        return port;

err:
        kfree(port);
        return ERR_PTR(rc);
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_port: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
                                   resource_size_t component_reg_phys,
                                   struct cxl_port *parent_port)
{
        struct cxl_port *port;
        struct device *dev;
        int rc;

        port = cxl_port_alloc(uport, component_reg_phys, parent_port);
        if (IS_ERR(port))
                return port;

        dev = &port->dev;
        if (parent_port)
                rc = dev_set_name(dev, "port%d", port->id);
        else
                rc = dev_set_name(dev, "root%d", port->id);
        if (rc)
                goto err;

        rc = device_add(dev);
        if (rc)
                goto err;

        rc = devm_add_action_or_reset(host, unregister_port, port);
        if (rc)
                return ERR_PTR(rc);

        rc = devm_cxl_link_uport(host, port);
        if (rc)
                return ERR_PTR(rc);

        return port;

err:
        put_device(dev);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_port);
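
/*
 * Example usage, a minimal sketch: "host" stands in for the caller's
 * devm host device, the root of the hierarchy passes NULL for
 * @parent_port, and CXL_RESOURCE_NONE (from cxl.h) is assumed to mark
 * the absence of component registers:
 *
 *	struct cxl_port *root_port;
 *
 *	root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
 *	if (IS_ERR(root_port))
 *		return PTR_ERR(root_port);
 */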

static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
        struct cxl_dport *dport;

        device_lock_assert(&port->dev);
        list_for_each_entry(dport, &port->dports, list)
                if (dport->port_id == id)
                        return dport;
        return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *new)
{
        struct cxl_dport *dup;

        device_lock(&port->dev);
        dup = find_dport(port, new->port_id);
        if (dup)
                dev_err(&port->dev,
                        "unable to add dport%d-%s non-unique port id (%s)\n",
                        new->port_id, dev_name(new->dport),
                        dev_name(dup->dport));
        else
                list_add_tail(&new->list, &port->dports);
        device_unlock(&port->dev);

        return dup ? -EEXIST : 0;
}

/**
 * cxl_add_dport - append downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that all allocations and links are undone by cxl_port deletion
 * and release.
 */
int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id,
                  resource_size_t component_reg_phys)
{
        char link_name[CXL_TARGET_STRLEN];
        struct cxl_dport *dport;
        int rc;

        if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
            CXL_TARGET_STRLEN)
                return -EINVAL;

        dport = kzalloc(sizeof(*dport), GFP_KERNEL);
        if (!dport)
                return -ENOMEM;

        INIT_LIST_HEAD(&dport->list);
        dport->dport = get_device(dport_dev);
        dport->port_id = port_id;
        dport->component_reg_phys = component_reg_phys;
        dport->port = port;

        rc = add_dport(port, dport);
        if (rc)
                goto err;

        rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
        if (rc)
                goto err;

        return 0;
err:
        cxl_dport_release(dport);
        return rc;
}
EXPORT_SYMBOL_GPL(cxl_add_dport);
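
/*
 * Example usage, a sketch: "root_port" is a previously registered
 * cxl_port, "bridge" is a hypothetical host bridge device, and "uid"
 * a hypothetical firmware-assigned port id. On success a "dport<uid>"
 * symlink to @dport_dev appears under the port's sysfs directory:
 *
 *	rc = cxl_add_dport(root_port, &bridge->dev, uid,
 *			   CXL_RESOURCE_NONE);
 *	if (rc)
 *		return rc;
 */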

static struct cxl_decoder *
cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
                  resource_size_t len, int interleave_ways,
                  int interleave_granularity, enum cxl_decoder_type type,
                  unsigned long flags)
{
        struct cxl_decoder *cxld;
        struct device *dev;
        int rc = 0;

        if (interleave_ways < 1)
                return ERR_PTR(-EINVAL);

        device_lock(&port->dev);
        if (list_empty(&port->dports))
                rc = -EINVAL;
        device_unlock(&port->dev);
        if (rc)
                return ERR_PTR(rc);

        cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
        if (!cxld)
                return ERR_PTR(-ENOMEM);

        rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
        if (rc < 0)
                goto err;

        *cxld = (struct cxl_decoder) {
                .id = rc,
                .range = {
                        .start = base,
                        .end = base + len - 1,
                },
                .flags = flags,
                .interleave_ways = interleave_ways,
                .interleave_granularity = interleave_granularity,
                .target_type = type,
        };

        /* handle implied target_list */
        if (interleave_ways == 1)
                cxld->target[0] =
                        list_first_entry(&port->dports, struct cxl_dport, list);
        dev = &cxld->dev;
        device_initialize(dev);
        device_set_pm_not_required(dev);
        dev->parent = &port->dev;
        dev->bus = &cxl_bus_type;

        /* root ports do not have a cxl_port_type parent */
        if (port->dev.parent->type == &cxl_port_type)
                dev->type = &cxl_decoder_switch_type;
        else
                dev->type = &cxl_decoder_root_type;

        return cxld;
err:
        kfree(cxld);
        return ERR_PTR(rc);
}

static void unregister_dev(void *dev)
{
        device_unregister(dev);
}

struct cxl_decoder *
devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
                     resource_size_t base, resource_size_t len,
                     int interleave_ways, int interleave_granularity,
                     enum cxl_decoder_type type, unsigned long flags)
{
        struct cxl_decoder *cxld;
        struct device *dev;
        int rc;

        cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
                                 interleave_granularity, type, flags);
        if (IS_ERR(cxld))
                return cxld;

        dev = &cxld->dev;
        rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
        if (rc)
                goto err;

        rc = device_add(dev);
        if (rc)
                goto err;

        rc = devm_add_action_or_reset(host, unregister_dev, dev);
        if (rc)
                return ERR_PTR(rc);
        return cxld;

err:
        put_device(dev);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
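
/*
 * Example usage, a sketch: the base, size, and interleave parameters
 * would normally come from a platform description (e.g. an ACPI CFMWS
 * entry); the values here are purely illustrative. With a single
 * interleave way the target list is implied by the port's first dport:
 *
 *	cxld = devm_cxl_add_decoder(host, root_port, 1, base, size, 1,
 *				    4096, CXL_DECODER_EXPANDER,
 *				    CXL_DECODER_F_PMEM);
 *	if (IS_ERR(cxld))
 *		return PTR_ERR(cxld);
 */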

/**
 * cxl_probe_component_regs() - Detect CXL Component register blocks
 * @dev: Host device of the @base mapping
 * @base: Mapping containing the HDM Decoder Capability Header
 * @map: Map object describing the register block information found
 *
 * See CXL 2.0 8.2.4 Component Register Layout and Definition
 * See CXL 2.0 8.2.5.5 CXL Device Register Interface
 *
 * Probe for component register information and return it in map object.
 */
void cxl_probe_component_regs(struct device *dev, void __iomem *base,
                              struct cxl_component_reg_map *map)
{
        int cap, cap_count;
        u64 cap_array;

        *map = (struct cxl_component_reg_map) { 0 };

        /*
         * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in
         * CXL 2.0 8.2.4 Table 141.
         */
        base += CXL_CM_OFFSET;

        cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET);

        if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
                dev_err(dev,
                        "Couldn't locate the CXL.cache and CXL.mem capability array header.\n");
                return;
        }

        /* It's assumed that future versions will be backward compatible */
        cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array);

        for (cap = 1; cap <= cap_count; cap++) {
                void __iomem *register_block;
                u32 hdr;
                int decoder_cnt;
                u16 cap_id, offset;
                u32 length;

                hdr = readl(base + cap * 0x4);

                cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr);
                offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr);
                register_block = base + offset;

                switch (cap_id) {
                case CXL_CM_CAP_CAP_ID_HDM:
                        dev_dbg(dev, "found HDM decoder capability (0x%x)\n",
                                offset);

                        hdr = readl(register_block);

                        decoder_cnt = cxl_hdm_decoder_count(hdr);
                        length = 0x20 * decoder_cnt + 0x10;

                        map->hdm_decoder.valid = true;
                        map->hdm_decoder.offset = CXL_CM_OFFSET + offset;
                        map->hdm_decoder.size = length;
                        break;
                default:
                        dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id,
                                offset);
                        break;
                }
        }
}
EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
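
/*
 * Example usage, a sketch: "base" is assumed to be an ioremap() of a
 * component register block the caller located (e.g. via a PCI register
 * locator DVSEC):
 *
 *	struct cxl_component_reg_map map;
 *
 *	cxl_probe_component_regs(&pdev->dev, base, &map);
 *	if (!map.hdm_decoder.valid)
 *		return -ENXIO;
 */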

static void cxl_nvdimm_bridge_release(struct device *dev)
{
        struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

        kfree(cxl_nvb);
}

static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
        &cxl_base_attribute_group,
        NULL,
};

static const struct device_type cxl_nvdimm_bridge_type = {
        .name = "cxl_nvdimm_bridge",
        .release = cxl_nvdimm_bridge_release,
        .groups = cxl_nvdimm_bridge_attribute_groups,
};

struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
{
        if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
                          "not a cxl_nvdimm_bridge device\n"))
                return NULL;
        return container_of(dev, struct cxl_nvdimm_bridge, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);

static struct cxl_nvdimm_bridge *
cxl_nvdimm_bridge_alloc(struct cxl_port *port)
{
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct device *dev;

        cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
        if (!cxl_nvb)
                return ERR_PTR(-ENOMEM);

        dev = &cxl_nvb->dev;
        cxl_nvb->port = port;
        cxl_nvb->state = CXL_NVB_NEW;
        device_initialize(dev);
        device_set_pm_not_required(dev);
        dev->parent = &port->dev;
        dev->bus = &cxl_bus_type;
        dev->type = &cxl_nvdimm_bridge_type;

        return cxl_nvb;
}

static void unregister_nvb(void *_cxl_nvb)
{
        struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
        bool flush;

        /*
         * If the bridge was ever activated then there might be in-flight state
         * work to flush. Once the state has been changed to 'dead' then no new
         * work can be queued by user-triggered bind.
         */
        device_lock(&cxl_nvb->dev);
        flush = cxl_nvb->state != CXL_NVB_NEW;
        cxl_nvb->state = CXL_NVB_DEAD;
        device_unlock(&cxl_nvb->dev);

        /*
         * Even though the device core will trigger device_release_driver()
         * before the unregister, it does not know that
         * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
         * release now and flush it before tearing down the nvdimm device
         * hierarchy.
         */
        device_release_driver(&cxl_nvb->dev);
        if (flush)
                flush_work(&cxl_nvb->state_work);
        device_unregister(&cxl_nvb->dev);
}

struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
                                                     struct cxl_port *port)
{
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct device *dev;
        int rc;

        if (!IS_ENABLED(CONFIG_CXL_PMEM))
                return ERR_PTR(-ENXIO);

        cxl_nvb = cxl_nvdimm_bridge_alloc(port);
        if (IS_ERR(cxl_nvb))
                return cxl_nvb;

        dev = &cxl_nvb->dev;
        rc = dev_set_name(dev, "nvdimm-bridge");
        if (rc)
                goto err;

        rc = device_add(dev);
        if (rc)
                goto err;

        rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
        if (rc)
                return ERR_PTR(rc);

        return cxl_nvb;

err:
        put_device(dev);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
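
/*
 * Example usage, a sketch: "host" and "root_port" are the caller's
 * devm host device and a previously registered root port:
 *
 *	struct cxl_nvdimm_bridge *cxl_nvb;
 *
 *	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
 *	if (IS_ERR(cxl_nvb))
 *		return PTR_ERR(cxl_nvb);
 */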

static void cxl_nvdimm_release(struct device *dev)
{
        struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);

        kfree(cxl_nvd);
}

static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
        &cxl_base_attribute_group,
        NULL,
};

static const struct device_type cxl_nvdimm_type = {
        .name = "cxl_nvdimm",
        .release = cxl_nvdimm_release,
        .groups = cxl_nvdimm_attribute_groups,
};

bool is_cxl_nvdimm(struct device *dev)
{
        return dev->type == &cxl_nvdimm_type;
}
EXPORT_SYMBOL_GPL(is_cxl_nvdimm);

struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
{
        if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
                          "not a cxl_nvdimm device\n"))
                return NULL;
        return container_of(dev, struct cxl_nvdimm, dev);
}
EXPORT_SYMBOL_GPL(to_cxl_nvdimm);

static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
{
        struct cxl_nvdimm *cxl_nvd;
        struct device *dev;

        cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
        if (!cxl_nvd)
                return ERR_PTR(-ENOMEM);

        dev = &cxl_nvd->dev;
        cxl_nvd->cxlmd = cxlmd;
        device_initialize(dev);
        device_set_pm_not_required(dev);
        dev->parent = &cxlmd->dev;
        dev->bus = &cxl_bus_type;
        dev->type = &cxl_nvdimm_type;

        return cxl_nvd;
}

int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
{
        struct cxl_nvdimm *cxl_nvd;
        struct device *dev;
        int rc;

        cxl_nvd = cxl_nvdimm_alloc(cxlmd);
        if (IS_ERR(cxl_nvd))
                return PTR_ERR(cxl_nvd);

        dev = &cxl_nvd->dev;
        rc = dev_set_name(dev, "pmem%d", cxlmd->id);
        if (rc)
                goto err;

        rc = device_add(dev);
        if (rc)
                goto err;

        dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
                dev_name(dev));

        return devm_add_action_or_reset(host, unregister_dev, dev);

err:
        put_device(dev);
        return rc;
}
EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
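
/*
 * Example usage, a sketch: typically called by a driver bound to a
 * cxl_memdev once persistent capacity is known to be present:
 *
 *	rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);
 *	if (rc)
 *		return rc;
 */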

/**
 * cxl_probe_device_regs() - Detect CXL Device register blocks
 * @dev: Host device of the @base mapping
 * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface
 * @map: Map object describing the register block information found
 *
 * Probe for device register information and return it in map object.
 */
void cxl_probe_device_regs(struct device *dev, void __iomem *base,
                           struct cxl_device_reg_map *map)
{
        int cap, cap_count;
        u64 cap_array;

        *map = (struct cxl_device_reg_map){ 0 };

        cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET);
        if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) !=
            CXLDEV_CAP_ARRAY_CAP_ID)
                return;

        cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array);

        for (cap = 1; cap <= cap_count; cap++) {
                u32 offset, length;
                u16 cap_id;

                cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK,
                                   readl(base + cap * 0x10));
                offset = readl(base + cap * 0x10 + 0x4);
                length = readl(base + cap * 0x10 + 0x8);

                switch (cap_id) {
                case CXLDEV_CAP_CAP_ID_DEVICE_STATUS:
                        dev_dbg(dev, "found Status capability (0x%x)\n", offset);

                        map->status.valid = true;
                        map->status.offset = offset;
                        map->status.size = length;
                        break;
                case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX:
                        dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset);
                        map->mbox.valid = true;
                        map->mbox.offset = offset;
                        map->mbox.size = length;
                        break;
                case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX:
                        dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset);
                        break;
                case CXLDEV_CAP_CAP_ID_MEMDEV:
                        dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset);
                        map->memdev.valid = true;
                        map->memdev.offset = offset;
                        map->memdev.size = length;
                        break;
                default:
                        if (cap_id >= 0x8000)
                                dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset);
                        else
                                dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset);
                        break;
                }
        }
}
EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
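
/*
 * Example usage, a sketch: "base" is assumed to map the device register
 * block described by CXL 2.0 8.2.8:
 *
 *	struct cxl_device_reg_map map;
 *
 *	cxl_probe_device_regs(&pdev->dev, base, &map);
 *	if (!map.mbox.valid || !map.memdev.valid)
 *		return -ENXIO;
 */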

static void __iomem *devm_cxl_iomap_block(struct device *dev,
                                          resource_size_t addr,
                                          resource_size_t length)
{
        void __iomem *ret_val;
        struct resource *res;

        res = devm_request_mem_region(dev, addr, length, dev_name(dev));
        if (!res) {
                resource_size_t end = addr + length - 1;

                dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end);
                return NULL;
        }

        ret_val = devm_ioremap(dev, addr, length);
        if (!ret_val)
                dev_err(dev, "Failed to map region %pr\n", res);

        return ret_val;
}

int cxl_map_component_regs(struct pci_dev *pdev,
                           struct cxl_component_regs *regs,
                           struct cxl_register_map *map)
{
        struct device *dev = &pdev->dev;
        resource_size_t phys_addr;
        resource_size_t length;

        phys_addr = pci_resource_start(pdev, map->barno);
        phys_addr += map->block_offset;

        phys_addr += map->component_map.hdm_decoder.offset;
        length = map->component_map.hdm_decoder.size;
        regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length);
        if (!regs->hdm_decoder)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_map_component_regs);

int cxl_map_device_regs(struct pci_dev *pdev,
                        struct cxl_device_regs *regs,
                        struct cxl_register_map *map)
{
        struct device *dev = &pdev->dev;
        resource_size_t phys_addr;

        phys_addr = pci_resource_start(pdev, map->barno);
        phys_addr += map->block_offset;

        if (map->device_map.status.valid) {
                resource_size_t addr;
                resource_size_t length;

                addr = phys_addr + map->device_map.status.offset;
                length = map->device_map.status.size;
                regs->status = devm_cxl_iomap_block(dev, addr, length);
                if (!regs->status)
                        return -ENOMEM;
        }

        if (map->device_map.mbox.valid) {
                resource_size_t addr;
                resource_size_t length;

                addr = phys_addr + map->device_map.mbox.offset;
                length = map->device_map.mbox.size;
                regs->mbox = devm_cxl_iomap_block(dev, addr, length);
                if (!regs->mbox)
                        return -ENOMEM;
        }

        if (map->device_map.memdev.valid) {
                resource_size_t addr;
                resource_size_t length;

                addr = phys_addr + map->device_map.memdev.offset;
                length = map->device_map.memdev.size;
                regs->memdev = devm_cxl_iomap_block(dev, addr, length);
                if (!regs->memdev)
                        return -ENOMEM;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_map_device_regs);
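
/*
 * Example usage, a sketch: "map" is assumed to have been populated by
 * cxl_probe_device_regs() with @barno and @block_offset filled in by
 * the caller; on success regs.status, regs.mbox, and regs.memdev hold
 * live devm-managed mappings:
 *
 *	struct cxl_device_regs regs;
 *
 *	rc = cxl_map_device_regs(pdev, &regs, &map);
 *	if (rc)
 *		return rc;
 */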

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
                          const char *modname)
{
        if (!cxl_drv->probe) {
                pr_debug("%s ->probe() must be specified\n", modname);
                return -EINVAL;
        }

        if (!cxl_drv->name) {
                pr_debug("%s ->name must be specified\n", modname);
                return -EINVAL;
        }

        if (!cxl_drv->id) {
                pr_debug("%s ->id must be specified\n", modname);
                return -EINVAL;
        }

        cxl_drv->drv.bus = &cxl_bus_type;
        cxl_drv->drv.owner = owner;
        cxl_drv->drv.mod_name = modname;
        cxl_drv->drv.name = cxl_drv->name;

        return driver_register(&cxl_drv->drv);
}
EXPORT_SYMBOL_GPL(__cxl_driver_register);
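
/*
 * Example usage, a sketch: drivers are expected to go through a
 * cxl_driver_register() convenience wrapper (assumed to live in cxl.h)
 * that supplies THIS_MODULE and KBUILD_MODNAME:
 *
 *	static struct cxl_driver cxl_nvdimm_driver = {
 *		.name = "cxl_nvdimm",
 *		.probe = cxl_nvdimm_probe,
 *		.id = CXL_DEVICE_NVDIMM,
 *	};
 *
 *	rc = cxl_driver_register(&cxl_nvdimm_driver);
 */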

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
        driver_unregister(&cxl_drv->drv);
}
EXPORT_SYMBOL_GPL(cxl_driver_unregister);

static int cxl_device_id(struct device *dev)
{
        if (dev->type == &cxl_nvdimm_bridge_type)
                return CXL_DEVICE_NVDIMM_BRIDGE;
        if (dev->type == &cxl_nvdimm_type)
                return CXL_DEVICE_NVDIMM;
        return 0;
}

static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
                              cxl_device_id(dev));
}

static int cxl_bus_match(struct device *dev, struct device_driver *drv)
{
        return cxl_device_id(dev) == to_cxl_drv(drv)->id;
}

static int cxl_bus_probe(struct device *dev)
{
        return to_cxl_drv(dev->driver)->probe(dev);
}

static int cxl_bus_remove(struct device *dev)
{
        struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);

        if (cxl_drv->remove)
                cxl_drv->remove(dev);
        return 0;
}

struct bus_type cxl_bus_type = {
        .name = "cxl",
        .uevent = cxl_bus_uevent,
        .match = cxl_bus_match,
        .probe = cxl_bus_probe,
        .remove = cxl_bus_remove,
};
EXPORT_SYMBOL_GPL(cxl_bus_type);

static __init int cxl_core_init(void)
{
        return bus_register(&cxl_bus_type);
}

static void cxl_core_exit(void)
{
        bus_unregister(&cxl_bus_type);
}

module_init(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");