linux/drivers/s390/cio/vfio_ccw_ops.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Physical device callbacks for vfio_ccw
   4 *
   5 * Copyright IBM Corp. 2017
   6 * Copyright Red Hat, Inc. 2019
   7 *
   8 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
   9 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
  10 *            Cornelia Huck <cohuck@redhat.com>
  11 */
  12
  13#include <linux/vfio.h>
  14#include <linux/mdev.h>
  15#include <linux/nospec.h>
  16#include <linux/slab.h>
  17
  18#include "vfio_ccw_private.h"
  19
/*
 * vfio_ccw_mdev_reset - quiesce and re-enable the backing subchannel
 * @mdev: the mediated device whose subchannel is reset
 *
 * Halts/clears any ongoing I/O on the subchannel and enables it again.
 * On success the device state goes back to VFIO_CCW_STATE_IDLE.
 *
 * Returns 0 on success or a negative errno from quiesce/enable.
 */
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private;
        struct subchannel *sch;
        int ret;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        sch = private->sch;
        /*
         * TODO:
         * In the current stage, some things like "no I/O running" and "no
         * interrupt pending" are clear, but we are not sure what other state
         * we need to care about.
         * There are still a lot more instructions need to be handled. We
         * should come back here later.
         */
        ret = vfio_ccw_sch_quiesce(sch);
        if (ret)
                return ret;

        /* Re-enable the subchannel, using its own address as intparm. */
        ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
        if (!ret)
                private->state = VFIO_CCW_STATE_IDLE;

        return ret;
}
  46
  47static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
  48                                  unsigned long action,
  49                                  void *data)
  50{
  51        struct vfio_ccw_private *private =
  52                container_of(nb, struct vfio_ccw_private, nb);
  53
  54        /*
  55         * Vendor drivers MUST unpin pages in response to an
  56         * invalidation.
  57         */
  58        if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
  59                struct vfio_iommu_type1_dma_unmap *unmap = data;
  60
  61                if (!cp_iova_pinned(&private->cp, unmap->iova))
  62                        return NOTIFY_OK;
  63
  64                if (vfio_ccw_mdev_reset(private->mdev))
  65                        return NOTIFY_BAD;
  66
  67                cp_free(&private->cp);
  68                return NOTIFY_OK;
  69        }
  70
  71        return NOTIFY_DONE;
  72}
  73
  74static ssize_t name_show(struct mdev_type *mtype,
  75                         struct mdev_type_attribute *attr, char *buf)
  76{
  77        return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
  78}
  79static MDEV_TYPE_ATTR_RO(name);
  80
  81static ssize_t device_api_show(struct mdev_type *mtype,
  82                               struct mdev_type_attribute *attr, char *buf)
  83{
  84        return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
  85}
  86static MDEV_TYPE_ATTR_RO(device_api);
  87
  88static ssize_t available_instances_show(struct mdev_type *mtype,
  89                                        struct mdev_type_attribute *attr,
  90                                        char *buf)
  91{
  92        struct vfio_ccw_private *private =
  93                dev_get_drvdata(mtype_get_parent_dev(mtype));
  94
  95        return sprintf(buf, "%d\n", atomic_read(&private->avail));
  96}
  97static MDEV_TYPE_ATTR_RO(available_instances);
  98
/* Attributes exposed under the single supported mdev type. */
static struct attribute *mdev_types_attrs[] = {
        &mdev_type_attr_name.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_available_instances.attr,
        NULL,
};

/* The one mdev type this driver offers, named "io". */
static struct attribute_group mdev_type_group = {
        .name  = "io",
        .attrs = mdev_types_attrs,
};

/* NULL-terminated list of supported type groups handed to the mdev core. */
static struct attribute_group *mdev_type_groups[] = {
        &mdev_type_group,
        NULL,
};
 115
/*
 * vfio_ccw_mdev_create - mdev core callback for mediated device creation
 * @mdev: the mediated device being created
 *
 * Claims the subchannel for a new instance: fails with -ENODEV if the
 * subchannel is not operational, and with -EPERM if no instance slot is
 * available (private->avail already zero).
 *
 * Returns 0 on success or a negative errno.
 */
static int vfio_ccw_mdev_create(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));

        if (private->state == VFIO_CCW_STATE_NOT_OPER)
                return -ENODEV;

        /* Atomically claim the (single) instance slot. */
        if (atomic_dec_if_positive(&private->avail) < 0)
                return -EPERM;

        private->mdev = mdev;
        private->state = VFIO_CCW_STATE_IDLE;

        VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
                           mdev_uuid(mdev), private->sch->schid.cssid,
                           private->sch->schid.ssid,
                           private->sch->schid.sch_no);

        return 0;
}
 137
/*
 * vfio_ccw_mdev_remove - mdev core callback for mediated device removal
 * @mdev: the mediated device being removed
 *
 * Quiesces the subchannel (unless already NOT_OPER or STANDBY), frees the
 * channel-program resources and returns the instance slot to the pool.
 *
 * Always returns 0.
 */
static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));

        VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
                           mdev_uuid(mdev), private->sch->schid.cssid,
                           private->sch->schid.ssid,
                           private->sch->schid.sch_no);

        if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
            (private->state != VFIO_CCW_STATE_STANDBY)) {
                if (!vfio_ccw_sch_quiesce(private->sch))
                        private->state = VFIO_CCW_STATE_STANDBY;
                /* The state will be NOT_OPER on error. */
        }

        cp_free(&private->cp);
        private->mdev = NULL;
        atomic_inc(&private->avail);

        return 0;
}
 161
/*
 * vfio_ccw_mdev_open - mdev core callback when userspace opens the device
 * @mdev: the mediated device being opened
 *
 * Registers the IOMMU DMA-unmap notifier and the optional device regions
 * (async, schib, crw).  If any region registration fails, everything set
 * up so far is torn down again.
 *
 * Returns 0 on success or a negative errno.
 */
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));
        unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
        int ret;

        private->nb.notifier_call = vfio_ccw_mdev_notifier;

        ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                     &events, &private->nb);
        if (ret)
                return ret;

        ret = vfio_ccw_register_async_dev_regions(private);
        if (ret)
                goto out_unregister;

        ret = vfio_ccw_register_schib_dev_regions(private);
        if (ret)
                goto out_unregister;

        ret = vfio_ccw_register_crw_dev_regions(private);
        if (ret)
                goto out_unregister;

        return ret;

out_unregister:
        /* Undo every region and the notifier registered above. */
        vfio_ccw_unregister_dev_regions(private);
        vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                 &private->nb);
        return ret;
}
 196
/*
 * vfio_ccw_mdev_release - mdev core callback when userspace closes the device
 * @mdev: the mediated device being released
 *
 * Resets the subchannel (unless it is NOT_OPER or STANDBY already), frees
 * the channel program and unregisters the device regions and the IOMMU
 * notifier installed in vfio_ccw_mdev_open().
 */
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
        struct vfio_ccw_private *private =
                dev_get_drvdata(mdev_parent_dev(mdev));

        if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
            (private->state != VFIO_CCW_STATE_STANDBY)) {
                if (!vfio_ccw_mdev_reset(mdev))
                        private->state = VFIO_CCW_STATE_STANDBY;
                /* The state will be NOT_OPER on error. */
        }

        cp_free(&private->cp);
        vfio_ccw_unregister_dev_regions(private);
        vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                 &private->nb);
}
 214
 215static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
 216                                            char __user *buf, size_t count,
 217                                            loff_t *ppos)
 218{
 219        loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
 220        struct ccw_io_region *region;
 221        int ret;
 222
 223        if (pos + count > sizeof(*region))
 224                return -EINVAL;
 225
 226        mutex_lock(&private->io_mutex);
 227        region = private->io_region;
 228        if (copy_to_user(buf, (void *)region + pos, count))
 229                ret = -EFAULT;
 230        else
 231                ret = count;
 232        mutex_unlock(&private->io_mutex);
 233        return ret;
 234}
 235
 236static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
 237                                  char __user *buf,
 238                                  size_t count,
 239                                  loff_t *ppos)
 240{
 241        unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
 242        struct vfio_ccw_private *private;
 243
 244        private = dev_get_drvdata(mdev_parent_dev(mdev));
 245
 246        if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
 247                return -EINVAL;
 248
 249        switch (index) {
 250        case VFIO_CCW_CONFIG_REGION_INDEX:
 251                return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
 252        default:
 253                index -= VFIO_CCW_NUM_REGIONS;
 254                return private->region[index].ops->read(private, buf, count,
 255                                                        ppos);
 256        }
 257
 258        return -EINVAL;
 259}
 260
/*
 * vfio_ccw_mdev_write_io_region - handle a write to the config (I/O) region
 * @private: the private struct of the mediated device
 * @buf: userspace source buffer
 * @count: number of bytes to copy
 * @ppos: offset into the region
 *
 * Copies the request into the I/O region and kicks the FSM with an IO_REQ
 * event to process it.  Uses mutex_trylock() so a request racing with one
 * already in flight fails fast with -EAGAIN instead of blocking.
 *
 * Returns @count on success, the region's ret_code if the FSM reported an
 * error, or a negative errno.
 */
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
                                             const char __user *buf,
                                             size_t count, loff_t *ppos)
{
        loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
        struct ccw_io_region *region;
        int ret;

        if (pos + count > sizeof(*region))
                return -EINVAL;

        if (!mutex_trylock(&private->io_mutex))
                return -EAGAIN;

        region = private->io_region;
        if (copy_from_user((void *)region + pos, buf, count)) {
                ret = -EFAULT;
                goto out_unlock;
        }

        /* Let the FSM process the request; it fills in region->ret_code. */
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
        ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
        mutex_unlock(&private->io_mutex);
        return ret;
}
 288
 289static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
 290                                   const char __user *buf,
 291                                   size_t count,
 292                                   loff_t *ppos)
 293{
 294        unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
 295        struct vfio_ccw_private *private;
 296
 297        private = dev_get_drvdata(mdev_parent_dev(mdev));
 298
 299        if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
 300                return -EINVAL;
 301
 302        switch (index) {
 303        case VFIO_CCW_CONFIG_REGION_INDEX:
 304                return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
 305        default:
 306                index -= VFIO_CCW_NUM_REGIONS;
 307                return private->region[index].ops->write(private, buf, count,
 308                                                         ppos);
 309        }
 310
 311        return -EINVAL;
 312}
 313
/*
 * vfio_ccw_mdev_get_device_info - fill in a VFIO_DEVICE_GET_INFO reply
 * @info: the structure to be copied back to userspace by the caller
 * @mdev: the mediated device
 *
 * Reports the CCW/RESET device flags and the current region and IRQ
 * counts.  Always returns 0.
 */
static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
                                         struct mdev_device *mdev)
{
        struct vfio_ccw_private *private;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
        info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
        info->num_irqs = VFIO_CCW_NUM_IRQS;

        return 0;
}
 326
/*
 * vfio_ccw_mdev_get_region_info - fill in a VFIO_DEVICE_GET_REGION_INFO reply
 * @info: the header copied in from userspace; updated in place
 * @mdev: the mediated device
 * @arg: the original userspace pointer, needed to copy the capability chain
 *
 * The config (I/O) region is described directly; every other region is
 * described through a VFIO_REGION_INFO_CAP_TYPE capability placed behind
 * the header in the user buffer.
 *
 * Returns 0 on success or a negative errno.
 */
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
                                         struct mdev_device *mdev,
                                         unsigned long arg)
{
        struct vfio_ccw_private *private;
        int i;

        private = dev_get_drvdata(mdev_parent_dev(mdev));
        switch (info->index) {
        case VFIO_CCW_CONFIG_REGION_INDEX:
                info->offset = 0;
                info->size = sizeof(struct ccw_io_region);
                info->flags = VFIO_REGION_INFO_FLAG_READ
                              | VFIO_REGION_INFO_FLAG_WRITE;
                return 0;
        default: /* all other regions are handled via capability chain */
        {
                struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
                struct vfio_region_info_cap_type cap_type = {
                        .header.id = VFIO_REGION_INFO_CAP_TYPE,
                        .header.version = 1 };
                int ret;

                if (info->index >=
                    VFIO_CCW_NUM_REGIONS + private->num_regions)
                        return -EINVAL;

                /* Clamp the user-controlled index against speculation. */
                info->index = array_index_nospec(info->index,
                                                 VFIO_CCW_NUM_REGIONS +
                                                 private->num_regions);

                /* Index into the dynamically registered region array. */
                i = info->index - VFIO_CCW_NUM_REGIONS;

                info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
                info->size = private->region[i].size;
                info->flags = private->region[i].flags;

                cap_type.type = private->region[i].type;
                cap_type.subtype = private->region[i].subtype;

                ret = vfio_info_add_capability(&caps, &cap_type.header,
                                               sizeof(cap_type));
                if (ret)
                        return ret;

                info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
                if (info->argsz < sizeof(*info) + caps.size) {
                        /* User buffer too small: report the required size. */
                        info->argsz = sizeof(*info) + caps.size;
                        info->cap_offset = 0;
                } else {
                        /* Chain starts right after the fixed-size header. */
                        vfio_info_cap_shift(&caps, sizeof(*info));
                        if (copy_to_user((void __user *)arg + sizeof(*info),
                                         caps.buf, caps.size)) {
                                kfree(caps.buf);
                                return -EFAULT;
                        }
                        info->cap_offset = sizeof(*info);
                }

                kfree(caps.buf);

        }
        }
        return 0;
}
 392
 393static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
 394{
 395        switch (info->index) {
 396        case VFIO_CCW_IO_IRQ_INDEX:
 397        case VFIO_CCW_CRW_IRQ_INDEX:
 398        case VFIO_CCW_REQ_IRQ_INDEX:
 399                info->count = 1;
 400                info->flags = VFIO_IRQ_INFO_EVENTFD;
 401                break;
 402        default:
 403                return -EINVAL;
 404        }
 405
 406        return 0;
 407}
 408
/*
 * vfio_ccw_mdev_set_irqs - handle VFIO_DEVICE_SET_IRQS for one IRQ index
 * @mdev: the mediated device
 * @flags: the VFIO_IRQ_SET_* flags from the request header
 * @index: which IRQ (io/crw/req) the operation targets
 * @data: userspace pointer to the payload following the header
 *
 * Only ACTION_TRIGGER is supported.  DATA_NONE signals the eventfd
 * unconditionally, DATA_BOOL signals it if the payload byte is non-zero,
 * and DATA_EVENTFD installs (fd >= 0) or removes (fd == -1) the eventfd
 * context used for signalling.
 *
 * Returns 0 on success or a negative errno.
 */
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
                                  uint32_t flags,
                                  uint32_t index,
                                  void __user *data)
{
        struct vfio_ccw_private *private;
        struct eventfd_ctx **ctx;

        if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
                return -EINVAL;

        private = dev_get_drvdata(mdev_parent_dev(mdev));

        /* Pick the trigger belonging to the requested IRQ index. */
        switch (index) {
        case VFIO_CCW_IO_IRQ_INDEX:
                ctx = &private->io_trigger;
                break;
        case VFIO_CCW_CRW_IRQ_INDEX:
                ctx = &private->crw_trigger;
                break;
        case VFIO_CCW_REQ_IRQ_INDEX:
                ctx = &private->req_trigger;
                break;
        default:
                return -EINVAL;
        }

        switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
        case VFIO_IRQ_SET_DATA_NONE:
        {
                if (*ctx)
                        eventfd_signal(*ctx, 1);
                return 0;
        }
        case VFIO_IRQ_SET_DATA_BOOL:
        {
                uint8_t trigger;

                if (get_user(trigger, (uint8_t __user *)data))
                        return -EFAULT;

                if (trigger && *ctx)
                        eventfd_signal(*ctx, 1);
                return 0;
        }
        case VFIO_IRQ_SET_DATA_EVENTFD:
        {
                int32_t fd;

                if (get_user(fd, (int32_t __user *)data))
                        return -EFAULT;

                if (fd == -1) {
                        /* Disable: drop any currently installed context. */
                        if (*ctx)
                                eventfd_ctx_put(*ctx);
                        *ctx = NULL;
                } else if (fd >= 0) {
                        struct eventfd_ctx *efdctx;

                        efdctx = eventfd_ctx_fdget(fd);
                        if (IS_ERR(efdctx))
                                return PTR_ERR(efdctx);

                        /* Swap out a previously installed context. */
                        if (*ctx)
                                eventfd_ctx_put(*ctx);

                        *ctx = efdctx;
                } else
                        return -EINVAL;

                return 0;
        }
        default:
                return -EINVAL;
        }
}
 485
 486int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
 487                                 unsigned int subtype,
 488                                 const struct vfio_ccw_regops *ops,
 489                                 size_t size, u32 flags, void *data)
 490{
 491        struct vfio_ccw_region *region;
 492
 493        region = krealloc(private->region,
 494                          (private->num_regions + 1) * sizeof(*region),
 495                          GFP_KERNEL);
 496        if (!region)
 497                return -ENOMEM;
 498
 499        private->region = region;
 500        private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
 501        private->region[private->num_regions].subtype = subtype;
 502        private->region[private->num_regions].ops = ops;
 503        private->region[private->num_regions].size = size;
 504        private->region[private->num_regions].flags = flags;
 505        private->region[private->num_regions].data = data;
 506
 507        private->num_regions++;
 508
 509        return 0;
 510}
 511
/*
 * vfio_ccw_unregister_dev_regions - release all dynamically added regions
 * @private: the private struct of the mediated device
 *
 * Invokes each region's release op, then frees the region array itself
 * and resets the bookkeeping.
 */
void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
{
        int i;

        for (i = 0; i < private->num_regions; i++)
                private->region[i].ops->release(private, &private->region[i]);
        private->num_regions = 0;
        kfree(private->region);
        private->region = NULL;
}
 522
/*
 * vfio_ccw_mdev_ioctl - ioctl entry point for the mediated device
 * @mdev: the mediated device
 * @cmd: the VFIO_DEVICE_* command
 * @arg: userspace pointer to the command's argument structure
 *
 * Implements GET_INFO, GET_REGION_INFO, GET_IRQ_INFO, SET_IRQS and RESET.
 * Each GET_* variant copies in the caller-supplied header, validates
 * argsz against the minimal size and copies the updated header back out.
 *
 * Returns 0 on success, a negative errno otherwise (-ENOTTY for unknown
 * commands).
 */
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
                                   unsigned int cmd,
                                   unsigned long arg)
{
        int ret = 0;
        unsigned long minsz;

        switch (cmd) {
        case VFIO_DEVICE_GET_INFO:
        {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = vfio_ccw_mdev_get_device_info(&info, mdev);
                if (ret)
                        return ret;

                return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
        }
        case VFIO_DEVICE_GET_REGION_INFO:
        {
                struct vfio_region_info info;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
                if (ret)
                        return ret;

                return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
        }
        case VFIO_DEVICE_GET_IRQ_INFO:
        {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
                        return -EINVAL;

                ret = vfio_ccw_mdev_get_irq_info(&info);
                if (ret)
                        return ret;

                if (info.count == -1)
                        return -EINVAL;

                return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
        }
        case VFIO_DEVICE_SET_IRQS:
        {
                struct vfio_irq_set hdr;
                size_t data_size;
                void __user *data;

                minsz = offsetofend(struct vfio_irq_set, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
                                                         VFIO_CCW_NUM_IRQS,
                                                         &data_size);
                if (ret)
                        return ret;

                /* The eventfd/bool payload follows the header. */
                data = (void __user *)(arg + minsz);
                return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, hdr.index, data);
        }
        case VFIO_DEVICE_RESET:
                return vfio_ccw_mdev_reset(mdev);
        default:
                return -ENOTTY;
        }
}
 614
/* Request removal of the device: relay the request to userspace. */
static void vfio_ccw_mdev_request(struct mdev_device *mdev, unsigned int count)
{
        struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev));

        if (!private)
                return;

        if (private->req_trigger) {
                /* Only log every tenth retry to keep the noise down. */
                if (!(count % 10))
                        dev_notice_ratelimited(mdev_dev(private->mdev),
                                               "Relaying device request to user (#%u)\n",
                                               count);

                eventfd_signal(private->req_trigger, 1);
        } else if (count == 0) {
                /* No req eventfd registered: warn once on the first attempt. */
                dev_notice(mdev_dev(private->mdev),
                           "No device request channel registered, blocked until released by user\n");
        }
}
 635
/* Callbacks wired into the mdev core for the vfio-ccw parent device. */
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
        .owner                  = THIS_MODULE,
        .supported_type_groups  = mdev_type_groups,
        .create                 = vfio_ccw_mdev_create,
        .remove                 = vfio_ccw_mdev_remove,
        .open                   = vfio_ccw_mdev_open,
        .release                = vfio_ccw_mdev_release,
        .read                   = vfio_ccw_mdev_read,
        .write                  = vfio_ccw_mdev_write,
        .ioctl                  = vfio_ccw_mdev_ioctl,
        .request                = vfio_ccw_mdev_request,
};
 648
/* Register the subchannel's device with the mdev core. */
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
        return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}
 653
/* Unregister the subchannel's device from the mdev core. */
void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
        mdev_unregister_device(&sch->dev);
}
 658