linux/drivers/char/virtio_console.c
   1/*
   2 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
   3 * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
   4 * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  19 */
  20#include <linux/cdev.h>
  21#include <linux/debugfs.h>
  22#include <linux/completion.h>
  23#include <linux/device.h>
  24#include <linux/err.h>
  25#include <linux/freezer.h>
  26#include <linux/fs.h>
  27#include <linux/splice.h>
  28#include <linux/pagemap.h>
  29#include <linux/init.h>
  30#include <linux/list.h>
  31#include <linux/poll.h>
  32#include <linux/sched.h>
  33#include <linux/slab.h>
  34#include <linux/spinlock.h>
  35#include <linux/virtio.h>
  36#include <linux/virtio_console.h>
  37#include <linux/wait.h>
  38#include <linux/workqueue.h>
  39#include <linux/module.h>
  40#include <linux/dma-mapping.h>
  41#include <linux/kconfig.h>
  42#include "../tty/hvc/hvc_console.h"
  43
  44#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
  45
  46/*
  47 * This is a global struct for storing common data for all the devices
  48 * this driver handles.
  49 *
  50 * Mainly, it has a linked list for all the consoles in one place so
  51 * that callbacks from hvc for get_chars(), put_chars() work properly
  52 * across multiple devices and multiple ports per device.
  53 */
  54struct ports_driver_data {
  55        /* Used for registering chardevs */
  56        struct class *class;
  57
  58        /* Used for exporting per-port information to debugfs */
  59        struct dentry *debugfs_dir;
  60
  61        /* List of all the devices we're handling */
  62        struct list_head portdevs;
  63
  64        /* Number of devices this driver is handling */
  65        unsigned int index;
  66
  67        /*
  68         * This is used to keep track of the number of hvc consoles
  69         * spawned by this driver.  This number is given as the first
  70         * argument to hvc_alloc().  To correctly map an initial
  71         * console spawned via hvc_instantiate to the console being
   72         * hooked up via hvc_alloc, we need to pass the same vtermno;
              * see the sketch just below this struct.
   73         *
  74         * We also just assume the first console being initialised was
  75         * the first one that got used as the initial console.
  76         */
  77        unsigned int next_vtermno;
  78
  79        /* All the console devices handled by this driver */
  80        struct list_head consoles;
  81};
  82static struct ports_driver_data pdrvdata;
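
     /*
      * A minimal sketch (illustrative only, not driver code) of how
      * next_vtermno ties the early console to the first hvc console;
      * it mirrors what virtio_cons_early_init() and init_port_console()
      * below actually do:
      *
      *   hvc_instantiate(0, 0, &hv_ops);          // early console, vtermno 0
      *   ...
      *   hvc_alloc(pdrvdata.next_vtermno, 0, &hv_ops, PAGE_SIZE);
      *   pdrvdata.next_vtermno++;                 // later consoles get 1, 2, ...
      */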
  83
  84DEFINE_SPINLOCK(pdrvdata_lock);
  85DECLARE_COMPLETION(early_console_added);
  86
  87/* This struct holds information that's relevant only for console ports */
  88struct console {
  89        /* We'll place all consoles in a list in the pdrvdata struct */
  90        struct list_head list;
  91
  92        /* The hvc device associated with this console port */
  93        struct hvc_struct *hvc;
  94
  95        /* The size of the console */
  96        struct winsize ws;
  97
   98        /*
   99         * The vterm number that we used to register with hvc in
  100         * hvc_instantiate() and hvc_alloc(); the hvc callbacks pass
  101         * this number back to us so that we can tell this console
  102         * apart from the other console ports handled by this
  103         * driver.
  104         */
 105        u32 vtermno;
 106};
 107
 108struct port_buffer {
 109        char *buf;
 110
 111        /* size of the buffer in *buf above */
 112        size_t size;
 113
 114        /* used length of the buffer */
 115        size_t len;
 116        /* offset in the buf from which to consume data */
 117        size_t offset;
 118
 119        /* DMA address of buffer */
 120        dma_addr_t dma;
 121
 122        /* Device we got DMA memory from */
 123        struct device *dev;
 124
 125        /* List of pending dma buffers to free */
 126        struct list_head list;
 127
 128        /* If sgpages == 0 then buf is used */
 129        unsigned int sgpages;
 130
  131        /* sg is used if sgpages > 0.  sg must be the last field in this struct; see sketch below. */
 132        struct scatterlist sg[0];
 133};
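
     /*
      * Allocation sketch for the struct above (assumes 'pages' scatterlist
      * entries are wanted); this is what alloc_buf() below does for the
      * splice path:
      *
      *   struct port_buffer *buf;
      *
      *   buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages,
      *                 GFP_KERNEL);
      *   if (buf)
      *           buf->sgpages = pages;   // sg[0..pages-1] are now usable
      */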
 134
 135/*
 136 * This is a per-device struct that stores data common to all the
 137 * ports for that device (vdev->priv).
 138 */
 139struct ports_device {
 140        /* Next portdev in the list, head is in the pdrvdata struct */
 141        struct list_head list;
 142
 143        /*
 144         * Workqueue handlers where we process deferred work after
 145         * notification
 146         */
 147        struct work_struct control_work;
 148
 149        struct list_head ports;
 150
 151        /* To protect the list of ports */
 152        spinlock_t ports_lock;
 153
 154        /* To protect the vq operations for the control channel */
 155        spinlock_t cvq_lock;
 156
 157        /* The current config space is stored here */
 158        struct virtio_console_config config;
 159
 160        /* The virtio device we're associated with */
 161        struct virtio_device *vdev;
 162
 163        /*
 164         * A couple of virtqueues for the control channel: one for
 165         * guest->host transfers, one for host->guest transfers
 166         */
 167        struct virtqueue *c_ivq, *c_ovq;
 168
 169        /* Array of per-port IO virtqueues */
 170        struct virtqueue **in_vqs, **out_vqs;
 171
 172        /* Used for numbering devices for sysfs and debugfs */
 173        unsigned int drv_index;
 174
 175        /* Major number for this device.  Ports will be created as minors. */
 176        int chr_major;
 177};
 178
 179struct port_stats {
 180        unsigned long bytes_sent, bytes_received, bytes_discarded;
 181};
 182
 183/* This struct holds the per-port data */
 184struct port {
 185        /* Next port in the list, head is in the ports_device */
 186        struct list_head list;
 187
 188        /* Pointer to the parent virtio_console device */
 189        struct ports_device *portdev;
 190
 191        /* The current buffer from which data has to be fed to readers */
 192        struct port_buffer *inbuf;
 193
 194        /*
 195         * To protect the operations on the in_vq associated with this
 196         * port.  Has to be a spinlock because it can be called from
 197         * interrupt context (get_char()).
 198         */
 199        spinlock_t inbuf_lock;
 200
 201        /* Protect the operations on the out_vq. */
 202        spinlock_t outvq_lock;
 203
 204        /* The IO vqs for this port */
 205        struct virtqueue *in_vq, *out_vq;
 206
 207        /* File in the debugfs directory that exposes this port's information */
 208        struct dentry *debugfs_file;
 209
 210        /*
 211         * Keep count of the bytes sent, received and discarded for
 212         * this port for accounting and debugging purposes.  These
 213         * counts are not reset across port open / close events.
 214         */
 215        struct port_stats stats;
 216
 217        /*
 218         * The entries in this struct will be valid if this port is
 219         * hooked up to an hvc console
 220         */
 221        struct console cons;
 222
 223        /* Each port associates with a separate char device */
 224        struct cdev *cdev;
 225        struct device *dev;
 226
 227        /* Reference-counting to handle port hot-unplugs and file operations */
 228        struct kref kref;
 229
 230        /* A waitqueue for poll() or blocking read operations */
 231        wait_queue_head_t waitqueue;
 232
 233        /* The 'name' of the port that we expose via sysfs properties */
 234        char *name;
 235
 236        /* We can notify apps of host connect / disconnect events via SIGIO */
 237        struct fasync_struct *async_queue;
 238
 239        /* The 'id' to identify the port with the Host */
 240        u32 id;
 241
 242        bool outvq_full;
 243
 244        /* Is the host device open */
 245        bool host_connected;
 246
 247        /* We should allow only one process to open a port */
 248        bool guest_connected;
 249};
 250
 251/* This is the very early arch-specified put chars function. */
 252static int (*early_put_chars)(u32, const char *, int);
 253
 254static struct port *find_port_by_vtermno(u32 vtermno)
 255{
 256        struct port *port;
 257        struct console *cons;
 258        unsigned long flags;
 259
 260        spin_lock_irqsave(&pdrvdata_lock, flags);
 261        list_for_each_entry(cons, &pdrvdata.consoles, list) {
 262                if (cons->vtermno == vtermno) {
 263                        port = container_of(cons, struct port, cons);
 264                        goto out;
 265                }
 266        }
 267        port = NULL;
 268out:
 269        spin_unlock_irqrestore(&pdrvdata_lock, flags);
 270        return port;
 271}
 272
 273static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
 274                                                 dev_t dev)
 275{
 276        struct port *port;
 277        unsigned long flags;
 278
 279        spin_lock_irqsave(&portdev->ports_lock, flags);
 280        list_for_each_entry(port, &portdev->ports, list)
 281                if (port->cdev->dev == dev)
 282                        goto out;
 283        port = NULL;
 284out:
 285        spin_unlock_irqrestore(&portdev->ports_lock, flags);
 286
 287        return port;
 288}
 289
 290static struct port *find_port_by_devt(dev_t dev)
 291{
 292        struct ports_device *portdev;
 293        struct port *port;
 294        unsigned long flags;
 295
 296        spin_lock_irqsave(&pdrvdata_lock, flags);
 297        list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
 298                port = find_port_by_devt_in_portdev(portdev, dev);
 299                if (port)
 300                        goto out;
 301        }
 302        port = NULL;
 303out:
 304        spin_unlock_irqrestore(&pdrvdata_lock, flags);
 305        return port;
 306}
 307
 308static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
 309{
 310        struct port *port;
 311        unsigned long flags;
 312
 313        spin_lock_irqsave(&portdev->ports_lock, flags);
 314        list_for_each_entry(port, &portdev->ports, list)
 315                if (port->id == id)
 316                        goto out;
 317        port = NULL;
 318out:
 319        spin_unlock_irqrestore(&portdev->ports_lock, flags);
 320
 321        return port;
 322}
 323
 324static struct port *find_port_by_vq(struct ports_device *portdev,
 325                                    struct virtqueue *vq)
 326{
 327        struct port *port;
 328        unsigned long flags;
 329
 330        spin_lock_irqsave(&portdev->ports_lock, flags);
 331        list_for_each_entry(port, &portdev->ports, list)
 332                if (port->in_vq == vq || port->out_vq == vq)
 333                        goto out;
 334        port = NULL;
 335out:
 336        spin_unlock_irqrestore(&portdev->ports_lock, flags);
 337        return port;
 338}
 339
 340static bool is_console_port(struct port *port)
 341{
 342        if (port->cons.hvc)
 343                return true;
 344        return false;
 345}
 346
 347static bool is_rproc_serial(const struct virtio_device *vdev)
 348{
 349        return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
 350}
 351
 352static inline bool use_multiport(struct ports_device *portdev)
 353{
 354        /*
 355         * This condition can be true when put_chars is called from
 356         * early_init
 357         */
 358        if (!portdev->vdev)
  359                return false;
 360        return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
 361}
 362
 363static DEFINE_SPINLOCK(dma_bufs_lock);
 364static LIST_HEAD(pending_free_dma_bufs);
 365
 366static void free_buf(struct port_buffer *buf, bool can_sleep)
 367{
 368        unsigned int i;
 369
 370        for (i = 0; i < buf->sgpages; i++) {
 371                struct page *page = sg_page(&buf->sg[i]);
 372                if (!page)
 373                        break;
 374                put_page(page);
 375        }
 376
 377        if (!buf->dev) {
 378                kfree(buf->buf);
 379        } else if (is_rproc_enabled) {
 380                unsigned long flags;
 381
 382                /* dma_free_coherent requires interrupts to be enabled. */
 383                if (!can_sleep) {
 384                        /* queue up dma-buffers to be freed later */
 385                        spin_lock_irqsave(&dma_bufs_lock, flags);
 386                        list_add_tail(&buf->list, &pending_free_dma_bufs);
 387                        spin_unlock_irqrestore(&dma_bufs_lock, flags);
 388                        return;
 389                }
 390                dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
 391
 392                /* Release device refcnt and allow it to be freed */
 393                put_device(buf->dev);
 394        }
 395
 396        kfree(buf);
 397}
 398
 399static void reclaim_dma_bufs(void)
 400{
 401        unsigned long flags;
 402        struct port_buffer *buf, *tmp;
 403        LIST_HEAD(tmp_list);
 404
 405        if (list_empty(&pending_free_dma_bufs))
 406                return;
 407
  408        /* Move the entries of pending_free_dma_bufs to a local list while holding the lock */
 409        spin_lock_irqsave(&dma_bufs_lock, flags);
 410        list_cut_position(&tmp_list, &pending_free_dma_bufs,
 411                          pending_free_dma_bufs.prev);
 412        spin_unlock_irqrestore(&dma_bufs_lock, flags);
 413
  414        /* Free the dma buffers outside the lock, where we're allowed to sleep */
 415        list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
 416                list_del(&buf->list);
 417                free_buf(buf, true);
 418        }
 419}
 420
 421static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
 422                                     int pages)
 423{
 424        struct port_buffer *buf;
 425
 426        reclaim_dma_bufs();
 427
 428        /*
 429         * Allocate buffer and the sg list. The sg list array is allocated
 430         * directly after the port_buffer struct.
 431         */
 432        buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages,
 433                      GFP_KERNEL);
 434        if (!buf)
 435                goto fail;
 436
 437        buf->sgpages = pages;
 438        if (pages > 0) {
 439                buf->dev = NULL;
 440                buf->buf = NULL;
 441                return buf;
 442        }
 443
 444        if (is_rproc_serial(vq->vdev)) {
 445                /*
 446                 * Allocate DMA memory from ancestor. When a virtio
 447                 * device is created by remoteproc, the DMA memory is
 448                 * associated with the grandparent device:
 449                 * vdev => rproc => platform-dev.
 450                 * The code here would have been less quirky if
 451                 * DMA_MEMORY_INCLUDES_CHILDREN had been supported
 452                 * in dma-coherent.c
 453                 */
 454                if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
 455                        goto free_buf;
 456                buf->dev = vq->vdev->dev.parent->parent;
 457
 458                /* Increase device refcnt to avoid freeing it */
 459                get_device(buf->dev);
 460                buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
 461                                              GFP_KERNEL);
 462        } else {
 463                buf->dev = NULL;
 464                buf->buf = kmalloc(buf_size, GFP_KERNEL);
 465        }
 466
 467        if (!buf->buf)
 468                goto free_buf;
 469        buf->len = 0;
 470        buf->offset = 0;
 471        buf->size = buf_size;
 472        return buf;
 473
 474free_buf:
 475        kfree(buf);
 476fail:
 477        return NULL;
 478}
 479
 480/* Callers should take appropriate locks */
 481static struct port_buffer *get_inbuf(struct port *port)
 482{
 483        struct port_buffer *buf;
 484        unsigned int len;
 485
 486        if (port->inbuf)
 487                return port->inbuf;
 488
 489        buf = virtqueue_get_buf(port->in_vq, &len);
 490        if (buf) {
 491                buf->len = len;
 492                buf->offset = 0;
 493                port->stats.bytes_received += len;
 494        }
 495        return buf;
 496}
 497
 498/*
 499 * Create a scatter-gather list representing our input buffer and put
 500 * it in the queue.
 501 *
 502 * Callers should take appropriate locks.
 503 */
 504static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
 505{
 506        struct scatterlist sg[1];
 507        int ret;
 508
 509        sg_init_one(sg, buf->buf, buf->size);
 510
 511        ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
 512        virtqueue_kick(vq);
 513        if (!ret)
 514                ret = vq->num_free;
 515        return ret;
 516}
 517
  518/* Discard any unread data this port has.  Callers should take appropriate locks. */
 519static void discard_port_data(struct port *port)
 520{
 521        struct port_buffer *buf;
 522        unsigned int err;
 523
 524        if (!port->portdev) {
 525                /* Device has been unplugged.  vqs are already gone. */
 526                return;
 527        }
 528        buf = get_inbuf(port);
 529
 530        err = 0;
 531        while (buf) {
 532                port->stats.bytes_discarded += buf->len - buf->offset;
 533                if (add_inbuf(port->in_vq, buf) < 0) {
 534                        err++;
 535                        free_buf(buf, false);
 536                }
 537                port->inbuf = NULL;
 538                buf = get_inbuf(port);
 539        }
 540        if (err)
 541                dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
 542                         err);
 543}
 544
 545static bool port_has_data(struct port *port)
 546{
 547        unsigned long flags;
 548        bool ret;
 549
 550        ret = false;
 551        spin_lock_irqsave(&port->inbuf_lock, flags);
 552        port->inbuf = get_inbuf(port);
 553        if (port->inbuf)
 554                ret = true;
 555
 556        spin_unlock_irqrestore(&port->inbuf_lock, flags);
 557        return ret;
 558}
 559
 560static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
 561                                  unsigned int event, unsigned int value)
 562{
 563        struct scatterlist sg[1];
 564        struct virtio_console_control cpkt;
 565        struct virtqueue *vq;
 566        unsigned int len;
 567
 568        if (!use_multiport(portdev))
 569                return 0;
 570
 571        cpkt.id = port_id;
 572        cpkt.event = event;
 573        cpkt.value = value;
 574
 575        vq = portdev->c_ovq;
 576
 577        sg_init_one(sg, &cpkt, sizeof(cpkt));
 578        if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
 579                virtqueue_kick(vq);
 580                while (!virtqueue_get_buf(vq, &len))
 581                        cpu_relax();
 582        }
 583        return 0;
 584}
 585
 586static ssize_t send_control_msg(struct port *port, unsigned int event,
 587                                unsigned int value)
 588{
 589        /* Did the port get unplugged before userspace closed it? */
 590        if (port->portdev)
 591                return __send_control_msg(port->portdev, port->id, event, value);
 592        return 0;
 593}
 594
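     /*
      * Usage sketch (calls taken from elsewhere in this file): the control
      * queue carries fixed-size virtio_console_control packets of the form
      * { id, event, value }, e.g.:
      *
      *   __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
      *   send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
      */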
 595
 596/* Callers must take the port->outvq_lock */
 597static void reclaim_consumed_buffers(struct port *port)
 598{
 599        struct port_buffer *buf;
 600        unsigned int len;
 601
 602        if (!port->portdev) {
 603                /* Device has been unplugged.  vqs are already gone. */
 604                return;
 605        }
 606        while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
 607                free_buf(buf, false);
 608                port->outvq_full = false;
 609        }
 610}
 611
 612static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
 613                              int nents, size_t in_count,
 614                              void *data, bool nonblock)
 615{
 616        struct virtqueue *out_vq;
 617        int err;
 618        unsigned long flags;
 619        unsigned int len;
 620
 621        out_vq = port->out_vq;
 622
 623        spin_lock_irqsave(&port->outvq_lock, flags);
 624
 625        reclaim_consumed_buffers(port);
 626
 627        err = virtqueue_add_buf(out_vq, sg, nents, 0, data, GFP_ATOMIC);
 628
 629        /* Tell Host to go! */
 630        virtqueue_kick(out_vq);
 631
 632        if (err) {
 633                in_count = 0;
 634                goto done;
 635        }
 636
 637        if (out_vq->num_free == 0)
 638                port->outvq_full = true;
 639
 640        if (nonblock)
 641                goto done;
 642
 643        /*
 644         * Wait till the host acknowledges it pushed out the data we
 645         * sent.  This is done for data from the hvc_console; the tty
 646         * operations are performed with spinlocks held so we can't
 647         * sleep here.  An alternative would be to copy the data to a
 648         * buffer and relax the spinning requirement.  The downside is
 649         * we need to kmalloc a GFP_ATOMIC buffer each time the
 650         * console driver writes something out.
 651         */
 652        while (!virtqueue_get_buf(out_vq, &len))
 653                cpu_relax();
 654done:
 655        spin_unlock_irqrestore(&port->outvq_lock, flags);
 656
 657        port->stats.bytes_sent += in_count;
 658        /*
 659         * We're expected to return the amount of data we wrote -- all
 660         * of it
 661         */
 662        return in_count;
 663}
 664
 665/*
 666 * Give out the data that's requested from the buffer that we have
 667 * queued up.
 668 */
 669static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
 670                            bool to_user)
 671{
 672        struct port_buffer *buf;
 673        unsigned long flags;
 674
 675        if (!out_count || !port_has_data(port))
 676                return 0;
 677
 678        buf = port->inbuf;
 679        out_count = min(out_count, buf->len - buf->offset);
 680
 681        if (to_user) {
 682                ssize_t ret;
 683
 684                ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
 685                if (ret)
 686                        return -EFAULT;
 687        } else {
 688                memcpy(out_buf, buf->buf + buf->offset, out_count);
 689        }
 690
 691        buf->offset += out_count;
 692
 693        if (buf->offset == buf->len) {
 694                /*
 695                 * We're done using all the data in this buffer.
 696                 * Re-queue so that the Host can send us more data.
 697                 */
 698                spin_lock_irqsave(&port->inbuf_lock, flags);
 699                port->inbuf = NULL;
 700
 701                if (add_inbuf(port->in_vq, buf) < 0)
 702                        dev_warn(port->dev, "failed add_buf\n");
 703
 704                spin_unlock_irqrestore(&port->inbuf_lock, flags);
 705        }
 706        /* Return the number of bytes actually copied */
 707        return out_count;
 708}
 709
  710/* Would a read on this port block right now?  Waiting/polling ends when this is false. */
 711static bool will_read_block(struct port *port)
 712{
 713        if (!port->guest_connected) {
 714                /* Port got hot-unplugged. Let's exit. */
 715                return false;
 716        }
 717        return !port_has_data(port) && port->host_connected;
 718}
 719
 720static bool will_write_block(struct port *port)
 721{
 722        bool ret;
 723
 724        if (!port->guest_connected) {
 725                /* Port got hot-unplugged. Let's exit. */
 726                return false;
 727        }
 728        if (!port->host_connected)
 729                return true;
 730
 731        spin_lock_irq(&port->outvq_lock);
 732        /*
 733         * Check if the Host has consumed any buffers since we last
 734         * sent data (this is only applicable for nonblocking ports).
 735         */
 736        reclaim_consumed_buffers(port);
 737        ret = port->outvq_full;
 738        spin_unlock_irq(&port->outvq_lock);
 739
 740        return ret;
 741}
 742
 743static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
 744                              size_t count, loff_t *offp)
 745{
 746        struct port *port;
 747        ssize_t ret;
 748
 749        port = filp->private_data;
 750
 751        if (!port_has_data(port)) {
 752                /*
  753                 * If the host is not connected, just return 0 (EOF);
  754                 * this tells the userspace app that there's no
  755                 * connection.
 756                 */
 757                if (!port->host_connected)
 758                        return 0;
 759                if (filp->f_flags & O_NONBLOCK)
 760                        return -EAGAIN;
 761
 762                ret = wait_event_freezable(port->waitqueue,
 763                                           !will_read_block(port));
 764                if (ret < 0)
 765                        return ret;
 766        }
 767        /* Port got hot-unplugged. */
 768        if (!port->guest_connected)
 769                return -ENODEV;
 770        /*
 771         * We could've received a disconnection message while we were
 772         * waiting for more data.
 773         *
  774         * This check is not folded into the if() statement above
  775         * because we might have received some data and also had the
  776         * host disconnect after we got woken up from our wait.  So we
  777         * really want to hand over whatever data we have and only
  778         * then check for host_connected.
 779         */
 780        if (!port_has_data(port) && !port->host_connected)
 781                return 0;
 782
 783        return fill_readbuf(port, ubuf, count, true);
 784}
 785
 786static int wait_port_writable(struct port *port, bool nonblock)
 787{
 788        int ret;
 789
 790        if (will_write_block(port)) {
 791                if (nonblock)
 792                        return -EAGAIN;
 793
 794                ret = wait_event_freezable(port->waitqueue,
 795                                           !will_write_block(port));
 796                if (ret < 0)
 797                        return ret;
 798        }
 799        /* Port got hot-unplugged. */
 800        if (!port->guest_connected)
 801                return -ENODEV;
 802
 803        return 0;
 804}
 805
 806static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
 807                               size_t count, loff_t *offp)
 808{
 809        struct port *port;
 810        struct port_buffer *buf;
 811        ssize_t ret;
 812        bool nonblock;
 813        struct scatterlist sg[1];
 814
 815        /* Userspace could be out to fool us */
 816        if (!count)
 817                return 0;
 818
 819        port = filp->private_data;
 820
 821        nonblock = filp->f_flags & O_NONBLOCK;
 822
 823        ret = wait_port_writable(port, nonblock);
 824        if (ret < 0)
 825                return ret;
 826
 827        count = min((size_t)(32 * 1024), count);
 828
 829        buf = alloc_buf(port->out_vq, count, 0);
 830        if (!buf)
 831                return -ENOMEM;
 832
 833        ret = copy_from_user(buf->buf, ubuf, count);
 834        if (ret) {
 835                ret = -EFAULT;
 836                goto free_buf;
 837        }
 838
 839        /*
  840         * We now ask __send_to_port() to not spin for generic ports -- we
 841         * can re-use the same code path that non-blocking file
 842         * descriptors take for blocking file descriptors since the
 843         * wait is already done and we're certain the write will go
 844         * through to the host.
 845         */
 846        nonblock = true;
 847        sg_init_one(sg, buf->buf, count);
 848        ret = __send_to_port(port, sg, 1, count, buf, nonblock);
 849
 850        if (nonblock && ret > 0)
 851                goto out;
 852
 853free_buf:
 854        free_buf(buf, true);
 855out:
 856        return ret;
 857}
 858
 859struct sg_list {
 860        unsigned int n;
 861        unsigned int size;
 862        size_t len;
 863        struct scatterlist *sg;
 864};
 865
 866static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 867                        struct splice_desc *sd)
 868{
 869        struct sg_list *sgl = sd->u.data;
 870        unsigned int offset, len;
 871
 872        if (sgl->n == sgl->size)
 873                return 0;
 874
  875        /* Try to lock (steal) this page */
 876        if (buf->ops->steal(pipe, buf) == 0) {
 877                /* Get reference and unlock page for moving */
 878                get_page(buf->page);
 879                unlock_page(buf->page);
 880
 881                len = min(buf->len, sd->len);
 882                sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
 883        } else {
  884                /* Fall back to copying the page */
 885                struct page *page = alloc_page(GFP_KERNEL);
 886                char *src = buf->ops->map(pipe, buf, 1);
 887                char *dst;
 888
 889                if (!page)
 890                        return -ENOMEM;
 891                dst = kmap(page);
 892
 893                offset = sd->pos & ~PAGE_MASK;
 894
 895                len = sd->len;
 896                if (len + offset > PAGE_SIZE)
 897                        len = PAGE_SIZE - offset;
 898
 899                memcpy(dst + offset, src + buf->offset, len);
 900
 901                kunmap(page);
 902                buf->ops->unmap(pipe, buf, src);
 903
 904                sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
 905        }
 906        sgl->n++;
 907        sgl->len += len;
 908
 909        return len;
 910}
 911
 912/* Faster zero-copy write by splicing */
 913static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
 914                                      struct file *filp, loff_t *ppos,
 915                                      size_t len, unsigned int flags)
 916{
 917        struct port *port = filp->private_data;
 918        struct sg_list sgl;
 919        ssize_t ret;
 920        struct port_buffer *buf;
 921        struct splice_desc sd = {
 922                .total_len = len,
 923                .flags = flags,
 924                .pos = *ppos,
 925                .u.data = &sgl,
 926        };
 927
 928        /*
 929         * Rproc_serial does not yet support splice. To support splice
 930         * pipe_to_sg() must allocate dma-buffers and copy content from
 931         * regular pages to dma pages. And alloc_buf and free_buf must
 932         * support allocating and freeing such a list of dma-buffers.
 933         */
 934        if (is_rproc_serial(port->out_vq->vdev))
 935                return -EINVAL;
 936
 937        ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
 938        if (ret < 0)
 939                return ret;
 940
 941        buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
 942        if (!buf)
 943                return -ENOMEM;
 944
 945        sgl.n = 0;
 946        sgl.len = 0;
 947        sgl.size = pipe->nrbufs;
 948        sgl.sg = buf->sg;
 949        sg_init_table(sgl.sg, sgl.size);
 950        ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
 951        if (likely(ret > 0))
 952                ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
 953
 954        if (unlikely(ret <= 0))
 955                free_buf(buf, true);
 956        return ret;
 957}
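
     /*
      * Userspace sketch for the splice path above (illustrative only; 'fd'
      * is assumed to be an open /dev/vport<N>p<N> descriptor and pipefd[0]
      * the read end of a pipe already filled by a producer):
      *
      *   ssize_t n = splice(pipefd[0], NULL, fd, NULL, len, SPLICE_F_MOVE);
      *   if (n < 0)
      *           perror("splice");
      */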
 958
 959static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
 960{
 961        struct port *port;
 962        unsigned int ret;
 963
 964        port = filp->private_data;
 965        poll_wait(filp, &port->waitqueue, wait);
 966
 967        if (!port->guest_connected) {
 968                /* Port got unplugged */
 969                return POLLHUP;
 970        }
 971        ret = 0;
 972        if (!will_read_block(port))
 973                ret |= POLLIN | POLLRDNORM;
 974        if (!will_write_block(port))
 975                ret |= POLLOUT;
 976        if (!port->host_connected)
 977                ret |= POLLHUP;
 978
 979        return ret;
 980}
 981
 982static void remove_port(struct kref *kref);
 983
 984static int port_fops_release(struct inode *inode, struct file *filp)
 985{
 986        struct port *port;
 987
 988        port = filp->private_data;
 989
 990        /* Notify host of port being closed */
 991        send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
 992
 993        spin_lock_irq(&port->inbuf_lock);
 994        port->guest_connected = false;
 995
 996        discard_port_data(port);
 997
 998        spin_unlock_irq(&port->inbuf_lock);
 999
1000        spin_lock_irq(&port->outvq_lock);
1001        reclaim_consumed_buffers(port);
1002        spin_unlock_irq(&port->outvq_lock);
1003
1004        reclaim_dma_bufs();
1005        /*
1006         * Locks aren't necessary here as a port can't be opened after
1007         * unplug, and if a port isn't unplugged, a kref would already
1008         * exist for the port.  Plus, taking ports_lock here would
1009         * create a dependency on other locks taken by functions
1010         * inside remove_port if we're the last holder of the port,
1011         * creating many problems.
1012         */
1013        kref_put(&port->kref, remove_port);
1014
1015        return 0;
1016}
1017
1018static int port_fops_open(struct inode *inode, struct file *filp)
1019{
1020        struct cdev *cdev = inode->i_cdev;
1021        struct port *port;
1022        int ret;
1023
 1024        port = find_port_by_devt(cdev->dev);
             if (!port) {
                     /* Port was unplugged before we could proceed */
                     return -ENXIO;
             }
 1025        filp->private_data = port;
1026
 1027        /* Protect against a port getting hot-unplugged at the same time */
1028        spin_lock_irq(&port->portdev->ports_lock);
1029        kref_get(&port->kref);
1030        spin_unlock_irq(&port->portdev->ports_lock);
1031
1032        /*
1033         * Don't allow opening of console port devices -- that's done
1034         * via /dev/hvc
1035         */
1036        if (is_console_port(port)) {
1037                ret = -ENXIO;
1038                goto out;
1039        }
1040
1041        /* Allow only one process to open a particular port at a time */
1042        spin_lock_irq(&port->inbuf_lock);
1043        if (port->guest_connected) {
1044                spin_unlock_irq(&port->inbuf_lock);
1045                ret = -EMFILE;
1046                goto out;
1047        }
1048
1049        port->guest_connected = true;
1050        spin_unlock_irq(&port->inbuf_lock);
1051
1052        spin_lock_irq(&port->outvq_lock);
1053        /*
 1054         * We might have missed reclaiming a few buffers in the
 1055         * window between the port being closed and it being
 1056         * reopened now.
1057         */
1058        reclaim_consumed_buffers(port);
1059        spin_unlock_irq(&port->outvq_lock);
1060
1061        nonseekable_open(inode, filp);
1062
1063        /* Notify host of port being opened */
1064        send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
1065
1066        return 0;
1067out:
1068        kref_put(&port->kref, remove_port);
1069        return ret;
1070}
1071
1072static int port_fops_fasync(int fd, struct file *filp, int mode)
1073{
1074        struct port *port;
1075
1076        port = filp->private_data;
1077        return fasync_helper(fd, filp, mode, &port->async_queue);
1078}
1079
1080/*
 1081 * The file operations that we support: programs in the guest can open
 1082 * a port device, read from it, write to it, poll for data and
 1083 * close it.  The devices are at
 1084 *   /dev/vport<device number>p<port number>
 1085 * A userspace usage sketch follows the fops table below.
      */
1086static const struct file_operations port_fops = {
1087        .owner = THIS_MODULE,
1088        .open  = port_fops_open,
1089        .read  = port_fops_read,
1090        .write = port_fops_write,
1091        .splice_write = port_fops_splice_write,
1092        .poll  = port_fops_poll,
1093        .release = port_fops_release,
1094        .fasync = port_fops_fasync,
1095        .llseek = no_llseek,
1096};
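
     /*
      * Userspace usage sketch (illustrative only; the device name and the
      * buffer size are assumptions, not mandated by the driver):
      *
      *   int fd = open("/dev/vport0p1", O_RDWR);
      *   struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
      *
      *   if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
      *           char data[4096];
      *           ssize_t n = read(fd, data, sizeof(data));
      *           // n == 0 here means the host side is not connected
      *   }
      */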
1097
1098/*
1099 * The put_chars() callback is pretty straightforward.
1100 *
1101 * We turn the characters into a scatter-gather list, add it to the
1102 * output queue and then kick the Host.  Then we sit here waiting for
1103 * it to finish: inefficient in theory, but in practice
1104 * implementations will do it immediately (lguest's Launcher does).
1105 */
1106static int put_chars(u32 vtermno, const char *buf, int count)
1107{
1108        struct port *port;
1109        struct scatterlist sg[1];
1110
1111        if (unlikely(early_put_chars))
1112                return early_put_chars(vtermno, buf, count);
1113
1114        port = find_port_by_vtermno(vtermno);
1115        if (!port)
1116                return -EPIPE;
1117
1118        sg_init_one(sg, buf, count);
1119        return __send_to_port(port, sg, 1, count, (void *)buf, false);
1120}
1121
1122/*
1123 * get_chars() is the callback from the hvc_console infrastructure
1124 * when an interrupt is received.
1125 *
1126 * We call out to fill_readbuf that gets us the required data from the
1127 * buffers that are queued up.
1128 */
1129static int get_chars(u32 vtermno, char *buf, int count)
1130{
1131        struct port *port;
1132
1133        /* If we've not set up the port yet, we have no input to give. */
1134        if (unlikely(early_put_chars))
1135                return 0;
1136
1137        port = find_port_by_vtermno(vtermno);
1138        if (!port)
1139                return -EPIPE;
1140
1141        /* If we don't have an input queue yet, we can't get input. */
1142        BUG_ON(!port->in_vq);
1143
1144        return fill_readbuf(port, buf, count, false);
1145}
1146
1147static void resize_console(struct port *port)
1148{
1149        struct virtio_device *vdev;
1150
1151        /* The port could have been hot-unplugged */
1152        if (!port || !is_console_port(port))
1153                return;
1154
1155        vdev = port->portdev->vdev;
1156
1157        /* Don't test F_SIZE at all if we're rproc: not a valid feature! */
1158        if (!is_rproc_serial(vdev) &&
1159            virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
1160                hvc_resize(port->cons.hvc, port->cons.ws);
1161}
1162
1163/* We set the configuration at this point, since we now have a tty */
1164static int notifier_add_vio(struct hvc_struct *hp, int data)
1165{
1166        struct port *port;
1167
1168        port = find_port_by_vtermno(hp->vtermno);
1169        if (!port)
1170                return -EINVAL;
1171
1172        hp->irq_requested = 1;
1173        resize_console(port);
1174
1175        return 0;
1176}
1177
1178static void notifier_del_vio(struct hvc_struct *hp, int data)
1179{
1180        hp->irq_requested = 0;
1181}
1182
1183/* The operations for console ports. */
1184static const struct hv_ops hv_ops = {
1185        .get_chars = get_chars,
1186        .put_chars = put_chars,
1187        .notifier_add = notifier_add_vio,
1188        .notifier_del = notifier_del_vio,
1189        .notifier_hangup = notifier_del_vio,
1190};
1191
1192/*
1193 * Console drivers are initialized very early so boot messages can go
1194 * out, so we do things slightly differently from the generic virtio
1195 * initialization of the net and block drivers.
1196 *
1197 * At this stage, the console is output-only.  It's too early to set
1198 * up a virtqueue, so we let the drivers do some boutique early-output
1199 * thing.
1200 */
1201int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
1202{
1203        early_put_chars = put_chars;
1204        return hvc_instantiate(0, 0, &hv_ops);
1205}
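
     /*
      * A minimal sketch of how an arch/hypervisor early console would hook
      * in; my_early_put_chars is a hypothetical callback, not part of this
      * driver:
      *
      *   static int my_early_put_chars(u32 vtermno, const char *buf, int n)
      *   {
      *           // push 'n' bytes to the hypervisor's early output channel
      *           return n;
      *   }
      *
      *   virtio_cons_early_init(my_early_put_chars);
      */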
1206
 1207static int init_port_console(struct port *port)
1208{
1209        int ret;
1210
1211        /*
1212         * The Host's telling us this port is a console port.  Hook it
1213         * up with an hvc console.
1214         *
1215         * To set up and manage our virtual console, we call
1216         * hvc_alloc().
1217         *
1218         * The first argument of hvc_alloc() is the virtual console
1219         * number.  The second argument is the parameter for the
1220         * notification mechanism (like irq number).  We currently
1221         * leave this as zero, virtqueues have implicit notifications.
1222         *
1223         * The third argument is a "struct hv_ops" containing the
 1224         * put_chars(), get_chars(), notifier_add() and notifier_del()
1225         * pointers.  The final argument is the output buffer size: we
1226         * can do any size, so we put PAGE_SIZE here.
1227         */
1228        port->cons.vtermno = pdrvdata.next_vtermno;
1229
1230        port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
1231        if (IS_ERR(port->cons.hvc)) {
1232                ret = PTR_ERR(port->cons.hvc);
1233                dev_err(port->dev,
1234                        "error %d allocating hvc for port\n", ret);
1235                port->cons.hvc = NULL;
1236                return ret;
1237        }
1238        spin_lock_irq(&pdrvdata_lock);
1239        pdrvdata.next_vtermno++;
1240        list_add_tail(&port->cons.list, &pdrvdata.consoles);
1241        spin_unlock_irq(&pdrvdata_lock);
1242        port->guest_connected = true;
1243
1244        /*
1245         * Start using the new console output if this is the first
1246         * console to come up.
1247         */
1248        if (early_put_chars)
1249                early_put_chars = NULL;
1250
1251        /* Notify host of port being opened */
1252        send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
1253
1254        return 0;
1255}
1256
1257static ssize_t show_port_name(struct device *dev,
1258                              struct device_attribute *attr, char *buffer)
1259{
1260        struct port *port;
1261
1262        port = dev_get_drvdata(dev);
1263
1264        return sprintf(buffer, "%s\n", port->name);
1265}
1266
1267static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
1268
1269static struct attribute *port_sysfs_entries[] = {
1270        &dev_attr_name.attr,
1271        NULL
1272};
1273
1274static struct attribute_group port_attribute_group = {
1275        .name = NULL,           /* put in device directory */
1276        .attrs = port_sysfs_entries,
1277};
1278
1279static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
1280                            size_t count, loff_t *offp)
1281{
1282        struct port *port;
1283        char *buf;
1284        ssize_t ret, out_offset, out_count;
1285
1286        out_count = 1024;
1287        buf = kmalloc(out_count, GFP_KERNEL);
1288        if (!buf)
1289                return -ENOMEM;
1290
1291        port = filp->private_data;
1292        out_offset = 0;
1293        out_offset += snprintf(buf + out_offset, out_count,
1294                               "name: %s\n", port->name ? port->name : "");
1295        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1296                               "guest_connected: %d\n", port->guest_connected);
1297        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1298                               "host_connected: %d\n", port->host_connected);
1299        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1300                               "outvq_full: %d\n", port->outvq_full);
1301        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1302                               "bytes_sent: %lu\n", port->stats.bytes_sent);
1303        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1304                               "bytes_received: %lu\n",
1305                               port->stats.bytes_received);
1306        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1307                               "bytes_discarded: %lu\n",
1308                               port->stats.bytes_discarded);
1309        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1310                               "is_console: %s\n",
1311                               is_console_port(port) ? "yes" : "no");
1312        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1313                               "console_vtermno: %u\n", port->cons.vtermno);
1314
1315        ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
1316        kfree(buf);
1317        return ret;
1318}
1319
1320static const struct file_operations port_debugfs_ops = {
1321        .owner = THIS_MODULE,
1322        .open  = simple_open,
1323        .read  = debugfs_read,
1324};
1325
1326static void set_console_size(struct port *port, u16 rows, u16 cols)
1327{
1328        if (!port || !is_console_port(port))
1329                return;
1330
1331        port->cons.ws.ws_row = rows;
1332        port->cons.ws.ws_col = cols;
1333}
1334
1335static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1336{
1337        struct port_buffer *buf;
1338        unsigned int nr_added_bufs;
1339        int ret;
1340
1341        nr_added_bufs = 0;
1342        do {
1343                buf = alloc_buf(vq, PAGE_SIZE, 0);
1344                if (!buf)
1345                        break;
1346
1347                spin_lock_irq(lock);
1348                ret = add_inbuf(vq, buf);
1349                if (ret < 0) {
1350                        spin_unlock_irq(lock);
1351                        free_buf(buf, true);
1352                        break;
1353                }
1354                nr_added_bufs++;
1355                spin_unlock_irq(lock);
1356        } while (ret > 0);
1357
1358        return nr_added_bufs;
1359}
1360
1361static void send_sigio_to_port(struct port *port)
1362{
1363        if (port->async_queue && port->guest_connected)
1364                kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
1365}
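
     /*
      * Userspace sketch for receiving these SIGIO notifications
      * (illustrative only; error handling omitted, sigio_handler is a
      * user-defined signal handler):
      *
      *   signal(SIGIO, sigio_handler);
      *   fcntl(fd, F_SETOWN, getpid());
      *   fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);  // ends up in port_fops_fasync()
      */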
1366
1367static int add_port(struct ports_device *portdev, u32 id)
1368{
1369        char debugfs_name[16];
1370        struct port *port;
1371        struct port_buffer *buf;
1372        dev_t devt;
1373        unsigned int nr_added_bufs;
1374        int err;
1375
1376        port = kmalloc(sizeof(*port), GFP_KERNEL);
1377        if (!port) {
1378                err = -ENOMEM;
1379                goto fail;
1380        }
1381        kref_init(&port->kref);
1382
1383        port->portdev = portdev;
1384        port->id = id;
1385
1386        port->name = NULL;
1387        port->inbuf = NULL;
1388        port->cons.hvc = NULL;
1389        port->async_queue = NULL;
1390
1391        port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
1392
1393        port->host_connected = port->guest_connected = false;
1394        port->stats = (struct port_stats) { 0 };
1395
1396        port->outvq_full = false;
1397
1398        port->in_vq = portdev->in_vqs[port->id];
1399        port->out_vq = portdev->out_vqs[port->id];
1400
1401        port->cdev = cdev_alloc();
1402        if (!port->cdev) {
1403                dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
1404                err = -ENOMEM;
1405                goto free_port;
1406        }
1407        port->cdev->ops = &port_fops;
1408
1409        devt = MKDEV(portdev->chr_major, id);
1410        err = cdev_add(port->cdev, devt, 1);
1411        if (err < 0) {
1412                dev_err(&port->portdev->vdev->dev,
1413                        "Error %d adding cdev for port %u\n", err, id);
1414                goto free_cdev;
1415        }
1416        port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
1417                                  devt, port, "vport%up%u",
1418                                  port->portdev->drv_index, id);
1419        if (IS_ERR(port->dev)) {
1420                err = PTR_ERR(port->dev);
1421                dev_err(&port->portdev->vdev->dev,
1422                        "Error %d creating device for port %u\n",
1423                        err, id);
1424                goto free_cdev;
1425        }
1426
1427        spin_lock_init(&port->inbuf_lock);
1428        spin_lock_init(&port->outvq_lock);
1429        init_waitqueue_head(&port->waitqueue);
1430
1431        /* Fill the in_vq with buffers so the host can send us data. */
1432        nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
1433        if (!nr_added_bufs) {
1434                dev_err(port->dev, "Error allocating inbufs\n");
1435                err = -ENOMEM;
1436                goto free_device;
1437        }
1438
1439        if (is_rproc_serial(port->portdev->vdev))
1440                /*
1441                 * For rproc_serial assume remote processor is connected.
1442                 * rproc_serial does not want the console port, only
1443                 * the generic port implementation.
1444                 */
1445                port->host_connected = true;
1446        else if (!use_multiport(port->portdev)) {
1447                /*
1448                 * If we're not using multiport support,
1449                 * this has to be a console port.
1450                 */
1451                err = init_port_console(port);
1452                if (err)
1453                        goto free_inbufs;
1454        }
1455
1456        spin_lock_irq(&portdev->ports_lock);
1457        list_add_tail(&port->list, &port->portdev->ports);
1458        spin_unlock_irq(&portdev->ports_lock);
1459
1460        /*
1461         * Tell the Host we're set so that it can send us various
1462         * configuration parameters for this port (eg, port name,
1463         * caching, whether this is a console port, etc.)
1464         */
1465        send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1466
1467        if (pdrvdata.debugfs_dir) {
1468                /*
1469                 * Finally, create the debugfs file that we can use to
1470                 * inspect a port's state at any time
1471                 */
1472                sprintf(debugfs_name, "vport%up%u",
1473                        port->portdev->drv_index, id);
1474                port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
1475                                                         pdrvdata.debugfs_dir,
1476                                                         port,
1477                                                         &port_debugfs_ops);
1478        }
1479        return 0;
1480
1481free_inbufs:
1482        while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1483                free_buf(buf, true);
1484free_device:
1485        device_destroy(pdrvdata.class, port->dev->devt);
1486free_cdev:
1487        cdev_del(port->cdev);
1488free_port:
1489        kfree(port);
1490fail:
1491        /* The host might want to notify management sw about port add failure */
1492        __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
1493        return err;
1494}
1495
1496/* No users remain, remove all port-specific data. */
1497static void remove_port(struct kref *kref)
1498{
1499        struct port *port;
1500
1501        port = container_of(kref, struct port, kref);
1502
1503        sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1504        device_destroy(pdrvdata.class, port->dev->devt);
1505        cdev_del(port->cdev);
1506
1507        kfree(port->name);
1508
1509        debugfs_remove(port->debugfs_file);
1510
1511        kfree(port);
1512}
1513
1514static void remove_port_data(struct port *port)
1515{
1516        struct port_buffer *buf;
1517
1518        /* Remove unused data this port might have received. */
1519        discard_port_data(port);
1520
1521        reclaim_consumed_buffers(port);
1522
1523        /* Remove buffers we queued up for the Host to send us data in. */
1524        while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1525                free_buf(buf, true);
1526
1527        /* Free pending buffers from the out-queue. */
1528        while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
1529                free_buf(buf, true);
1530}
1531
1532/*
1533 * Port got unplugged.  Remove port from portdev's list and drop the
1534 * kref reference.  If no userspace has this port opened, it will
 1535 * result in immediate removal of the port.
1536 */
1537static void unplug_port(struct port *port)
1538{
1539        spin_lock_irq(&port->portdev->ports_lock);
1540        list_del(&port->list);
1541        spin_unlock_irq(&port->portdev->ports_lock);
1542
1543        if (port->guest_connected) {
1544                port->guest_connected = false;
1545                port->host_connected = false;
1546                wake_up_interruptible(&port->waitqueue);
1547
1548                /* Let the app know the port is going down. */
1549                send_sigio_to_port(port);
1550        }
1551
1552        if (is_console_port(port)) {
1553                spin_lock_irq(&pdrvdata_lock);
1554                list_del(&port->cons.list);
1555                spin_unlock_irq(&pdrvdata_lock);
1556                hvc_remove(port->cons.hvc);
1557        }
1558
1559        remove_port_data(port);
1560
1561        /*
1562         * We should just assume the device itself has gone off --
1563         * else a close on an open port later will try to send out a
1564         * control message.
1565         */
1566        port->portdev = NULL;
1567
1568        /*
1569         * Locks around here are not necessary - a port can't be
1570         * opened after we removed the port struct from ports_list
1571         * above.
1572         */
1573        kref_put(&port->kref, remove_port);
1574}
1575
1576/* Any private messages that the Host and Guest want to share */
1577static void handle_control_message(struct ports_device *portdev,
1578                                   struct port_buffer *buf)
1579{
1580        struct virtio_console_control *cpkt;
1581        struct port *port;
1582        size_t name_size;
1583        int err;
1584
1585        cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
1586
1587        port = find_port_by_id(portdev, cpkt->id);
1588        if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
 1589                /* No port with this id; drop the control packet. */
1590                dev_dbg(&portdev->vdev->dev,
1591                        "Invalid index %u in control packet\n", cpkt->id);
1592                return;
1593        }
1594
1595        switch (cpkt->event) {
1596        case VIRTIO_CONSOLE_PORT_ADD:
1597                if (port) {
1598                        dev_dbg(&portdev->vdev->dev,
1599                                "Port %u already added\n", port->id);
1600                        send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1601                        break;
1602                }
1603                if (cpkt->id >= portdev->config.max_nr_ports) {
1604                        dev_warn(&portdev->vdev->dev,
1605                                "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
1606                                cpkt->id, portdev->config.max_nr_ports - 1);
1607                        break;
1608                }
1609                add_port(portdev, cpkt->id);
1610                break;
1611        case VIRTIO_CONSOLE_PORT_REMOVE:
1612                unplug_port(port);
1613                break;
1614        case VIRTIO_CONSOLE_CONSOLE_PORT:
1615                if (!cpkt->value)
1616                        break;
1617                if (is_console_port(port))
1618                        break;
1619
1620                init_port_console(port);
1621                complete(&early_console_added);
1622                /*
1623                 * We could remove the port here if init fails, but
1624                 * we'd have to notify the host first.
1625                 */
1626                break;
1627        case VIRTIO_CONSOLE_RESIZE: {
1628                struct {
1629                        __u16 rows;
1630                        __u16 cols;
1631                } size;
1632
1633                if (!is_console_port(port))
1634                        break;
1635
1636                memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
1637                       sizeof(size));
1638                set_console_size(port, size.rows, size.cols);
1639
1640                port->cons.hvc->irq_requested = 1;
1641                resize_console(port);
1642                break;
1643        }
1644        case VIRTIO_CONSOLE_PORT_OPEN:
1645                port->host_connected = cpkt->value;
1646                wake_up_interruptible(&port->waitqueue);
1647                /*
1648                 * If the host port got closed and the host had any
1649                 * unconsumed buffers, we'll be able to reclaim them
1650                 * now.
1651                 */
1652                spin_lock_irq(&port->outvq_lock);
1653                reclaim_consumed_buffers(port);
1654                spin_unlock_irq(&port->outvq_lock);
1655
1656                /*
1657                 * If the guest is connected, it'll be interested in
1658                 * knowing the host connection state changed.
1659                 */
1660                send_sigio_to_port(port);
1661                break;
1662        case VIRTIO_CONSOLE_PORT_NAME:
1663                /*
1664                 * If we woke up after hibernation, we can get this
1665                 * again.  Skip it in that case.
1666                 */
1667                if (port->name)
1668                        break;
1669
1670                /*
1671                 * Skip the header (buf->offset) and the control packet
1672                 * to get the length of the name that was sent.
1673                 */
1674                name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
1675
1676                port->name = kmalloc(name_size, GFP_KERNEL);
1677                if (!port->name) {
1678                        dev_err(port->dev,
1679                                "Not enough space to store port name\n");
1680                        break;
1681                }
1682                strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
1683                        name_size - 1);
1684                port->name[name_size - 1] = 0;
1685
1686                /*
1687                 * Since we only have one sysfs attribute, 'name',
1688                 * create it only if we have a name for the port.
1689                 */
1690                err = sysfs_create_group(&port->dev->kobj,
1691                                         &port_attribute_group);
1692                if (err) {
1693                        dev_err(port->dev,
1694                                "Error %d creating sysfs device attributes\n",
1695                                err);
1696                } else {
1697                        /*
1698                         * Generate a udev event so that appropriate
1699                         * symlinks can be created based on udev
1700                         * rules.
1701                         */
1702                        kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
1703                }
1704                break;
1705        }
1706}
1707
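/*
 * Work handler for the control receive queue: pull completed buffers
 * off the control-in vq, process each message, and hand the buffer
 * back to the host.  The cvq_lock is dropped while a message is being
 * handled, since handling it may sleep.
 */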
1708static void control_work_handler(struct work_struct *work)
1709{
1710        struct ports_device *portdev;
1711        struct virtqueue *vq;
1712        struct port_buffer *buf;
1713        unsigned int len;
1714
1715        portdev = container_of(work, struct ports_device, control_work);
1716        vq = portdev->c_ivq;
1717
1718        spin_lock(&portdev->cvq_lock);
1719        while ((buf = virtqueue_get_buf(vq, &len))) {
1720                spin_unlock(&portdev->cvq_lock);
1721
1722                buf->len = len;
1723                buf->offset = 0;
1724
1725                handle_control_message(portdev, buf);
1726
1727                spin_lock(&portdev->cvq_lock);
1728                if (add_inbuf(portdev->c_ivq, buf) < 0) {
1729                        dev_warn(&portdev->vdev->dev,
1730                                 "Error adding buffer to queue\n");
1731                        free_buf(buf, false);
1732                }
1733        }
1734        spin_unlock(&portdev->cvq_lock);
1735}
1736
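/* The host consumed buffers from a port's out vq: wake up any writers. */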
1737static void out_intr(struct virtqueue *vq)
1738{
1739        struct port *port;
1740
1741        port = find_port_by_vq(vq->vdev->priv, vq);
1742        if (!port)
1743                return;
1744
1745        wake_up_interruptible(&port->waitqueue);
1746}
1747
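/*
 * The host filled a buffer on a port's in vq: stash it, throw it away
 * if the guest side of the port isn't open, and wake up readers (and
 * poke hvc if this is a console port).
 */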
1748static void in_intr(struct virtqueue *vq)
1749{
1750        struct port *port;
1751        unsigned long flags;
1752
1753        port = find_port_by_vq(vq->vdev->priv, vq);
1754        if (!port)
1755                return;
1756
1757        spin_lock_irqsave(&port->inbuf_lock, flags);
1758        port->inbuf = get_inbuf(port);
1759
1760        /*
1761         * Don't queue up data when port is closed.  This condition
1762         * can be reached when a console port is not yet connected (no
1763         * tty is spawned) and the host sends out data to console
1764         * ports.  For generic serial ports, the host won't
1765         * (shouldn't) send data till the guest is connected.
1766         */
1767        if (!port->guest_connected)
1768                discard_port_data(port);
1769
1770        spin_unlock_irqrestore(&port->inbuf_lock, flags);
1771
1772        wake_up_interruptible(&port->waitqueue);
1773
1774        /* Send a SIGIO indicating new data in case the process asked for it */
1775        send_sigio_to_port(port);
1776
1777        if (is_console_port(port) && hvc_poll(port->cons.hvc))
1778                hvc_kick();
1779}
1780
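/* A control message arrived; defer handling it to process context. */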
1781static void control_intr(struct virtqueue *vq)
1782{
1783        struct ports_device *portdev;
1784
1785        portdev = vq->vdev->priv;
1786        schedule_work(&portdev->control_work);
1787}
1788
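/*
 * The device's config space changed.  Without multiport, the only
 * thing that can change is the size of the lone console, so fetch the
 * new rows/cols and resize port 0.
 */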
1789static void config_intr(struct virtio_device *vdev)
1790{
1791        struct ports_device *portdev;
1792
1793        portdev = vdev->priv;
1794
1795        if (!use_multiport(portdev)) {
1796                struct port *port;
1797                u16 rows, cols;
1798
1799                vdev->config->get(vdev,
1800                                  offsetof(struct virtio_console_config, cols),
1801                                  &cols, sizeof(u16));
1802                vdev->config->get(vdev,
1803                                  offsetof(struct virtio_console_config, rows),
1804                                  &rows, sizeof(u16));
1805
1806                port = find_port_by_id(portdev, 0);
1807                set_console_size(port, rows, cols);
1808
1809                /*
1810                 * We'll use this way of resizing only for legacy
1811                 * support.  Newer hosts
1812                 * (VIRTIO_CONSOLE_F_MULTIPORT) use control messages
1813                 * to indicate console size changes so that it can be
1814                 * done per-port.
1815                 */
1816                resize_console(port);
1817        }
1818}
1819
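/*
 * Allocate and find all the virtqueues for this device.  The layout is:
 *
 *   vqs[0], vqs[1]      in/out for port 0
 *   vqs[2], vqs[3]      control in/out (multiport only)
 *   vqs[4], vqs[5]      in/out for port 1 (multiport only)
 *   ... one further in/out pair per additional port.
 */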
1820static int init_vqs(struct ports_device *portdev)
1821{
1822        vq_callback_t **io_callbacks;
1823        char **io_names;
1824        struct virtqueue **vqs;
1825        u32 i, j, nr_ports, nr_queues;
1826        int err;
1827
1828        nr_ports = portdev->config.max_nr_ports;
1829        nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
1830
1831        vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
1832        io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
1833        io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
1834        portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1835                                  GFP_KERNEL);
1836        portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1837                                   GFP_KERNEL);
1838        if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
1839            !portdev->out_vqs) {
1840                err = -ENOMEM;
1841                goto free;
1842        }
1843
1844        /*
1845         * For backward compat (newer host but older guest), the host
1846         * spawns a console port first and also inits the vqs for port
1847         * 0 before others.
1848         */
1849        j = 0;
1850        io_callbacks[j] = in_intr;
1851        io_callbacks[j + 1] = out_intr;
1852        io_names[j] = "input";
1853        io_names[j + 1] = "output";
1854        j += 2;
1855
1856        if (use_multiport(portdev)) {
1857                io_callbacks[j] = control_intr;
1858                io_callbacks[j + 1] = NULL;
1859                io_names[j] = "control-i";
1860                io_names[j + 1] = "control-o";
1861
1862                for (i = 1; i < nr_ports; i++) {
1863                        j += 2;
1864                        io_callbacks[j] = in_intr;
1865                        io_callbacks[j + 1] = out_intr;
1866                        io_names[j] = "input";
1867                        io_names[j + 1] = "output";
1868                }
1869        }
1870        /* Find the queues. */
1871        err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs,
1872                                              io_callbacks,
1873                                              (const char **)io_names);
1874        if (err)
1875                goto free;
1876
1877        j = 0;
1878        portdev->in_vqs[0] = vqs[0];
1879        portdev->out_vqs[0] = vqs[1];
1880        j += 2;
1881        if (use_multiport(portdev)) {
1882                portdev->c_ivq = vqs[j];
1883                portdev->c_ovq = vqs[j + 1];
1884
1885                for (i = 1; i < nr_ports; i++) {
1886                        j += 2;
1887                        portdev->in_vqs[i] = vqs[j];
1888                        portdev->out_vqs[i] = vqs[j + 1];
1889                }
1890        }
1891        kfree(io_names);
1892        kfree(io_callbacks);
1893        kfree(vqs);
1894
1895        return 0;
1896
1897free:
1898        kfree(portdev->out_vqs);
1899        kfree(portdev->in_vqs);
1900        kfree(io_names);
1901        kfree(io_callbacks);
1902        kfree(vqs);
1903
1904        return err;
1905}
1906
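/*
 * No file operations are provided for the "virtio-portsdev" chardev
 * itself; registering it simply reserves the major number that the
 * per-port character devices are created under.
 */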
1907static const struct file_operations portdev_fops = {
1908        .owner = THIS_MODULE,
1909};
1910
1911static void remove_vqs(struct ports_device *portdev)
1912{
1913        portdev->vdev->config->del_vqs(portdev->vdev);
1914        kfree(portdev->in_vqs);
1915        kfree(portdev->out_vqs);
1916}
1917
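/* Free every buffer, consumed or not, still sitting in the control-in vq. */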
1918static void remove_controlq_data(struct ports_device *portdev)
1919{
1920        struct port_buffer *buf;
1921        unsigned int len;
1922
1923        if (!use_multiport(portdev))
1924                return;
1925
1926        while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
1927                free_buf(buf, true);
1928
1929        while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
1930                free_buf(buf, true);
1931}
1932
1933/*
1934 * Once we're further in boot, we get probed like any other virtio
1935 * device.
1936 *
1937 * If the host also supports multiple console ports, we check the
1938 * config space to see how many ports the host has spawned.  We
1939 * initialize each port found.
1940 */
1941static int virtcons_probe(struct virtio_device *vdev)
1942{
1943        struct ports_device *portdev;
1944        int err;
1945        bool multiport;
1946        bool early = early_put_chars != NULL;
1947
1948        /* Make sure early_put_chars is read now */
1949        barrier();
1950
1951        portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
1952        if (!portdev) {
1953                err = -ENOMEM;
1954                goto fail;
1955        }
1956
1957        /* Attach this portdev to this virtio_device, and vice-versa. */
1958        portdev->vdev = vdev;
1959        vdev->priv = portdev;
1960
1961        spin_lock_irq(&pdrvdata_lock);
1962        portdev->drv_index = pdrvdata.index++;
1963        spin_unlock_irq(&pdrvdata_lock);
1964
1965        portdev->chr_major = register_chrdev(0, "virtio-portsdev",
1966                                             &portdev_fops);
1967        if (portdev->chr_major < 0) {
1968                dev_err(&vdev->dev,
1969                        "Error %d registering chrdev for device %u\n",
1970                        portdev->chr_major, portdev->drv_index);
1971                err = portdev->chr_major;
1972                goto free;
1973        }
1974
1975        multiport = false;
1976        portdev->config.max_nr_ports = 1;
1977
1978        /* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
1979        if (!is_rproc_serial(vdev) &&
1980            virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
1981                                  offsetof(struct virtio_console_config,
1982                                           max_nr_ports),
1983                                  &portdev->config.max_nr_ports) == 0) {
1984                multiport = true;
1985        }
1986
1987        err = init_vqs(portdev);
1988        if (err < 0) {
1989                dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
1990                goto free_chrdev;
1991        }
1992
1993        spin_lock_init(&portdev->ports_lock);
1994        INIT_LIST_HEAD(&portdev->ports);
1995
1996        if (multiport) {
1997                unsigned int nr_added_bufs;
1998
1999                spin_lock_init(&portdev->cvq_lock);
2000                INIT_WORK(&portdev->control_work, &control_work_handler);
2001
2002                nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
2003                if (!nr_added_bufs) {
2004                        dev_err(&vdev->dev,
2005                                "Error allocating buffers for control queue\n");
2006                        err = -ENOMEM;
2007                        goto free_vqs;
2008                }
2009        } else {
2010                /*
2011                 * For backward compatibility: Create a console port
2012                 * if we're running on an older host.
2013                 */
2014                add_port(portdev, 0);
2015        }
2016
2017        spin_lock_irq(&pdrvdata_lock);
2018        list_add_tail(&portdev->list, &pdrvdata.portdevs);
2019        spin_unlock_irq(&pdrvdata_lock);
2020
2021        __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2022                           VIRTIO_CONSOLE_DEVICE_READY, 1);
2023
2024        /*
2025         * If there was an early virtio console, assume that there are no
2026         * other consoles. We need to wait until the hvc_alloc matches the
2027         * hvc_instantiate, otherwise tty_open will complain, resulting in
2028         * a "Warning: unable to open an initial console" boot failure.
2029         * Without multiport this is done in add_port above. With multiport
2030         * this might take some host<->guest communication - thus we have to
2031         * wait.
2032         */
2033        if (multiport && early)
2034                wait_for_completion(&early_console_added);
2035
2036        return 0;
2037
2038free_vqs:
2039        /* The host might want to notify management software of the device add failure */
2040        __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2041                           VIRTIO_CONSOLE_DEVICE_READY, 0);
2042        remove_vqs(portdev);
2043free_chrdev:
2044        unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2045free:
2046        kfree(portdev);
2047fail:
2048        return err;
2049}
2050
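/*
 * Device teardown: unlink the device from the global list, quiesce it
 * (reset + cancel pending control work), unplug every remaining port,
 * and release the chardev major and the virtqueues.
 */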
2051static void virtcons_remove(struct virtio_device *vdev)
2052{
2053        struct ports_device *portdev;
2054        struct port *port, *port2;
2055
2056        portdev = vdev->priv;
2057
2058        spin_lock_irq(&pdrvdata_lock);
2059        list_del(&portdev->list);
2060        spin_unlock_irq(&pdrvdata_lock);
2061
2062        /* Disable interrupts for vqs */
2063        vdev->config->reset(vdev);
2064        /* Finish any control work that is already queued */
2065        if (use_multiport(portdev))
2066                cancel_work_sync(&portdev->control_work);
2067
2068        list_for_each_entry_safe(port, port2, &portdev->ports, list)
2069                unplug_port(port);
2070
2071        unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2072
2073        /*
2074         * When yanking out a device, we immediately lose the
2075         * (device-side) queues.  So there's no point in keeping the
2076         * guest side around till we drop our final reference.  This
2077         * also means that any ports which are in an open state will
2078         * have to just stop using the port, as the vqs are going
2079         * away.
2080         */
2081        remove_controlq_data(portdev);
2082        remove_vqs(portdev);
2083        kfree(portdev);
2084}
2085
2086static struct virtio_device_id id_table[] = {
2087        { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
2088        { 0 },
2089};
2090
2091static unsigned int features[] = {
2092        VIRTIO_CONSOLE_F_SIZE,
2093        VIRTIO_CONSOLE_F_MULTIPORT,
2094};
2095
2096static struct virtio_device_id rproc_serial_id_table[] = {
2097#if IS_ENABLED(CONFIG_REMOTEPROC)
2098        { VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
2099#endif
2100        { 0 },
2101};
2102
2103static unsigned int rproc_serial_features[] = {
2104};
2105
2106#ifdef CONFIG_PM
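/*
 * Suspend: reset the device, stop callbacks and pending control work,
 * drop all queued buffers and delete the vqs; everything is rebuilt
 * in virtcons_restore().
 */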
2107static int virtcons_freeze(struct virtio_device *vdev)
2108{
2109        struct ports_device *portdev;
2110        struct port *port;
2111
2112        portdev = vdev->priv;
2113
2114        vdev->config->reset(vdev);
2115
2116        virtqueue_disable_cb(portdev->c_ivq);
2117        cancel_work_sync(&portdev->control_work);
2118        /*
2119         * Once more: if control_work_handler() was running, it would
2120         * enable the cb as the last step.
2121         */
2122        virtqueue_disable_cb(portdev->c_ivq);
2123        remove_controlq_data(portdev);
2124
2125        list_for_each_entry(port, &portdev->ports, list) {
2126                virtqueue_disable_cb(port->in_vq);
2127                virtqueue_disable_cb(port->out_vq);
2128                /*
2129                 * We'll ask the host again, after restore, whether it
2130                 * has the port opened or closed.
2131                 */
2132                port->host_connected = false;
2133                remove_port_data(port);
2134        }
2135        remove_vqs(portdev);
2136
2137        return 0;
2138}
2139
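/*
 * Resume: re-create the vqs, refill the control and per-port receive
 * queues, and re-announce each port to the host, re-opening the ones
 * the guest still had open.
 */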
2140static int virtcons_restore(struct virtio_device *vdev)
2141{
2142        struct ports_device *portdev;
2143        struct port *port;
2144        int ret;
2145
2146        portdev = vdev->priv;
2147
2148        ret = init_vqs(portdev);
2149        if (ret)
2150                return ret;
2151
2152        if (use_multiport(portdev))
2153                fill_queue(portdev->c_ivq, &portdev->cvq_lock);
2154
2155        list_for_each_entry(port, &portdev->ports, list) {
2156                port->in_vq = portdev->in_vqs[port->id];
2157                port->out_vq = portdev->out_vqs[port->id];
2158
2159                fill_queue(port->in_vq, &port->inbuf_lock);
2160
2161                /* Get port open/close status on the host */
2162                send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
2163
2164                /*
2165                 * If a port was open at the time of suspending, we
2166                 * have to let the host know that it's still open.
2167                 */
2168                if (port->guest_connected)
2169                        send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
2170        }
2171        return 0;
2172}
2173#endif
2174
2175static struct virtio_driver virtio_console = {
2176        .feature_table = features,
2177        .feature_table_size = ARRAY_SIZE(features),
2178        .driver.name =  KBUILD_MODNAME,
2179        .driver.owner = THIS_MODULE,
2180        .id_table =     id_table,
2181        .probe =        virtcons_probe,
2182        .remove =       virtcons_remove,
2183        .config_changed = config_intr,
2184#ifdef CONFIG_PM
2185        .freeze =       virtcons_freeze,
2186        .restore =      virtcons_restore,
2187#endif
2188};
2189
2190static struct virtio_driver virtio_rproc_serial = {
2191        .feature_table = rproc_serial_features,
2192        .feature_table_size = ARRAY_SIZE(rproc_serial_features),
2193        .driver.name =  "virtio_rproc_serial",
2194        .driver.owner = THIS_MODULE,
2195        .id_table =     rproc_serial_id_table,
2196        .probe =        virtcons_probe,
2197        .remove =       virtcons_remove,
2198};
2199
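/*
 * Module init: create the "virtio-ports" class and debugfs directory
 * shared by all devices, then register both the console and the
 * rproc-serial virtio drivers.
 */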
2200static int __init init(void)
2201{
2202        int err;
2203
2204        pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
2205        if (IS_ERR(pdrvdata.class)) {
2206                err = PTR_ERR(pdrvdata.class);
2207                pr_err("Error %d creating virtio-ports class\n", err);
2208                return err;
2209        }
2210
2211        pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
2212        if (!pdrvdata.debugfs_dir) {
2213                pr_warning("Error creating debugfs dir for virtio-ports\n");
2215        }
2216        INIT_LIST_HEAD(&pdrvdata.consoles);
2217        INIT_LIST_HEAD(&pdrvdata.portdevs);
2218
2219        err = register_virtio_driver(&virtio_console);
2220        if (err < 0) {
2221                pr_err("Error %d registering virtio driver\n", err);
2222                goto free;
2223        }
2224        err = register_virtio_driver(&virtio_rproc_serial);
2225        if (err < 0) {
2226                pr_err("Error %d registering virtio rproc serial driver\n",
2227                       err);
2228                goto unregister;
2229        }
2230        return 0;
2231unregister:
2232        unregister_virtio_driver(&virtio_console);
2233free:
2234        if (pdrvdata.debugfs_dir)
2235                debugfs_remove_recursive(pdrvdata.debugfs_dir);
2236        class_destroy(pdrvdata.class);
2237        return err;
2238}
2239
2240static void __exit fini(void)
2241{
2242        reclaim_dma_bufs();
2243
2244        unregister_virtio_driver(&virtio_console);
2245        unregister_virtio_driver(&virtio_rproc_serial);
2246
2247        class_destroy(pdrvdata.class);
2248        if (pdrvdata.debugfs_dir)
2249                debugfs_remove_recursive(pdrvdata.debugfs_dir);
2250}
2251module_init(init);
2252module_exit(fini);
2253
2254MODULE_DEVICE_TABLE(virtio, id_table);
2255MODULE_DESCRIPTION("Virtio console driver");
2256MODULE_LICENSE("GPL");
2257