linux/arch/um/drivers/port_kern.c
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 * Licensed under the GPL
 */

#include "linux/completion.h"
#include "linux/interrupt.h"
#include "linux/list.h"
#include "linux/mutex.h"
#include "linux/slab.h"
#include "linux/workqueue.h"
#include "asm/atomic.h"
#include "init.h"
#include "irq_kern.h"
#include "os.h"
#include "port.h"

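/*
 * One of these exists for each host port that UML listens on.  It holds the
 * listening descriptor, a count of consoles currently waiting for a
 * connection, and lists of connections that are still being set up (pending)
 * and ready to be handed to a waiter (connections).
 */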
struct port_list {
        struct list_head list;
        atomic_t wait_count;
        int has_connection;
        struct completion done;
        int port;
        int fd;
        spinlock_t lock;
        struct list_head pending;
        struct list_head connections;
};

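/*
 * Per-console state - the port this console is attached to plus the pids of
 * the helper and telnetd processes serving its current connection.
 */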
struct port_dev {
        struct port_list *port;
        int helper_pid;
        int telnetd_pid;
};

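/*
 * A single incoming connection - the descriptor handed back by the helper,
 * the socketpair used to pass it, and the pids of the helper and telnetd
 * processes.
 */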
struct connection {
        struct list_head list;
        int fd;
        int helper_pid;
        int socket[2];
        int telnetd_pid;
        struct port_list *port;
};

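/*
 * Interrupt handler for the socketpair over which the helper passes the
 * connection's descriptor back.  It receives the descriptor, moves the
 * connection from the pending list to the connections list and wakes up
 * any waiter in port_wait().
 */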
static irqreturn_t pipe_interrupt(int irq, void *data)
{
        struct connection *conn = data;
        int fd;

        fd = os_rcv_fd(conn->socket[0], &conn->helper_pid);
        if (fd < 0) {
                if (fd == -EAGAIN)
                        return IRQ_NONE;

                printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n",
                       -fd);
                os_close_file(conn->fd);
        }

        list_del(&conn->list);

        conn->fd = fd;
        list_add(&conn->list, &conn->port->connections);

        complete(&conn->port->done);
        return IRQ_HANDLED;
}

#define NO_WAITER_MSG \
    "****\n" \
    "There are currently no UML consoles waiting for port connections.\n" \
    "Either disconnect from one to make it available or activate some more\n" \
    "by enabling more consoles in the UML /etc/inittab.\n" \
    "****\n"

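/*
 * Accept one connection on the port's listening descriptor, set up the
 * helper for it and register an interrupt for the descriptor-passing
 * socketpair.  Returns 1 if a connection was accepted, 0 otherwise.
 */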
static int port_accept(struct port_list *port)
{
        struct connection *conn;
        int fd, socket[2], pid;

        fd = port_connection(port->fd, socket, &pid);
        if (fd < 0) {
                if (fd != -EAGAIN)
                        printk(KERN_ERR "port_accept : port_connection "
                               "returned %d\n", -fd);
                goto out;
        }

        conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
        if (conn == NULL) {
                printk(KERN_ERR "port_accept : failed to allocate "
                       "connection\n");
                goto out_close;
        }
        *conn = ((struct connection)
                { .list         = LIST_HEAD_INIT(conn->list),
                  .fd           = fd,
                  .socket       = { socket[0], socket[1] },
                  .telnetd_pid  = pid,
                  .port         = port });

        if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
                          IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
                          "telnetd", conn)) {
                printk(KERN_ERR "port_accept : failed to get IRQ for "
                       "telnetd\n");
                goto out_free;
        }

        if (atomic_read(&port->wait_count) == 0) {
                os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
                printk(KERN_ERR "No one waiting for port\n");
        }
        list_add(&conn->list, &port->pending);
        return 1;

 out_free:
        kfree(conn);
 out_close:
        os_close_file(fd);
        os_kill_process(pid, 1);
 out:
        return 0;
}

static DEFINE_MUTEX(ports_mutex);
static LIST_HEAD(ports);

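/*
 * Workqueue handler scheduled by port_interrupt() - it reactivates the
 * accept IRQ and drains all pending connections on each port that reported
 * activity.
 */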
static void port_work_proc(struct work_struct *unused)
{
        struct port_list *port;
        struct list_head *ele;
        unsigned long flags;

        local_irq_save(flags);
        list_for_each(ele, &ports) {
                port = list_entry(ele, struct port_list, list);
                if (!port->has_connection)
                        continue;

                reactivate_fd(port->fd, ACCEPT_IRQ);
                while (port_accept(port))
                        ;
                port->has_connection = 0;
        }
        local_irq_restore(flags);
}

DECLARE_WORK(port_work, port_work_proc);

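/*
 * Interrupt handler for the listening descriptor - it just marks the port
 * as having a connection waiting and defers the accept to port_work_proc().
 */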
static irqreturn_t port_interrupt(int irq, void *data)
{
        struct port_list *port = data;

        port->has_connection = 1;
        schedule_work(&port_work);
        return IRQ_HANDLED;
}

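/*
 * Called when a console is configured to use a host port.  It looks up the
 * existing port_list for that port number or creates one, binding the
 * listening descriptor and registering its interrupt, and then allocates
 * the per-console port_dev.  Returns the port_dev, or NULL on failure.
 */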
void *port_data(int port_num)
{
        struct list_head *ele;
        struct port_list *port;
        struct port_dev *dev = NULL;
        int fd;

        mutex_lock(&ports_mutex);
        list_for_each(ele, &ports) {
                port = list_entry(ele, struct port_list, list);
                if (port->port == port_num)
                        goto found;
        }
        port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
        if (port == NULL) {
                printk(KERN_ERR "Allocation of port list failed\n");
                goto out;
        }

        fd = port_listen_fd(port_num);
        if (fd < 0) {
                printk(KERN_ERR "binding to port %d failed, errno = %d\n",
                       port_num, -fd);
                goto out_free;
        }

        if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
                          IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
                          "port", port)) {
                printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
                goto out_close;
        }

        *port = ((struct port_list)
                { .list                 = LIST_HEAD_INIT(port->list),
                  .wait_count           = ATOMIC_INIT(0),
                  .has_connection       = 0,
                  .port                 = port_num,
                  .fd                   = fd,
                  .pending              = LIST_HEAD_INIT(port->pending),
                  .connections          = LIST_HEAD_INIT(port->connections) });
        spin_lock_init(&port->lock);
        init_completion(&port->done);
        list_add(&port->list, &ports);

 found:
        dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
        if (dev == NULL) {
                printk(KERN_ERR "Allocation of port device entry failed\n");
                goto out;
        }

        *dev = ((struct port_dev) { .port               = port,
                                    .helper_pid         = -1,
                                    .telnetd_pid        = -1 });
        goto out;

 out_close:
        os_close_file(fd);
 out_free:
        kfree(port);
 out:
        mutex_unlock(&ports_mutex);
        return dev;
}

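/*
 * Wait for a connection on the console's port.  Sleeps until
 * pipe_interrupt() signals that a connection has been handed over, throws
 * away failed connections, and returns the descriptor of the first good
 * one, or -ERESTARTSYS if the wait was interrupted.
 */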
int port_wait(void *data)
{
        struct port_dev *dev = data;
        struct connection *conn;
        struct port_list *port = dev->port;
        int fd;

        atomic_inc(&port->wait_count);
        while (1) {
                fd = -ERESTARTSYS;
                if (wait_for_completion_interruptible(&port->done))
                        goto out;

                spin_lock(&port->lock);

                conn = list_entry(port->connections.next, struct connection,
                                  list);
                list_del(&conn->list);
                spin_unlock(&port->lock);

                os_shutdown_socket(conn->socket[0], 1, 1);
                os_close_file(conn->socket[0]);
                os_shutdown_socket(conn->socket[1], 1, 1);
                os_close_file(conn->socket[1]);

                /*
                 * This is done here because freeing an IRQ can't be done
                 * within the IRQ handler.  So, pipe_interrupt always
                 * completes port->done regardless of whether it got a
                 * successful connection.  Then we loop here throwing out
                 * failed connections until a good one is found.
                 */
                free_irq(TELNETD_IRQ, conn);

                if (conn->fd >= 0)
                        break;
                os_close_file(conn->fd);
                kfree(conn);
        }

        fd = conn->fd;
        dev->helper_pid = conn->helper_pid;
        dev->telnetd_pid = conn->telnetd_pid;
        kfree(conn);
 out:
        atomic_dec(&port->wait_count);
        return fd;
}

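/* Kill the helper and telnetd processes serving this console's connection. */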
void port_remove_dev(void *d)
{
        struct port_dev *dev = d;

        if (dev->helper_pid != -1)
                os_kill_process(dev->helper_pid, 0);
        if (dev->telnetd_pid != -1)
                os_kill_process(dev->telnetd_pid, 1);
        dev->helper_pid = -1;
        dev->telnetd_pid = -1;
}

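/* Free a console's port_dev, killing its helper processes first. */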
void port_kern_free(void *d)
{
        struct port_dev *dev = d;

        port_remove_dev(dev);
        kfree(dev);
}

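/*
 * Exit-time cleanup - free the accept interrupt and close the listening
 * descriptor of every port.
 */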
static void free_port(void)
{
        struct list_head *ele;
        struct port_list *port;

        list_for_each(ele, &ports) {
                port = list_entry(ele, struct port_list, list);
                free_irq_by_fd(port->fd);
                os_close_file(port->fd);
        }
}

__uml_exitcall(free_port);