linux/drivers/net/plip.c
   1/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
   2/* PLIP: A parallel port "network" driver for Linux. */
   3/* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
   4/*
   5 * Authors:     Donald Becker <becker@scyld.com>
   6 *              Tommy Thorn <thorn@daimi.aau.dk>
   7 *              Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
   8 *              Alan Cox <gw4pts@gw4pts.ampr.org>
   9 *              Peter Bauer <100136.3530@compuserve.com>
  10 *              Niibe Yutaka <gniibe@mri.co.jp>
  11 *              Nimrod Zimerman <zimerman@mailandnews.com>
  12 *
  13 * Enhancements:
  14 *              Modularization and ifreq/ifmap support by Alan Cox.
  15 *              Rewritten by Niibe Yutaka.
  16 *              parport-sharing awareness code by Philip Blundell.
  17 *              SMP locking by Niibe Yutaka.
  18 *              Support for parallel ports with no IRQ (poll mode),
  19 *              Modifications to use the parallel port API
  20 *              by Nimrod Zimerman.
  21 *
  22 * Fixes:
  23 *              Niibe Yutaka
  24 *                - Module initialization.
  25 *                - MTU fix.
  26 *                - Make sure other end is OK, before sending a packet.
  27 *                - Fix immediate timer problem.
  28 *
  29 *              Al Viro
  30 *                - Changed {enable,disable}_irq handling to make it work
  31 *                  with new ("stack") semantics.
  32 *
  33 *              This program is free software; you can redistribute it and/or
  34 *              modify it under the terms of the GNU General Public License
  35 *              as published by the Free Software Foundation; either version
  36 *              2 of the License, or (at your option) any later version.
  37 */
  38
  39/*
  40 * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
  41 * inspired by Russ Nelson's parallel port packet driver.
  42 *
  43 * NOTE:
   44 *     Tanabe Hiroyasu changed the protocol, and that version shipped in
   45 *     Linux v1.0.  Because of the need to communicate with DOS machines
   46 *     running the Crynwr packet driver, Peter Bauer changed the protocol
   47 *     back to the original one.
   48 *
   49 *     This version follows the original PLIP protocol, so it cannot
   50 *     talk to the PLIP of Linux v1.0.
  51 */
  52
  53/*
   54 *     To use with a DOS box, turn on the ARP switch:
  55 *      # ifconfig plip[0-2] arp
  56 */
  57static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
  58
  59/*
  60  Sources:
  61        Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
  62        "parallel.asm" parallel port packet driver.
  63
  64  The "Crynwr" parallel port standard specifies the following protocol:
  65    Trigger by sending nibble '0x8' (this causes interrupt on other end)
  66    count-low octet
  67    count-high octet
  68    ... data octets
  69    checksum octet
  70  Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
  71                        <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
  72
  73  The packet is encapsulated as if it were ethernet.
  74
  75  The cable used is a de facto standard parallel null cable -- sold as
  76  a "LapLink" cable by various places.  You'll need a 12-conductor cable to
  77  make one yourself.  The wiring is:
  78    SLCTIN      17 - 17
  79    GROUND      25 - 25
  80    D0->ERROR   2 - 15          15 - 2
  81    D1->SLCT    3 - 13          13 - 3
  82    D2->PAPOUT  4 - 12          12 - 4
  83    D3->ACK     5 - 10          10 - 5
  84    D4->BUSY    6 - 11          11 - 6
   85  Do not connect the other pins.  They are:
  86    D5,D6,D7 are 7,8,9
  87    STROBE is 1, FEED is 14, INIT is 16
  88    extra grounds are 18,19,20,21,22,23,24
  89*/
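
/*
 * Illustrative sketch only (a hypothetical helper, not used anywhere in
 * this driver): how one data octet maps onto the two nibble transfers
 * described above -- the low nibble is sent with bit 4 set, the high
 * nibble with bit 4 clear, matching the <send ...> steps listed above.
 */
static inline void plip_example_octet_to_nibbles(unsigned char octet,
                                                 unsigned char *first,
                                                 unsigned char *second)
{
        *first  = 0x10 | (octet & 0x0f);        /* <send 0x10+(octet&0x0F)> */
        *second = (octet >> 4) & 0x0f;          /* <send 0x00+((octet>>4)&0x0F)> */
}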
  90
  91#include <linux/module.h>
  92#include <linux/kernel.h>
  93#include <linux/types.h>
  94#include <linux/fcntl.h>
  95#include <linux/interrupt.h>
  96#include <linux/string.h>
  97#include <linux/if_ether.h>
  98#include <linux/in.h>
  99#include <linux/errno.h>
 100#include <linux/delay.h>
 101#include <linux/init.h>
 102#include <linux/netdevice.h>
 103#include <linux/etherdevice.h>
 104#include <linux/inetdevice.h>
 105#include <linux/skbuff.h>
 106#include <linux/if_plip.h>
 107#include <linux/workqueue.h>
 108#include <linux/spinlock.h>
 109#include <linux/completion.h>
 110#include <linux/parport.h>
 111#include <linux/bitops.h>
 112
 113#include <net/neighbour.h>
 114
 115#include <asm/system.h>
 116#include <asm/irq.h>
 117#include <asm/byteorder.h>
 118
 119/* Maximum number of devices to support. */
 120#define PLIP_MAX  8
 121
 122/* Use 0 for production, 1 for verification, >2 for debug */
 123#ifndef NET_DEBUG
 124#define NET_DEBUG 1
 125#endif
 126static const unsigned int net_debug = NET_DEBUG;
 127
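/* dev->irq == -1 means the parallel port has no interrupt line; PLIP then
   falls back to the IRQ-less poll mode driven by plip_timer_bh(). */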
 128#define ENABLE(irq)  if (irq != -1) enable_irq(irq)
 129#define DISABLE(irq) if (irq != -1) disable_irq(irq)
 130
  131/* In microseconds */
 132#define PLIP_DELAY_UNIT            1
 133
 134/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
 135#define PLIP_TRIGGER_WAIT        500
 136
 137/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
 138#define PLIP_NIBBLE_WAIT        3000
 139
 140/* Bottom halves */
 141static void plip_kick_bh(struct work_struct *work);
 142static void plip_bh(struct work_struct *work);
 143static void plip_timer_bh(struct work_struct *work);
 144
 145/* Interrupt handler */
 146static void plip_interrupt(void *dev_id);
 147
 148/* Functions for DEV methods */
 149static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
 150static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
 151                            unsigned short type, const void *daddr,
 152                            const void *saddr, unsigned len);
 153static int plip_hard_header_cache(const struct neighbour *neigh,
 154                                  struct hh_cache *hh);
 155static int plip_open(struct net_device *dev);
 156static int plip_close(struct net_device *dev);
 157static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 158static int plip_preempt(void *handle);
 159static void plip_wakeup(void *handle);
 160
 161enum plip_connection_state {
 162        PLIP_CN_NONE=0,
 163        PLIP_CN_RECEIVE,
 164        PLIP_CN_SEND,
 165        PLIP_CN_CLOSING,
 166        PLIP_CN_ERROR
 167};
 168
 169enum plip_packet_state {
 170        PLIP_PK_DONE=0,
 171        PLIP_PK_TRIGGER,
 172        PLIP_PK_LENGTH_LSB,
 173        PLIP_PK_LENGTH_MSB,
 174        PLIP_PK_DATA,
 175        PLIP_PK_CHECKSUM
 176};
 177
 178enum plip_nibble_state {
 179        PLIP_NB_BEGIN,
 180        PLIP_NB_1,
 181        PLIP_NB_2,
 182};
 183
 184struct plip_local {
 185        enum plip_packet_state state;
 186        enum plip_nibble_state nibble;
 187        union {
 188                struct {
 189#if defined(__LITTLE_ENDIAN)
 190                        unsigned char lsb;
 191                        unsigned char msb;
 192#elif defined(__BIG_ENDIAN)
 193                        unsigned char msb;
 194                        unsigned char lsb;
 195#else
 196#error  "Please fix the endianness defines in <asm/byteorder.h>"
 197#endif
 198                } b;
 199                unsigned short h;
 200        } length;
 201        unsigned short byte;
 202        unsigned char  checksum;
 203        unsigned char  data;
 204        struct sk_buff *skb;
 205};
 206
  207struct net_local {
  208        struct net_device *dev;
  209        struct work_struct immediate;           /* runs plip_bh() */
  210        struct delayed_work deferred;           /* runs plip_kick_bh() */
  211        struct delayed_work timer;              /* IRQ-less poll mode, runs plip_timer_bh() */
  212        struct plip_local snd_data;
  213        struct plip_local rcv_data;
  214        struct pardevice *pardev;
  215        unsigned long  trigger;                 /* trigger timeout, in PLIP_DELAY_UNIT usec */
  216        unsigned long  nibble;                  /* nibble timeout, in PLIP_DELAY_UNIT usec */
  217        enum plip_connection_state connection;
  218        unsigned short timeout_count;
  219        int is_deferred;
  220        int port_owner;                         /* non-zero while we hold the parport */
  221        int should_relinquish;                  /* release the port once the current transfer ends */
  222        spinlock_t lock;
  223        atomic_t kill_timer;
  224        struct completion killed_timer_cmp;
  225};
 226
 227static inline void enable_parport_interrupts (struct net_device *dev)
 228{
 229        if (dev->irq != -1)
 230        {
  231                struct parport *port =
  232                   ((struct net_local *)netdev_priv(dev))->pardev->port;
 233                port->ops->enable_irq (port);
 234        }
 235}
 236
 237static inline void disable_parport_interrupts (struct net_device *dev)
 238{
 239        if (dev->irq != -1)
 240        {
  241                struct parport *port =
  242                   ((struct net_local *)netdev_priv(dev))->pardev->port;
 243                port->ops->disable_irq (port);
 244        }
 245}
 246
 247static inline void write_data (struct net_device *dev, unsigned char data)
 248{
  249        struct parport *port =
  250           ((struct net_local *)netdev_priv(dev))->pardev->port;
 251
 252        port->ops->write_data (port, data);
 253}
 254
 255static inline unsigned char read_status (struct net_device *dev)
 256{
  257        struct parport *port =
  258           ((struct net_local *)netdev_priv(dev))->pardev->port;
 259
 260        return port->ops->read_status (port);
 261}
 262
 263static const struct header_ops plip_header_ops = {
 264        .create = plip_hard_header,
 265        .cache  = plip_hard_header_cache,
 266};
 267
 268/* Entry point of PLIP driver.
 269   Probe the hardware, and register/initialize the driver.
 270
 271   PLIP is rather weird, because of the way it interacts with the parport
  272   system.  It is _not_ initialised from Space.c.  Instead, plip_attach()
  273   is called by the parport layer for each available port; it allocates a
  274   "struct net_device" and then calls us here.
 275
 276   */
 277static void
 278plip_init_netdev(struct net_device *dev)
 279{
 280        struct net_local *nl = netdev_priv(dev);
 281
 282        /* Then, override parts of it */
 283        dev->hard_start_xmit     = plip_tx_packet;
 284        dev->open                = plip_open;
 285        dev->stop                = plip_close;
 286        dev->do_ioctl            = plip_ioctl;
 287
 288        dev->tx_queue_len        = 10;
 289        dev->flags               = IFF_POINTOPOINT|IFF_NOARP;
 290        memset(dev->dev_addr, 0xfc, ETH_ALEN);
 291
 292        dev->header_ops          = &plip_header_ops;
 293
 294
 295        nl->port_owner = 0;
 296
 297        /* Initialize constants */
 298        nl->trigger     = PLIP_TRIGGER_WAIT;
 299        nl->nibble      = PLIP_NIBBLE_WAIT;
 300
 301        /* Initialize task queue structures */
 302        INIT_WORK(&nl->immediate, plip_bh);
 303        INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
 304
 305        if (dev->irq == -1)
 306                INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
 307
 308        spin_lock_init(&nl->lock);
 309}
 310
 311/* Bottom half handler for the delayed request.
  312   This routine is scheduled via schedule_delayed_work().
 313   Request `plip_bh' to be invoked. */
 314static void
 315plip_kick_bh(struct work_struct *work)
 316{
 317        struct net_local *nl =
 318                container_of(work, struct net_local, deferred.work);
 319
 320        if (nl->is_deferred)
 321                schedule_work(&nl->immediate);
 322}
 323
 324/* Forward declarations of internal routines */
 325static int plip_none(struct net_device *, struct net_local *,
 326                     struct plip_local *, struct plip_local *);
 327static int plip_receive_packet(struct net_device *, struct net_local *,
 328                               struct plip_local *, struct plip_local *);
 329static int plip_send_packet(struct net_device *, struct net_local *,
 330                            struct plip_local *, struct plip_local *);
 331static int plip_connection_close(struct net_device *, struct net_local *,
 332                                 struct plip_local *, struct plip_local *);
 333static int plip_error(struct net_device *, struct net_local *,
 334                      struct plip_local *, struct plip_local *);
 335static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
 336                                 struct plip_local *snd,
 337                                 struct plip_local *rcv,
 338                                 int error);
 339
 340#define OK        0
 341#define TIMEOUT   1
 342#define ERROR     2
 343#define HS_TIMEOUT      3
 344
 345typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
 346                         struct plip_local *snd, struct plip_local *rcv);
 347
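/* Indexed by enum plip_connection_state (PLIP_CN_NONE ... PLIP_CN_ERROR),
   dispatched from plip_bh(). */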
 348static const plip_func connection_state_table[] =
 349{
 350        plip_none,
 351        plip_receive_packet,
 352        plip_send_packet,
 353        plip_connection_close,
 354        plip_error
 355};
 356
 357/* Bottom half handler of PLIP. */
 358static void
 359plip_bh(struct work_struct *work)
 360{
 361        struct net_local *nl = container_of(work, struct net_local, immediate);
 362        struct plip_local *snd = &nl->snd_data;
 363        struct plip_local *rcv = &nl->rcv_data;
 364        plip_func f;
 365        int r;
 366
 367        nl->is_deferred = 0;
 368        f = connection_state_table[nl->connection];
 369        if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
 370            && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
 371                nl->is_deferred = 1;
 372                schedule_delayed_work(&nl->deferred, 1);
 373        }
 374}
 375
 376static void
 377plip_timer_bh(struct work_struct *work)
 378{
 379        struct net_local *nl =
 380                container_of(work, struct net_local, timer.work);
 381
 382        if (!(atomic_read (&nl->kill_timer))) {
 383                plip_interrupt (nl->dev);
 384
 385                schedule_delayed_work(&nl->timer, 1);
 386        }
 387        else {
 388                complete(&nl->killed_timer_cmp);
 389        }
 390}
 391
 392static int
 393plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
 394                      struct plip_local *snd, struct plip_local *rcv,
 395                      int error)
 396{
 397        unsigned char c0;
 398        /*
 399         * This is tricky. If we got here from the beginning of send (either
 400         * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
 401         * already disabled. With the old variant of {enable,disable}_irq()
 402         * extra disable_irq() was a no-op. Now it became mortal - it's
 403         * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
 404         * that is). So we have to treat HS_TIMEOUT and ERROR from send
 405         * in a special way.
 406         */
 407
 408        spin_lock_irq(&nl->lock);
 409        if (nl->connection == PLIP_CN_SEND) {
 410
 411                if (error != ERROR) { /* Timeout */
 412                        nl->timeout_count++;
 413                        if ((error == HS_TIMEOUT
 414                             && nl->timeout_count <= 10)
 415                            || nl->timeout_count <= 3) {
 416                                spin_unlock_irq(&nl->lock);
 417                                /* Try again later */
 418                                return TIMEOUT;
 419                        }
 420                        c0 = read_status(dev);
 421                        printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
 422                               dev->name, snd->state, c0);
 423                } else
 424                        error = HS_TIMEOUT;
 425                dev->stats.tx_errors++;
 426                dev->stats.tx_aborted_errors++;
 427        } else if (nl->connection == PLIP_CN_RECEIVE) {
 428                if (rcv->state == PLIP_PK_TRIGGER) {
 429                        /* Transmission was interrupted. */
 430                        spin_unlock_irq(&nl->lock);
 431                        return OK;
 432                }
 433                if (error != ERROR) { /* Timeout */
 434                        if (++nl->timeout_count <= 3) {
 435                                spin_unlock_irq(&nl->lock);
 436                                /* Try again later */
 437                                return TIMEOUT;
 438                        }
 439                        c0 = read_status(dev);
 440                        printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
 441                               dev->name, rcv->state, c0);
 442                }
 443                dev->stats.rx_dropped++;
 444        }
 445        rcv->state = PLIP_PK_DONE;
 446        if (rcv->skb) {
 447                kfree_skb(rcv->skb);
 448                rcv->skb = NULL;
 449        }
 450        snd->state = PLIP_PK_DONE;
 451        if (snd->skb) {
 452                dev_kfree_skb(snd->skb);
 453                snd->skb = NULL;
 454        }
 455        spin_unlock_irq(&nl->lock);
 456        if (error == HS_TIMEOUT) {
 457                DISABLE(dev->irq);
 458                synchronize_irq(dev->irq);
 459        }
 460        disable_parport_interrupts (dev);
 461        netif_stop_queue (dev);
 462        nl->connection = PLIP_CN_ERROR;
 463        write_data (dev, 0x00);
 464
 465        return TIMEOUT;
 466}
 467
 468static int
 469plip_none(struct net_device *dev, struct net_local *nl,
 470          struct plip_local *snd, struct plip_local *rcv)
 471{
 472        return OK;
 473}
 474
  475/* PLIP_RECEIVE --- receive a byte (two nibbles)
 476   Returns OK on success, TIMEOUT on timeout */
 477static inline int
 478plip_receive(unsigned short nibble_timeout, struct net_device *dev,
 479             enum plip_nibble_state *ns_p, unsigned char *data_p)
 480{
 481        unsigned char c0, c1;
 482        unsigned int cx;
 483
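        /* The cases below fall through on purpose: the nibble state machine
           resumes at the saved state and then continues downwards. */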
 484        switch (*ns_p) {
 485        case PLIP_NB_BEGIN:
 486                cx = nibble_timeout;
 487                while (1) {
 488                        c0 = read_status(dev);
 489                        udelay(PLIP_DELAY_UNIT);
 490                        if ((c0 & 0x80) == 0) {
 491                                c1 = read_status(dev);
 492                                if (c0 == c1)
 493                                        break;
 494                        }
 495                        if (--cx == 0)
 496                                return TIMEOUT;
 497                }
 498                *data_p = (c0 >> 3) & 0x0f;
 499                write_data (dev, 0x10); /* send ACK */
 500                *ns_p = PLIP_NB_1;
 501
 502        case PLIP_NB_1:
 503                cx = nibble_timeout;
 504                while (1) {
 505                        c0 = read_status(dev);
 506                        udelay(PLIP_DELAY_UNIT);
 507                        if (c0 & 0x80) {
 508                                c1 = read_status(dev);
 509                                if (c0 == c1)
 510                                        break;
 511                        }
 512                        if (--cx == 0)
 513                                return TIMEOUT;
 514                }
 515                *data_p |= (c0 << 1) & 0xf0;
 516                write_data (dev, 0x00); /* send ACK */
 517                *ns_p = PLIP_NB_BEGIN;
 518        case PLIP_NB_2:
 519                break;
 520        }
 521        return OK;
 522}
 523
 524/*
 525 *      Determine the packet's protocol ID. The rule here is that we
 526 *      assume 802.3 if the type field is short enough to be a length.
 527 *      This is normal practice and works for any 'now in use' protocol.
 528 *
  529 *      PLIP is ethernet-ish, but the daddr might not be valid if unicast.
  530 *      PLIP fortunately has no bus architecture (it's point-to-point).
  531 *
  532 *      We can't fix the daddr quirk (more of a bug), as it is embedded
  533 *      in far too many old systems, not all of them even running Linux.
 534 */
 535
 536static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
 537{
 538        struct ethhdr *eth;
 539        unsigned char *rawp;
 540
 541        skb_reset_mac_header(skb);
 542        skb_pull(skb,dev->hard_header_len);
 543        eth = eth_hdr(skb);
 544
 545        if(*eth->h_dest&1)
 546        {
 547                if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
 548                        skb->pkt_type=PACKET_BROADCAST;
 549                else
 550                        skb->pkt_type=PACKET_MULTICAST;
 551        }
 552
 553        /*
 554         *      This ALLMULTI check should be redundant by 1.4
 555         *      so don't forget to remove it.
 556         */
 557
 558        if (ntohs(eth->h_proto) >= 1536)
 559                return eth->h_proto;
 560
 561        rawp = skb->data;
 562
 563        /*
 564         *      This is a magic hack to spot IPX packets. Older Novell breaks
 565         *      the protocol design and runs IPX over 802.3 without an 802.2 LLC
 566         *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
 567         *      won't work for fault tolerant netware but does for the rest.
 568         */
 569        if (*(unsigned short *)rawp == 0xFFFF)
 570                return htons(ETH_P_802_3);
 571
 572        /*
 573         *      Real 802.2 LLC
 574         */
 575        return htons(ETH_P_802_2);
 576}
 577
 578/* PLIP_RECEIVE_PACKET --- receive a packet */
 579static int
 580plip_receive_packet(struct net_device *dev, struct net_local *nl,
 581                    struct plip_local *snd, struct plip_local *rcv)
 582{
 583        unsigned short nibble_timeout = nl->nibble;
 584        unsigned char *lbuf;
 585
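        /* As in plip_receive(), the cases below deliberately fall through:
           reception resumes at rcv->state and proceeds stage by stage. */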
 586        switch (rcv->state) {
 587        case PLIP_PK_TRIGGER:
 588                DISABLE(dev->irq);
 589                /* Don't need to synchronize irq, as we can safely ignore it */
 590                disable_parport_interrupts (dev);
 591                write_data (dev, 0x01); /* send ACK */
 592                if (net_debug > 2)
 593                        printk(KERN_DEBUG "%s: receive start\n", dev->name);
 594                rcv->state = PLIP_PK_LENGTH_LSB;
 595                rcv->nibble = PLIP_NB_BEGIN;
 596
 597        case PLIP_PK_LENGTH_LSB:
 598                if (snd->state != PLIP_PK_DONE) {
 599                        if (plip_receive(nl->trigger, dev,
 600                                         &rcv->nibble, &rcv->length.b.lsb)) {
  601                                /* collision; the tx queue is already stopped here */
 602                                rcv->state = PLIP_PK_DONE;
 603                                nl->is_deferred = 1;
 604                                nl->connection = PLIP_CN_SEND;
 605                                schedule_delayed_work(&nl->deferred, 1);
 606                                enable_parport_interrupts (dev);
 607                                ENABLE(dev->irq);
 608                                return OK;
 609                        }
 610                } else {
 611                        if (plip_receive(nibble_timeout, dev,
 612                                         &rcv->nibble, &rcv->length.b.lsb))
 613                                return TIMEOUT;
 614                }
 615                rcv->state = PLIP_PK_LENGTH_MSB;
 616
 617        case PLIP_PK_LENGTH_MSB:
 618                if (plip_receive(nibble_timeout, dev,
 619                                 &rcv->nibble, &rcv->length.b.msb))
 620                        return TIMEOUT;
 621                if (rcv->length.h > dev->mtu + dev->hard_header_len
 622                    || rcv->length.h < 8) {
 623                        printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
 624                        return ERROR;
 625                }
 626                /* Malloc up new buffer. */
 627                rcv->skb = dev_alloc_skb(rcv->length.h + 2);
 628                if (rcv->skb == NULL) {
 629                        printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
 630                        return ERROR;
 631                }
 632                skb_reserve(rcv->skb, 2);       /* Align IP on 16 byte boundaries */
 633                skb_put(rcv->skb,rcv->length.h);
 634                rcv->skb->dev = dev;
 635                rcv->state = PLIP_PK_DATA;
 636                rcv->byte = 0;
 637                rcv->checksum = 0;
 638
 639        case PLIP_PK_DATA:
 640                lbuf = rcv->skb->data;
 641                do
 642                        if (plip_receive(nibble_timeout, dev,
 643                                         &rcv->nibble, &lbuf[rcv->byte]))
 644                                return TIMEOUT;
 645                while (++rcv->byte < rcv->length.h);
 646                do
 647                        rcv->checksum += lbuf[--rcv->byte];
 648                while (rcv->byte);
 649                rcv->state = PLIP_PK_CHECKSUM;
 650
 651        case PLIP_PK_CHECKSUM:
 652                if (plip_receive(nibble_timeout, dev,
 653                                 &rcv->nibble, &rcv->data))
 654                        return TIMEOUT;
 655                if (rcv->data != rcv->checksum) {
 656                        dev->stats.rx_crc_errors++;
 657                        if (net_debug)
 658                                printk(KERN_DEBUG "%s: checksum error\n", dev->name);
 659                        return ERROR;
 660                }
 661                rcv->state = PLIP_PK_DONE;
 662
 663        case PLIP_PK_DONE:
  664                /* Inform the upper layer of the arrival of a packet. */
 665                rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
 666                netif_rx_ni(rcv->skb);
 667                dev->last_rx = jiffies;
 668                dev->stats.rx_bytes += rcv->length.h;
 669                dev->stats.rx_packets++;
 670                rcv->skb = NULL;
 671                if (net_debug > 2)
 672                        printk(KERN_DEBUG "%s: receive end\n", dev->name);
 673
 674                /* Close the connection. */
 675                write_data (dev, 0x00);
 676                spin_lock_irq(&nl->lock);
 677                if (snd->state != PLIP_PK_DONE) {
 678                        nl->connection = PLIP_CN_SEND;
 679                        spin_unlock_irq(&nl->lock);
 680                        schedule_work(&nl->immediate);
 681                        enable_parport_interrupts (dev);
 682                        ENABLE(dev->irq);
 683                        return OK;
 684                } else {
 685                        nl->connection = PLIP_CN_NONE;
 686                        spin_unlock_irq(&nl->lock);
 687                        enable_parport_interrupts (dev);
 688                        ENABLE(dev->irq);
 689                        return OK;
 690                }
 691        }
 692        return OK;
 693}
 694
 695/* PLIP_SEND --- send a byte (two nibbles)
  696   Returns OK on success, TIMEOUT on timeout */
 697static inline int
 698plip_send(unsigned short nibble_timeout, struct net_device *dev,
 699          enum plip_nibble_state *ns_p, unsigned char data)
 700{
 701        unsigned char c0;
 702        unsigned int cx;
 703
 704        switch (*ns_p) {
 705        case PLIP_NB_BEGIN:
 706                write_data (dev, data & 0x0f);
 707                *ns_p = PLIP_NB_1;
 708
 709        case PLIP_NB_1:
 710                write_data (dev, 0x10 | (data & 0x0f));
 711                cx = nibble_timeout;
 712                while (1) {
 713                        c0 = read_status(dev);
 714                        if ((c0 & 0x80) == 0)
 715                                break;
 716                        if (--cx == 0)
 717                                return TIMEOUT;
 718                        udelay(PLIP_DELAY_UNIT);
 719                }
 720                write_data (dev, 0x10 | (data >> 4));
 721                *ns_p = PLIP_NB_2;
 722
 723        case PLIP_NB_2:
 724                write_data (dev, (data >> 4));
 725                cx = nibble_timeout;
 726                while (1) {
 727                        c0 = read_status(dev);
 728                        if (c0 & 0x80)
 729                                break;
 730                        if (--cx == 0)
 731                                return TIMEOUT;
 732                        udelay(PLIP_DELAY_UNIT);
 733                }
 734                *ns_p = PLIP_NB_BEGIN;
 735                return OK;
 736        }
 737        return OK;
 738}
 739
 740/* PLIP_SEND_PACKET --- send a packet */
 741static int
 742plip_send_packet(struct net_device *dev, struct net_local *nl,
 743                 struct plip_local *snd, struct plip_local *rcv)
 744{
 745        unsigned short nibble_timeout = nl->nibble;
 746        unsigned char *lbuf;
 747        unsigned char c0;
 748        unsigned int cx;
 749
 750        if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
 751                printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
 752                snd->state = PLIP_PK_DONE;
 753                snd->skb = NULL;
 754                return ERROR;
 755        }
 756
 757        switch (snd->state) {
 758        case PLIP_PK_TRIGGER:
 759                if ((read_status(dev) & 0xf8) != 0x80)
 760                        return HS_TIMEOUT;
 761
 762                /* Trigger remote rx interrupt. */
 763                write_data (dev, 0x08);
 764                cx = nl->trigger;
 765                while (1) {
 766                        udelay(PLIP_DELAY_UNIT);
 767                        spin_lock_irq(&nl->lock);
 768                        if (nl->connection == PLIP_CN_RECEIVE) {
 769                                spin_unlock_irq(&nl->lock);
 770                                /* Interrupted. */
 771                                dev->stats.collisions++;
 772                                return OK;
 773                        }
 774                        c0 = read_status(dev);
 775                        if (c0 & 0x08) {
 776                                spin_unlock_irq(&nl->lock);
 777                                DISABLE(dev->irq);
 778                                synchronize_irq(dev->irq);
 779                                if (nl->connection == PLIP_CN_RECEIVE) {
 780                                        /* Interrupted.
 781                                           We don't need to enable irq,
 782                                           as it is soon disabled.    */
 783                                        /* Yes, we do. New variant of
 784                                           {enable,disable}_irq *counts*
 785                                           them.  -- AV  */
 786                                        ENABLE(dev->irq);
 787                                        dev->stats.collisions++;
 788                                        return OK;
 789                                }
 790                                disable_parport_interrupts (dev);
 791                                if (net_debug > 2)
 792                                        printk(KERN_DEBUG "%s: send start\n", dev->name);
 793                                snd->state = PLIP_PK_LENGTH_LSB;
 794                                snd->nibble = PLIP_NB_BEGIN;
 795                                nl->timeout_count = 0;
 796                                break;
 797                        }
 798                        spin_unlock_irq(&nl->lock);
 799                        if (--cx == 0) {
 800                                write_data (dev, 0x00);
 801                                return HS_TIMEOUT;
 802                        }
 803                }
 804
 805        case PLIP_PK_LENGTH_LSB:
 806                if (plip_send(nibble_timeout, dev,
 807                              &snd->nibble, snd->length.b.lsb))
 808                        return TIMEOUT;
 809                snd->state = PLIP_PK_LENGTH_MSB;
 810
 811        case PLIP_PK_LENGTH_MSB:
 812                if (plip_send(nibble_timeout, dev,
 813                              &snd->nibble, snd->length.b.msb))
 814                        return TIMEOUT;
 815                snd->state = PLIP_PK_DATA;
 816                snd->byte = 0;
 817                snd->checksum = 0;
 818
 819        case PLIP_PK_DATA:
 820                do
 821                        if (plip_send(nibble_timeout, dev,
 822                                      &snd->nibble, lbuf[snd->byte]))
 823                                return TIMEOUT;
 824                while (++snd->byte < snd->length.h);
 825                do
 826                        snd->checksum += lbuf[--snd->byte];
 827                while (snd->byte);
 828                snd->state = PLIP_PK_CHECKSUM;
 829
 830        case PLIP_PK_CHECKSUM:
 831                if (plip_send(nibble_timeout, dev,
 832                              &snd->nibble, snd->checksum))
 833                        return TIMEOUT;
 834
 835                dev->stats.tx_bytes += snd->skb->len;
 836                dev_kfree_skb(snd->skb);
 837                dev->stats.tx_packets++;
 838                snd->state = PLIP_PK_DONE;
 839
 840        case PLIP_PK_DONE:
 841                /* Close the connection */
 842                write_data (dev, 0x00);
 843                snd->skb = NULL;
 844                if (net_debug > 2)
 845                        printk(KERN_DEBUG "%s: send end\n", dev->name);
 846                nl->connection = PLIP_CN_CLOSING;
 847                nl->is_deferred = 1;
 848                schedule_delayed_work(&nl->deferred, 1);
 849                enable_parport_interrupts (dev);
 850                ENABLE(dev->irq);
 851                return OK;
 852        }
 853        return OK;
 854}
 855
 856static int
 857plip_connection_close(struct net_device *dev, struct net_local *nl,
 858                      struct plip_local *snd, struct plip_local *rcv)
 859{
 860        spin_lock_irq(&nl->lock);
 861        if (nl->connection == PLIP_CN_CLOSING) {
 862                nl->connection = PLIP_CN_NONE;
 863                netif_wake_queue (dev);
 864        }
 865        spin_unlock_irq(&nl->lock);
 866        if (nl->should_relinquish) {
 867                nl->should_relinquish = nl->port_owner = 0;
 868                parport_release(nl->pardev);
 869        }
 870        return OK;
 871}
 872
  873/* PLIP_ERROR --- wait till the other end has settled */
 874static int
 875plip_error(struct net_device *dev, struct net_local *nl,
 876           struct plip_local *snd, struct plip_local *rcv)
 877{
 878        unsigned char status;
 879
 880        status = read_status(dev);
 881        if ((status & 0xf8) == 0x80) {
 882                if (net_debug > 2)
 883                        printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
 884                nl->connection = PLIP_CN_NONE;
 885                nl->should_relinquish = 0;
 886                netif_start_queue (dev);
 887                enable_parport_interrupts (dev);
 888                ENABLE(dev->irq);
 889                netif_wake_queue (dev);
 890        } else {
 891                nl->is_deferred = 1;
 892                schedule_delayed_work(&nl->deferred, 1);
 893        }
 894
 895        return OK;
 896}
 897
 898/* Handle the parallel port interrupts. */
 899static void
 900plip_interrupt(void *dev_id)
 901{
 902        struct net_device *dev = dev_id;
 903        struct net_local *nl;
 904        struct plip_local *rcv;
 905        unsigned char c0;
 906        unsigned long flags;
 907
 908        nl = netdev_priv(dev);
 909        rcv = &nl->rcv_data;
 910
 911        spin_lock_irqsave (&nl->lock, flags);
 912
 913        c0 = read_status(dev);
 914        if ((c0 & 0xf8) != 0xc0) {
 915                if ((dev->irq != -1) && (net_debug > 1))
 916                        printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
 917                spin_unlock_irqrestore (&nl->lock, flags);
 918                return;
 919        }
 920
 921        if (net_debug > 3)
 922                printk(KERN_DEBUG "%s: interrupt.\n", dev->name);
 923
 924        switch (nl->connection) {
 925        case PLIP_CN_CLOSING:
 926                netif_wake_queue (dev);
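                /* deliberate fall through: treat a closing connection as
                   idle and start receiving the incoming packet */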
 927        case PLIP_CN_NONE:
 928        case PLIP_CN_SEND:
 929                rcv->state = PLIP_PK_TRIGGER;
 930                nl->connection = PLIP_CN_RECEIVE;
 931                nl->timeout_count = 0;
 932                schedule_work(&nl->immediate);
 933                break;
 934
 935        case PLIP_CN_RECEIVE:
  936                /* May occur because there is a race condition
  937                   around the test and set of nl->connection.
  938                   Ignore this interrupt. */
 939                break;
 940
 941        case PLIP_CN_ERROR:
 942                printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
 943                break;
 944        }
 945
 946        spin_unlock_irqrestore(&nl->lock, flags);
 947}
 948
 949static int
 950plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
 951{
 952        struct net_local *nl = netdev_priv(dev);
 953        struct plip_local *snd = &nl->snd_data;
 954
 955        if (netif_queue_stopped(dev))
 956                return 1;
 957
 958        /* We may need to grab the bus */
 959        if (!nl->port_owner) {
 960                if (parport_claim(nl->pardev))
 961                        return 1;
 962                nl->port_owner = 1;
 963        }
 964
 965        netif_stop_queue (dev);
 966
 967        if (skb->len > dev->mtu + dev->hard_header_len) {
 968                printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
 969                netif_start_queue (dev);
 970                return 1;
 971        }
 972
 973        if (net_debug > 2)
 974                printk(KERN_DEBUG "%s: send request\n", dev->name);
 975
 976        spin_lock_irq(&nl->lock);
 977        dev->trans_start = jiffies;
 978        snd->skb = skb;
 979        snd->length.h = skb->len;
 980        snd->state = PLIP_PK_TRIGGER;
 981        if (nl->connection == PLIP_CN_NONE) {
 982                nl->connection = PLIP_CN_SEND;
 983                nl->timeout_count = 0;
 984        }
 985        schedule_work(&nl->immediate);
 986        spin_unlock_irq(&nl->lock);
 987
 988        return 0;
 989}
 990
 991static void
 992plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
 993{
 994        const struct in_device *in_dev = dev->ip_ptr;
 995
 996        if (in_dev) {
 997                /* Any address will do - we take the first */
 998                const struct in_ifaddr *ifa = in_dev->ifa_list;
 999                if (ifa) {
1000                        memcpy(eth->h_source, dev->dev_addr, 6);
1001                        memset(eth->h_dest, 0xfc, 2);
1002                        memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1003                }
1004        }
1005}
1006
1007static int
1008plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1009                 unsigned short type, const void *daddr,
1010                 const void *saddr, unsigned len)
1011{
1012        int ret;
1013
1014        ret = eth_header(skb, dev, type, daddr, saddr, len);
1015        if (ret >= 0)
1016                plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1017
1018        return ret;
1019}
1020
1021int plip_hard_header_cache(const struct neighbour *neigh,
1022                           struct hh_cache *hh)
1023{
1024        int ret;
1025
1026        ret = eth_header_cache(neigh, hh);
1027        if (ret == 0) {
1028                struct ethhdr *eth;
1029
1030                eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1031                                       HH_DATA_OFF(sizeof(*eth)));
1032                plip_rewrite_address (neigh->dev, eth);
1033        }
1034
1035        return ret;
1036}
1037
 1038/* Open/initialize the board.  This is called whenever the interface
 1039   is brought up (e.g. by 'ifconfig' or 'ip link').
 1040
 1041   This routine gets exclusive access to the parallel port by claiming
 1042   it with parport_claim().
1043 */
1044static int
1045plip_open(struct net_device *dev)
1046{
1047        struct net_local *nl = netdev_priv(dev);
1048        struct in_device *in_dev;
1049
1050        /* Grab the port */
1051        if (!nl->port_owner) {
1052                if (parport_claim(nl->pardev)) return -EAGAIN;
1053                nl->port_owner = 1;
1054        }
1055
1056        nl->should_relinquish = 0;
1057
1058        /* Clear the data port. */
1059        write_data (dev, 0x00);
1060
1061        /* Enable rx interrupt. */
1062        enable_parport_interrupts (dev);
1063        if (dev->irq == -1)
1064        {
1065                atomic_set (&nl->kill_timer, 0);
1066                schedule_delayed_work(&nl->timer, 1);
1067        }
1068
1069        /* Initialize the state machine. */
1070        nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
1071        nl->rcv_data.skb = nl->snd_data.skb = NULL;
1072        nl->connection = PLIP_CN_NONE;
1073        nl->is_deferred = 0;
1074
1075        /* Fill in the MAC-level header.
1076           We used to abuse dev->broadcast to store the point-to-point
1077           MAC address, but we no longer do it. Instead, we fetch the
1078           interface address whenever it is needed, which is cheap enough
1079           because we use the hh_cache. Actually, abusing dev->broadcast
1080           didn't work, because when using plip_open the point-to-point
1081           address isn't yet known.
1082           PLIP doesn't have a real MAC address, but we need it to be
1083           DOS compatible, and to properly support taps (otherwise,
1084           when the device address isn't identical to the address of a
1085           received frame, the kernel incorrectly drops it).             */
1086
1087        if ((in_dev=dev->ip_ptr) != NULL) {
1088                /* Any address will do - we take the first. We already
1089                   have the first two bytes filled with 0xfc, from
1090                   plip_init_dev(). */
1091                struct in_ifaddr *ifa=in_dev->ifa_list;
1092                if (ifa != NULL) {
1093                        memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
1094                }
1095        }
1096
1097        netif_start_queue (dev);
1098
1099        return 0;
1100}
1101
1102/* The inverse routine to plip_open (). */
1103static int
1104plip_close(struct net_device *dev)
1105{
1106        struct net_local *nl = netdev_priv(dev);
1107        struct plip_local *snd = &nl->snd_data;
1108        struct plip_local *rcv = &nl->rcv_data;
1109
1110        netif_stop_queue (dev);
1111        DISABLE(dev->irq);
1112        synchronize_irq(dev->irq);
1113
1114        if (dev->irq == -1)
1115        {
1116                init_completion(&nl->killed_timer_cmp);
1117                atomic_set (&nl->kill_timer, 1);
1118                wait_for_completion(&nl->killed_timer_cmp);
1119        }
1120
1121#ifdef NOTDEF
1122        outb(0x00, PAR_DATA(dev));
1123#endif
1124        nl->is_deferred = 0;
1125        nl->connection = PLIP_CN_NONE;
1126        if (nl->port_owner) {
1127                parport_release(nl->pardev);
1128                nl->port_owner = 0;
1129        }
1130
1131        snd->state = PLIP_PK_DONE;
1132        if (snd->skb) {
1133                dev_kfree_skb(snd->skb);
1134                snd->skb = NULL;
1135        }
1136        rcv->state = PLIP_PK_DONE;
1137        if (rcv->skb) {
1138                kfree_skb(rcv->skb);
1139                rcv->skb = NULL;
1140        }
1141
1142#ifdef NOTDEF
1143        /* Reset. */
1144        outb(0x00, PAR_CONTROL(dev));
1145#endif
1146        return 0;
1147}
1148
1149static int
1150plip_preempt(void *handle)
1151{
1152        struct net_device *dev = (struct net_device *)handle;
1153        struct net_local *nl = netdev_priv(dev);
1154
1155        /* Stand our ground if a datagram is on the wire */
1156        if (nl->connection != PLIP_CN_NONE) {
1157                nl->should_relinquish = 1;
1158                return 1;
1159        }
1160
1161        nl->port_owner = 0;     /* Remember that we released the bus */
1162        return 0;
1163}
1164
1165static void
1166plip_wakeup(void *handle)
1167{
1168        struct net_device *dev = (struct net_device *)handle;
1169        struct net_local *nl = netdev_priv(dev);
1170
1171        if (nl->port_owner) {
1172                /* Why are we being woken up? */
1173                printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1174                if (!parport_claim(nl->pardev))
1175                        /* bus_owner is already set (but why?) */
1176                        printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1177                else
1178                        return;
1179        }
1180
1181        if (!(dev->flags & IFF_UP))
1182                /* Don't need the port when the interface is down */
1183                return;
1184
1185        if (!parport_claim(nl->pardev)) {
1186                nl->port_owner = 1;
1187                /* Clear the data port. */
1188                write_data (dev, 0x00);
1189        }
1190
1191        return;
1192}
1193
1194static int
1195plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1196{
1197        struct net_local *nl = netdev_priv(dev);
1198        struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1199
1200        if (cmd != SIOCDEVPLIP)
1201                return -EOPNOTSUPP;
1202
1203        switch(pc->pcmd) {
1204        case PLIP_GET_TIMEOUT:
1205                pc->trigger = nl->trigger;
1206                pc->nibble  = nl->nibble;
1207                break;
1208        case PLIP_SET_TIMEOUT:
1209                if(!capable(CAP_NET_ADMIN))
1210                        return -EPERM;
1211                nl->trigger = pc->trigger;
1212                nl->nibble  = pc->nibble;
1213                break;
1214        default:
1215                return -EOPNOTSUPP;
1216        }
1217        return 0;
1218}
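
/*
 * Illustrative userspace sketch (an assumption, not part of this driver):
 * the trigger/nibble timeouts handled above can be queried with
 * SIOCDEVPLIP on an ordinary AF_INET socket fd, for example:
 *
 *      struct ifreq ifr;
 *      struct plipconf *pc = (struct plipconf *)&ifr.ifr_ifru;
 *
 *      strcpy(ifr.ifr_name, "plip0");
 *      pc->pcmd = PLIP_GET_TIMEOUT;
 *      if (ioctl(fd, SIOCDEVPLIP, &ifr) == 0)
 *              printf("trigger=%lu nibble=%lu\n", pc->trigger, pc->nibble);
 *
 * PLIP_SET_TIMEOUT works the same way and requires CAP_NET_ADMIN.
 */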
1219
1220static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1221static int timid;
1222
1223module_param_array(parport, int, NULL, 0);
1224module_param(timid, int, 0);
 1225MODULE_PARM_DESC(parport, "List of parport device numbers to be used by plip");
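/* Illustrative usage (assumption): "modprobe plip parport=0,1" restricts PLIP
   to parport0 and parport1, while "modprobe plip timid=1" attaches only to
   ports that have no other device registered on them. */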
1226
1227static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1228
1229static inline int
1230plip_searchfor(int list[], int a)
1231{
1232        int i;
1233        for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1234                if (list[i] == a) return 1;
1235        }
1236        return 0;
1237}
1238
1239/* plip_attach() is called (by the parport code) when a port is
1240 * available to use. */
1241static void plip_attach (struct parport *port)
1242{
1243        static int unit;
1244        struct net_device *dev;
1245        struct net_local *nl;
1246        char name[IFNAMSIZ];
1247
1248        if ((parport[0] == -1 && (!timid || !port->devices)) ||
1249            plip_searchfor(parport, port->number)) {
1250                if (unit == PLIP_MAX) {
1251                        printk(KERN_ERR "plip: too many devices\n");
1252                        return;
1253                }
1254
1255                sprintf(name, "plip%d", unit);
1256                dev = alloc_etherdev(sizeof(struct net_local));
1257                if (!dev) {
1258                        printk(KERN_ERR "plip: memory squeeze\n");
1259                        return;
1260                }
1261
1262                strcpy(dev->name, name);
1263
1264                dev->irq = port->irq;
1265                dev->base_addr = port->base;
1266                if (port->irq == -1) {
 1267                        printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode, "
1268                                 "which is fairly inefficient!\n", port->name);
1269                }
1270
1271                nl = netdev_priv(dev);
1272                nl->dev = dev;
1273                nl->pardev = parport_register_device(port, dev->name, plip_preempt,
1274                                                 plip_wakeup, plip_interrupt,
1275                                                 0, dev);
1276
1277                if (!nl->pardev) {
1278                        printk(KERN_ERR "%s: parport_register failed\n", name);
1279                        goto err_free_dev;
1281                }
1282
1283                plip_init_netdev(dev);
1284
1285                if (register_netdev(dev)) {
1286                        printk(KERN_ERR "%s: network register failed\n", name);
1287                        goto err_parport_unregister;
1288                }
1289
1290                printk(KERN_INFO "%s", version);
1291                if (dev->irq != -1)
1292                        printk(KERN_INFO "%s: Parallel port at %#3lx, "
1293                                         "using IRQ %d.\n",
1294                                         dev->name, dev->base_addr, dev->irq);
1295                else
1296                        printk(KERN_INFO "%s: Parallel port at %#3lx, "
1297                                         "not using IRQ.\n",
1298                                         dev->name, dev->base_addr);
1299                dev_plip[unit++] = dev;
1300        }
1301        return;
1302
1303err_parport_unregister:
1304        parport_unregister_device(nl->pardev);
1305err_free_dev:
1306        free_netdev(dev);
1307        return;
1308}
1309
1310/* plip_detach() is called (by the parport code) when a port is
1311 * no longer available to use. */
1312static void plip_detach (struct parport *port)
1313{
1314        /* Nothing to do */
1315}
1316
1317static struct parport_driver plip_driver = {
1318        .name   = "plip",
1319        .attach = plip_attach,
1320        .detach = plip_detach
1321};
1322
1323static void __exit plip_cleanup_module (void)
1324{
1325        struct net_device *dev;
1326        int i;
1327
1328        parport_unregister_driver (&plip_driver);
1329
1330        for (i=0; i < PLIP_MAX; i++) {
1331                if ((dev = dev_plip[i])) {
1332                        struct net_local *nl = netdev_priv(dev);
1333                        unregister_netdev(dev);
1334                        if (nl->port_owner)
1335                                parport_release(nl->pardev);
1336                        parport_unregister_device(nl->pardev);
1337                        free_netdev(dev);
1338                        dev_plip[i] = NULL;
1339                }
1340        }
1341}
1342
1343#ifndef MODULE
1344
1345static int parport_ptr;
1346
1347static int __init plip_setup(char *str)
1348{
1349        int ints[4];
1350
1351        str = get_options(str, ARRAY_SIZE(ints), ints);
1352
1353        /* Ugh. */
1354        if (!strncmp(str, "parport", 7)) {
1355                int n = simple_strtoul(str+7, NULL, 10);
1356                if (parport_ptr < PLIP_MAX)
1357                        parport[parport_ptr++] = n;
1358                else
1359                        printk(KERN_INFO "plip: too many ports, %s ignored.\n",
1360                               str);
1361        } else if (!strcmp(str, "timid")) {
1362                timid = 1;
1363        } else {
1364                if (ints[0] == 0 || ints[1] == 0) {
1365                        /* disable driver on "plip=" or "plip=0" */
1366                        parport[0] = -2;
1367                } else {
1368                        printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
1369                               ints[1]);
1370                }
1371        }
1372        return 1;
1373}
1374
1375__setup("plip=", plip_setup);
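
/* Illustrative boot-time examples (assumption): "plip=parport0" adds parport0
   to the list of ports PLIP may claim, "plip=timid" enables timid mode, and
   "plip=0" (or a bare "plip=") disables the built-in driver. */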
1376
1377#endif /* !MODULE */
1378
1379static int __init plip_init (void)
1380{
1381        if (parport[0] == -2)
1382                return 0;
1383
1384        if (parport[0] != -1 && timid) {
1385                printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1386                timid = 0;
1387        }
1388
1389        if (parport_register_driver (&plip_driver)) {
1390                printk (KERN_WARNING "plip: couldn't register driver\n");
1391                return 1;
1392        }
1393
1394        return 0;
1395}
1396
1397module_init(plip_init);
1398module_exit(plip_cleanup_module);
1399MODULE_LICENSE("GPL");
1400
1401/*
1402 * Local variables:
1403 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
1404 * End:
1405 */
1406