linux/drivers/atm/nicstar.c
   1/*
   2 * nicstar.c
   3 *
   4 * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards.
   5 *
   6 * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME.
   7 *            It was taken from the frle-0.22 device driver.
   8 *            As the file doesn't have a copyright notice, in the file
   9 *            nicstarmac.copyright I put the copyright notice from the
  10 *            frle-0.22 device driver.
  11 *            Some code is based on the nicstar driver by M. Welsh.
  12 *
  13 * Author: Rui Prior (rprior@inescn.pt)
  14 * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999
  15 *
  16 *
  17 * (C) INESC 1999
  18 */
  19
  20/*
  21 * IMPORTANT INFORMATION
  22 *
  23 * There are currently three types of spinlocks:
  24 *
  25 * 1 - Per card interrupt spinlock (to protect structures and such)
  26 * 2 - Per SCQ scq spinlock
  27 * 3 - Per card resource spinlock (to access registers, etc.)
  28 *
  29 * These must NEVER be grabbed in reverse order.
  30 *
  31 */
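/*
 * Purely illustrative sketch (not lifted from any single call path in this
 * driver) of a nesting that respects the order above, using the locks'
 * actual names:
 *
 *      spin_lock_irqsave(&card->int_lock, flags);
 *      spin_lock(&scq->lock);
 *      spin_lock(&card->res_lock);
 *      ...
 *      spin_unlock(&card->res_lock);
 *      spin_unlock(&scq->lock);
 *      spin_unlock_irqrestore(&card->int_lock, flags);
 *
 * A lock later in the list may be taken while an earlier one is held,
 * never the other way around.
 */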
  32
  33/* Header files */
  34
  35#include <linux/module.h>
  36#include <linux/kernel.h>
  37#include <linux/skbuff.h>
  38#include <linux/atmdev.h>
  39#include <linux/atm.h>
  40#include <linux/pci.h>
  41#include <linux/dma-mapping.h>
  42#include <linux/types.h>
  43#include <linux/string.h>
  44#include <linux/delay.h>
  45#include <linux/init.h>
  46#include <linux/sched.h>
  47#include <linux/timer.h>
  48#include <linux/interrupt.h>
  49#include <linux/bitops.h>
  50#include <linux/slab.h>
  51#include <linux/idr.h>
  52#include <asm/io.h>
  53#include <asm/uaccess.h>
  54#include <linux/atomic.h>
  55#include "nicstar.h"
  56#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
  57#include "suni.h"
  58#endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
  59#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
  60#include "idt77105.h"
  61#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
  62
  63/* Additional code */
  64
  65#include "nicstarmac.c"
  66
  67/* Configurable parameters */
  68
  69#undef PHY_LOOPBACK
  70#undef TX_DEBUG
  71#undef RX_DEBUG
  72#undef GENERAL_DEBUG
  73#undef EXTRA_DEBUG
  74
  75#undef NS_USE_DESTRUCTORS       /* For now keep this undefined unless you know
  76                                   you're going to use only raw ATM */
  77
  78/* Do not touch these */
  79
  80#ifdef TX_DEBUG
  81#define TXPRINTK(args...) printk(args)
  82#else
  83#define TXPRINTK(args...)
  84#endif /* TX_DEBUG */
  85
  86#ifdef RX_DEBUG
  87#define RXPRINTK(args...) printk(args)
  88#else
  89#define RXPRINTK(args...)
  90#endif /* RX_DEBUG */
  91
  92#ifdef GENERAL_DEBUG
  93#define PRINTK(args...) printk(args)
  94#else
  95#define PRINTK(args...)
  96#endif /* GENERAL_DEBUG */
  97
  98#ifdef EXTRA_DEBUG
  99#define XPRINTK(args...) printk(args)
 100#else
 101#define XPRINTK(args...)
 102#endif /* EXTRA_DEBUG */
 103
 104/* Macros */
 105
 106#define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)
 107
 108#define NS_DELAY mdelay(1)
 109
 110#define PTR_DIFF(a, b)  ((u32)((unsigned long)(a) - (unsigned long)(b)))
 111
 112#ifndef ATM_SKB
 113#define ATM_SKB(s) (&(s)->atm)
 114#endif
 115
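/*
 * Translate a pointer into an SCQ ring into the bus (DMA) address seen by
 * the adapter: take the pointer's offset from the start of the coherent
 * allocation (scq->org) and add it to the DMA handle (scq->dma).
 */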
 116#define scq_virt_to_bus(scq, p) \
 117                (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
 118
 119/* Function declarations */
 120
 121static u32 ns_read_sram(ns_dev * card, u32 sram_address);
 122static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
 123                          int count);
 124static int ns_init_card(int i, struct pci_dev *pcidev);
 125static void ns_init_card_error(ns_dev * card, int error);
 126static scq_info *get_scq(ns_dev *card, int size, u32 scd);
 127static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
 128static void push_rxbufs(ns_dev *, struct sk_buff *);
 129static irqreturn_t ns_irq_handler(int irq, void *dev_id);
 130static int ns_open(struct atm_vcc *vcc);
 131static void ns_close(struct atm_vcc *vcc);
 132static void fill_tst(ns_dev * card, int n, vc_map * vc);
 133static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
 134static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
 135                     struct sk_buff *skb);
 136static void process_tsq(ns_dev * card);
 137static void drain_scq(ns_dev * card, scq_info * scq, int pos);
 138static void process_rsq(ns_dev * card);
 139static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe);
 140#ifdef NS_USE_DESTRUCTORS
 141static void ns_sb_destructor(struct sk_buff *sb);
 142static void ns_lb_destructor(struct sk_buff *lb);
 143static void ns_hb_destructor(struct sk_buff *hb);
 144#endif /* NS_USE_DESTRUCTORS */
 145static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
 146static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
 147static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
 148static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb);
 149static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb);
 150static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page);
 151static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
 152#ifdef EXTRA_DEBUG
 153static void which_list(ns_dev * card, struct sk_buff *skb);
 154#endif
 155static void ns_poll(unsigned long arg);
 156static int ns_parse_mac(char *mac, unsigned char *esi);
 157static void ns_phy_put(struct atm_dev *dev, unsigned char value,
 158                       unsigned long addr);
 159static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
 160
 161/* Global variables */
 162
 163static struct ns_dev *cards[NS_MAX_CARDS];
 164static unsigned num_cards;
 165static struct atmdev_ops atm_ops = {
 166        .open = ns_open,
 167        .close = ns_close,
 168        .ioctl = ns_ioctl,
 169        .send = ns_send,
 170        .phy_put = ns_phy_put,
 171        .phy_get = ns_phy_get,
 172        .proc_read = ns_proc_read,
 173        .owner = THIS_MODULE,
 174};
 175
 176static struct timer_list ns_timer;
 177static char *mac[NS_MAX_CARDS];
 178module_param_array(mac, charp, NULL, 0);
 179MODULE_LICENSE("GPL");
 180
 181/* Functions */
 182
 183static int nicstar_init_one(struct pci_dev *pcidev,
 184                            const struct pci_device_id *ent)
 185{
 186        static int index = -1;
 187        unsigned int error;
 188
 189        index++;
 190        cards[index] = NULL;
 191
 192        error = ns_init_card(index, pcidev);
 193        if (error) {
 194                cards[index--] = NULL;  /* don't increment index */
 195                goto err_out;
 196        }
 197
 198        return 0;
 199err_out:
 200        return -ENODEV;
 201}
 202
 203static void nicstar_remove_one(struct pci_dev *pcidev)
 204{
 205        int i, j;
 206        ns_dev *card = pci_get_drvdata(pcidev);
 207        struct sk_buff *hb;
 208        struct sk_buff *iovb;
 209        struct sk_buff *lb;
 210        struct sk_buff *sb;
 211
 212        i = card->index;
 213
 214        if (cards[i] == NULL)
 215                return;
 216
 217        if (card->atmdev->phy && card->atmdev->phy->stop)
 218                card->atmdev->phy->stop(card->atmdev);
 219
 220        /* Stop everything */
 221        writel(0x00000000, card->membase + CFG);
 222
 223        /* De-register device */
 224        atm_dev_deregister(card->atmdev);
 225
 226        /* Disable PCI device */
 227        pci_disable_device(pcidev);
 228
 229        /* Free up resources */
 230        j = 0;
 231        PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
 232        while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) {
 233                dev_kfree_skb_any(hb);
 234                j++;
 235        }
 236        PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
 237        j = 0;
 238        PRINTK("nicstar%d: freeing %d iovec buffers.\n", i,
 239               card->iovpool.count);
 240        while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) {
 241                dev_kfree_skb_any(iovb);
 242                j++;
 243        }
 244        PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
 245        while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
 246                dev_kfree_skb_any(lb);
 247        while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
 248                dev_kfree_skb_any(sb);
 249        free_scq(card, card->scq0, NULL);
 250        for (j = 0; j < NS_FRSCD_NUM; j++) {
 251                if (card->scd2vc[j] != NULL)
 252                        free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
 253        }
 254        idr_destroy(&card->idr);
 255        pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
 256                            card->rsq.org, card->rsq.dma);
 257        pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
 258                            card->tsq.org, card->tsq.dma);
 259        free_irq(card->pcidev->irq, card);
 260        iounmap(card->membase);
 261        kfree(card);
 262}
 263
 264static struct pci_device_id nicstar_pci_tbl[] = {
 265        { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 },
 266        {0,}                    /* terminate list */
 267};
 268
 269MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl);
 270
 271static struct pci_driver nicstar_driver = {
 272        .name = "nicstar",
 273        .id_table = nicstar_pci_tbl,
 274        .probe = nicstar_init_one,
 275        .remove = nicstar_remove_one,
 276};
 277
 278static int __init nicstar_init(void)
 279{
 280        unsigned error = 0;     /* Initialized to remove compile warning */
 281
 282        XPRINTK("nicstar: nicstar_init() called.\n");
 283
 284        error = pci_register_driver(&nicstar_driver);
 285
 286        TXPRINTK("nicstar: TX debug enabled.\n");
 287        RXPRINTK("nicstar: RX debug enabled.\n");
 288        PRINTK("nicstar: General debug enabled.\n");
 289#ifdef PHY_LOOPBACK
 290        printk("nicstar: using PHY loopback.\n");
 291#endif /* PHY_LOOPBACK */
 292        XPRINTK("nicstar: nicstar_init() returned.\n");
 293
 294        if (!error) {
 295                init_timer(&ns_timer);
 296                ns_timer.expires = jiffies + NS_POLL_PERIOD;
 297                ns_timer.data = 0UL;
 298                ns_timer.function = ns_poll;
 299                add_timer(&ns_timer);
 300        }
 301
 302        return error;
 303}
 304
 305static void __exit nicstar_cleanup(void)
 306{
 307        XPRINTK("nicstar: nicstar_cleanup() called.\n");
 308
 309        del_timer(&ns_timer);
 310
 311        pci_unregister_driver(&nicstar_driver);
 312
 313        XPRINTK("nicstar: nicstar_cleanup() returned.\n");
 314}
 315
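/*
 * SRAM is reached indirectly: a read command (0x5 in the top nibble of CMD)
 * or a write command (0x4, with the word count minus one in the low bits)
 * is issued through the CMD register, data moves through the DR0-DR3 data
 * registers, and res_lock is held for the whole CMD/DR sequence.
 */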
 316static u32 ns_read_sram(ns_dev * card, u32 sram_address)
 317{
 318        unsigned long flags;
 319        u32 data;
 320        sram_address <<= 2;
 321        sram_address &= 0x0007FFFC;     /* address must be dword aligned */
 322        sram_address |= 0x50000000;     /* SRAM read command */
 323        spin_lock_irqsave(&card->res_lock, flags);
 324        while (CMD_BUSY(card)) ;
 325        writel(sram_address, card->membase + CMD);
 326        while (CMD_BUSY(card)) ;
 327        data = readl(card->membase + DR0);
 328        spin_unlock_irqrestore(&card->res_lock, flags);
 329        return data;
 330}
 331
 332static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
 333                          int count)
 334{
 335        unsigned long flags;
 336        int i, c;
 337        count--;                /* count range now is 0..3 instead of 1..4 */
 338        c = count;
 339        c <<= 2;                /* to use increments of 4 */
 340        spin_lock_irqsave(&card->res_lock, flags);
 341        while (CMD_BUSY(card)) ;
 342        for (i = 0; i <= c; i += 4)
 343                writel(*(value++), card->membase + i);
 344        /* Note: DR# registers are the first 4 dwords in nicstar's memspace,
 345           so card->membase + DR0 == card->membase */
 346        sram_address <<= 2;
 347        sram_address &= 0x0007FFFC;
 348        sram_address |= (0x40000000 | count);
 349        writel(sram_address, card->membase + CMD);
 350        spin_unlock_irqrestore(&card->res_lock, flags);
 351}
 352
 353static int ns_init_card(int i, struct pci_dev *pcidev)
 354{
 355        int j;
 356        struct ns_dev *card = NULL;
 357        unsigned char pci_latency;
 358        unsigned error;
 359        u32 data;
 360        u32 u32d[4];
 361        u32 ns_cfg_rctsize;
 362        int bcount;
 363        unsigned long membase;
 364
 365        error = 0;
 366
 367        if (pci_enable_device(pcidev)) {
 368                printk("nicstar%d: can't enable PCI device\n", i);
 369                error = 2;
 370                ns_init_card_error(card, error);
 371                return error;
 372        }
 373        if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) ||
 374            (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) {
 375                printk(KERN_WARNING
 376                       "nicstar%d: No suitable DMA available.\n", i);
 377                error = 2;
 378                ns_init_card_error(card, error);
 379                return error;
 380        }
 381
 382        if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) {
 383                printk
 384                    ("nicstar%d: can't allocate memory for device structure.\n",
 385                     i);
 386                error = 2;
 387                ns_init_card_error(card, error);
 388                return error;
 389        }
 390        cards[i] = card;
 391        spin_lock_init(&card->int_lock);
 392        spin_lock_init(&card->res_lock);
 393
 394        pci_set_drvdata(pcidev, card);
 395
 396        card->index = i;
 397        card->atmdev = NULL;
 398        card->pcidev = pcidev;
 399        membase = pci_resource_start(pcidev, 1);
 400        card->membase = ioremap(membase, NS_IOREMAP_SIZE);
 401        if (!card->membase) {
 402                printk("nicstar%d: can't ioremap() membase.\n", i);
 403                error = 3;
 404                ns_init_card_error(card, error);
 405                return error;
 406        }
 407        PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase);
 408
 409        pci_set_master(pcidev);
 410
 411        if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) {
 412                printk("nicstar%d: can't read PCI latency timer.\n", i);
 413                error = 6;
 414                ns_init_card_error(card, error);
 415                return error;
 416        }
 417#ifdef NS_PCI_LATENCY
 418        if (pci_latency < NS_PCI_LATENCY) {
 419                PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i,
 420                       NS_PCI_LATENCY);
 421                for (j = 1; j < 4; j++) {
 422                        if (pci_write_config_byte
 423                            (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0)
 424                                break;
 425                }
 426                if (j == 4) {
 427                        printk
 428                            ("nicstar%d: can't set PCI latency timer to %d.\n",
 429                             i, NS_PCI_LATENCY);
 430                        error = 7;
 431                        ns_init_card_error(card, error);
 432                        return error;
 433                }
 434        }
 435#endif /* NS_PCI_LATENCY */
 436
 437        /* Clear timer overflow */
 438        data = readl(card->membase + STAT);
 439        if (data & NS_STAT_TMROF)
 440                writel(NS_STAT_TMROF, card->membase + STAT);
 441
 442        /* Software reset */
 443        writel(NS_CFG_SWRST, card->membase + CFG);
 444        NS_DELAY;
 445        writel(0x00000000, card->membase + CFG);
 446
 447        /* PHY reset */
 448        writel(0x00000008, card->membase + GP);
 449        NS_DELAY;
 450        writel(0x00000001, card->membase + GP);
 451        NS_DELAY;
 452        while (CMD_BUSY(card)) ;
 453        writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */
 454        NS_DELAY;
 455
 456        /* Detect PHY type */
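        /*
         * The utility-bus read below returns a PHY identification byte:
         * 0x09 is treated as a 25.6 Mbps PHY (IDT77105-style), 0x30/0x31
         * as a 155 Mbps PHY (SUNI-style); anything else aborts the init.
         */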
 457        while (CMD_BUSY(card)) ;
 458        writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
 459        while (CMD_BUSY(card)) ;
 460        data = readl(card->membase + DR0);
 461        switch (data) {
 462        case 0x00000009:
 463                printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
 464                card->max_pcr = ATM_25_PCR;
 465                while (CMD_BUSY(card)) ;
 466                writel(0x00000008, card->membase + DR0);
 467                writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
  468                /* Clear any pending interrupt */
 469                writel(NS_STAT_SFBQF, card->membase + STAT);
 470#ifdef PHY_LOOPBACK
 471                while (CMD_BUSY(card)) ;
 472                writel(0x00000022, card->membase + DR0);
 473                writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
 474#endif /* PHY_LOOPBACK */
 475                break;
 476        case 0x00000030:
 477        case 0x00000031:
 478                printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
 479                card->max_pcr = ATM_OC3_PCR;
 480#ifdef PHY_LOOPBACK
 481                while (CMD_BUSY(card)) ;
 482                writel(0x00000002, card->membase + DR0);
 483                writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
 484#endif /* PHY_LOOPBACK */
 485                break;
 486        default:
 487                printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
 488                error = 8;
 489                ns_init_card_error(card, error);
 490                return error;
 491        }
 492        writel(0x00000000, card->membase + GP);
 493
 494        /* Determine SRAM size */
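        /*
         * The two probe addresses differ only in address bit 15.  On a
         * 32K-word part that bit wraps, so the second pattern overwrites
         * the first and the read-back check fails; on a 128K-word part
         * both patterns survive.
         */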
 495        data = 0x76543210;
 496        ns_write_sram(card, 0x1C003, &data, 1);
 497        data = 0x89ABCDEF;
 498        ns_write_sram(card, 0x14003, &data, 1);
 499        if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
 500            ns_read_sram(card, 0x1C003) == 0x76543210)
 501                card->sram_size = 128;
 502        else
 503                card->sram_size = 32;
 504        PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);
 505
 506        card->rct_size = NS_MAX_RCTSIZE;
 507
 508#if (NS_MAX_RCTSIZE == 4096)
 509        if (card->sram_size == 128)
 510                printk
 511                    ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n",
 512                     i);
 513#elif (NS_MAX_RCTSIZE == 16384)
 514        if (card->sram_size == 32) {
 515                printk
 516                    ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n",
 517                     i);
 518                card->rct_size = 4096;
 519        }
 520#else
 521#error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c
 522#endif
 523
 524        card->vpibits = NS_VPIBITS;
 525        if (card->rct_size == 4096)
 526                card->vcibits = 12 - NS_VPIBITS;
 527        else                    /* card->rct_size == 16384 */
 528                card->vcibits = 14 - NS_VPIBITS;
 529
 530        /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
 531        if (mac[i] == NULL)
 532                nicstar_init_eprom(card->membase);
 533
 534        /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
 535        writel(0x00000000, card->membase + VPM);
 536
 537        /* Initialize TSQ */
 538        card->tsq.org = pci_alloc_consistent(card->pcidev,
 539                                             NS_TSQSIZE + NS_TSQ_ALIGNMENT,
 540                                             &card->tsq.dma);
 541        if (card->tsq.org == NULL) {
 542                printk("nicstar%d: can't allocate TSQ.\n", i);
 543                error = 10;
 544                ns_init_card_error(card, error);
 545                return error;
 546        }
 547        card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT);
 548        card->tsq.next = card->tsq.base;
 549        card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
 550        for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
 551                ns_tsi_init(card->tsq.base + j);
 552        writel(0x00000000, card->membase + TSQH);
 553        writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB);
 554        PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base);
 555
 556        /* Initialize RSQ */
 557        card->rsq.org = pci_alloc_consistent(card->pcidev,
 558                                             NS_RSQSIZE + NS_RSQ_ALIGNMENT,
 559                                             &card->rsq.dma);
 560        if (card->rsq.org == NULL) {
 561                printk("nicstar%d: can't allocate RSQ.\n", i);
 562                error = 11;
 563                ns_init_card_error(card, error);
 564                return error;
 565        }
 566        card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT);
 567        card->rsq.next = card->rsq.base;
 568        card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
 569        for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
 570                ns_rsqe_init(card->rsq.base + j);
 571        writel(0x00000000, card->membase + RSQH);
 572        writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB);
 573        PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base);
 574
 575        /* Initialize SCQ0, the only VBR SCQ used */
 576        card->scq1 = NULL;
 577        card->scq2 = NULL;
 578        card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0);
 579        if (card->scq0 == NULL) {
 580                printk("nicstar%d: can't get SCQ0.\n", i);
 581                error = 12;
 582                ns_init_card_error(card, error);
 583                return error;
 584        }
 585        u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base);
 586        u32d[1] = (u32) 0x00000000;
 587        u32d[2] = (u32) 0xffffffff;
 588        u32d[3] = (u32) 0x00000000;
 589        ns_write_sram(card, NS_VRSCD0, u32d, 4);
 590        ns_write_sram(card, NS_VRSCD1, u32d, 4);        /* These last two won't be used */
 591        ns_write_sram(card, NS_VRSCD2, u32d, 4);        /* but are initialized, just in case... */
 592        card->scq0->scd = NS_VRSCD0;
 593        PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base);
 594
 595        /* Initialize TSTs */
 596        card->tst_addr = NS_TST0;
 597        card->tst_free_entries = NS_TST_NUM_ENTRIES;
 598        data = NS_TST_OPCODE_VARIABLE;
 599        for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
 600                ns_write_sram(card, NS_TST0 + j, &data, 1);
 601        data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
 602        ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
 603        for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
 604                ns_write_sram(card, NS_TST1 + j, &data, 1);
 605        data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
 606        ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
 607        for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
 608                card->tste2vc[j] = NULL;
 609        writel(NS_TST0 << 2, card->membase + TSTB);
 610
 611        /* Initialize RCT. AAL type is set on opening the VC. */
 612#ifdef RCQ_SUPPORT
 613        u32d[0] = NS_RCTE_RAWCELLINTEN;
 614#else
 615        u32d[0] = 0x00000000;
 616#endif /* RCQ_SUPPORT */
 617        u32d[1] = 0x00000000;
 618        u32d[2] = 0x00000000;
 619        u32d[3] = 0xFFFFFFFF;
 620        for (j = 0; j < card->rct_size; j++)
 621                ns_write_sram(card, j * 4, u32d, 4);
 622
 623        memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
 624
 625        for (j = 0; j < NS_FRSCD_NUM; j++)
 626                card->scd2vc[j] = NULL;
 627
 628        /* Initialize buffer levels */
 629        card->sbnr.min = MIN_SB;
 630        card->sbnr.init = NUM_SB;
 631        card->sbnr.max = MAX_SB;
 632        card->lbnr.min = MIN_LB;
 633        card->lbnr.init = NUM_LB;
 634        card->lbnr.max = MAX_LB;
 635        card->iovnr.min = MIN_IOVB;
 636        card->iovnr.init = NUM_IOVB;
 637        card->iovnr.max = MAX_IOVB;
 638        card->hbnr.min = MIN_HB;
 639        card->hbnr.init = NUM_HB;
 640        card->hbnr.max = MAX_HB;
 641
 642        card->sm_handle = 0x00000000;
 643        card->sm_addr = 0x00000000;
 644        card->lg_handle = 0x00000000;
 645        card->lg_addr = 0x00000000;
 646
 647        card->efbie = 1;        /* To prevent push_rxbufs from enabling the interrupt */
 648
 649        idr_init(&card->idr);
 650
 651        /* Pre-allocate some huge buffers */
 652        skb_queue_head_init(&card->hbpool.queue);
 653        card->hbpool.count = 0;
 654        for (j = 0; j < NUM_HB; j++) {
 655                struct sk_buff *hb;
 656                hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
 657                if (hb == NULL) {
 658                        printk
 659                            ("nicstar%d: can't allocate %dth of %d huge buffers.\n",
 660                             i, j, NUM_HB);
 661                        error = 13;
 662                        ns_init_card_error(card, error);
 663                        return error;
 664                }
 665                NS_PRV_BUFTYPE(hb) = BUF_NONE;
 666                skb_queue_tail(&card->hbpool.queue, hb);
 667                card->hbpool.count++;
 668        }
 669
 670        /* Allocate large buffers */
 671        skb_queue_head_init(&card->lbpool.queue);
 672        card->lbpool.count = 0; /* Not used */
 673        for (j = 0; j < NUM_LB; j++) {
 674                struct sk_buff *lb;
 675                lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
 676                if (lb == NULL) {
 677                        printk
 678                            ("nicstar%d: can't allocate %dth of %d large buffers.\n",
 679                             i, j, NUM_LB);
 680                        error = 14;
 681                        ns_init_card_error(card, error);
 682                        return error;
 683                }
 684                NS_PRV_BUFTYPE(lb) = BUF_LG;
 685                skb_queue_tail(&card->lbpool.queue, lb);
 686                skb_reserve(lb, NS_SMBUFSIZE);
 687                push_rxbufs(card, lb);
 688                /* Due to the implementation of push_rxbufs() this is 1, not 0 */
 689                if (j == 1) {
 690                        card->rcbuf = lb;
 691                        card->rawcell = (struct ns_rcqe *) lb->data;
 692                        card->rawch = NS_PRV_DMA(lb);
 693                }
 694        }
 695        /* Test for strange behaviour which leads to crashes */
 696        if ((bcount =
 697             ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) {
 698                printk
 699                    ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
 700                     i, j, bcount);
 701                error = 14;
 702                ns_init_card_error(card, error);
 703                return error;
 704        }
 705
 706        /* Allocate small buffers */
 707        skb_queue_head_init(&card->sbpool.queue);
 708        card->sbpool.count = 0; /* Not used */
 709        for (j = 0; j < NUM_SB; j++) {
 710                struct sk_buff *sb;
 711                sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
 712                if (sb == NULL) {
 713                        printk
 714                            ("nicstar%d: can't allocate %dth of %d small buffers.\n",
 715                             i, j, NUM_SB);
 716                        error = 15;
 717                        ns_init_card_error(card, error);
 718                        return error;
 719                }
 720                NS_PRV_BUFTYPE(sb) = BUF_SM;
 721                skb_queue_tail(&card->sbpool.queue, sb);
 722                skb_reserve(sb, NS_AAL0_HEADER);
 723                push_rxbufs(card, sb);
 724        }
 725        /* Test for strange behaviour which leads to crashes */
 726        if ((bcount =
 727             ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) {
 728                printk
 729                    ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
 730                     i, j, bcount);
 731                error = 15;
 732                ns_init_card_error(card, error);
 733                return error;
 734        }
 735
 736        /* Allocate iovec buffers */
 737        skb_queue_head_init(&card->iovpool.queue);
 738        card->iovpool.count = 0;
 739        for (j = 0; j < NUM_IOVB; j++) {
 740                struct sk_buff *iovb;
 741                iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
 742                if (iovb == NULL) {
 743                        printk
 744                            ("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
 745                             i, j, NUM_IOVB);
 746                        error = 16;
 747                        ns_init_card_error(card, error);
 748                        return error;
 749                }
 750                NS_PRV_BUFTYPE(iovb) = BUF_NONE;
 751                skb_queue_tail(&card->iovpool.queue, iovb);
 752                card->iovpool.count++;
 753        }
 754
 755        /* Configure NICStAR */
 756        if (card->rct_size == 4096)
 757                ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
 758        else                    /* (card->rct_size == 16384) */
 759                ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;
 760
 761        card->efbie = 1;
 762
 763        card->intcnt = 0;
 764        if (request_irq
 765            (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
 766                printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
 767                error = 9;
 768                ns_init_card_error(card, error);
 769                return error;
 770        }
 771
 772        /* Register device */
 773        card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
 774                                        -1, NULL);
 775        if (card->atmdev == NULL) {
 776                printk("nicstar%d: can't register device.\n", i);
 777                error = 17;
 778                ns_init_card_error(card, error);
 779                return error;
 780        }
 781
 782        if (ns_parse_mac(mac[i], card->atmdev->esi)) {
 783                nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
 784                                   card->atmdev->esi, 6);
 785                if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
 786                    0) {
 787                        nicstar_read_eprom(card->membase,
 788                                           NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
 789                                           card->atmdev->esi, 6);
 790                }
 791        }
 792
 793        printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);
 794
 795        card->atmdev->dev_data = card;
 796        card->atmdev->ci_range.vpi_bits = card->vpibits;
 797        card->atmdev->ci_range.vci_bits = card->vcibits;
 798        card->atmdev->link_rate = card->max_pcr;
 799        card->atmdev->phy = NULL;
 800
 801#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
 802        if (card->max_pcr == ATM_OC3_PCR)
 803                suni_init(card->atmdev);
 804#endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
 805
 806#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
 807        if (card->max_pcr == ATM_25_PCR)
 808                idt77105_init(card->atmdev);
 809#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
 810
 811        if (card->atmdev->phy && card->atmdev->phy->start)
 812                card->atmdev->phy->start(card->atmdev);
 813
 814        writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE |    /* Only enabled if RCQ_SUPPORT */
 815               NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */
 816               NS_CFG_PHYIE, card->membase + CFG);
 817
 818        num_cards++;
 819
 820        return error;
 821}
 822
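/*
 * Undo ns_init_card() in reverse: the numeric error code records how far
 * initialization got, so each threshold below releases one more resource.
 */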
 823static void ns_init_card_error(ns_dev *card, int error)
 824{
 825        if (error >= 17) {
 826                writel(0x00000000, card->membase + CFG);
 827        }
 828        if (error >= 16) {
 829                struct sk_buff *iovb;
 830                while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
 831                        dev_kfree_skb_any(iovb);
 832        }
 833        if (error >= 15) {
 834                struct sk_buff *sb;
 835                while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
 836                        dev_kfree_skb_any(sb);
 837                free_scq(card, card->scq0, NULL);
 838        }
 839        if (error >= 14) {
 840                struct sk_buff *lb;
 841                while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
 842                        dev_kfree_skb_any(lb);
 843        }
 844        if (error >= 13) {
 845                struct sk_buff *hb;
 846                while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
 847                        dev_kfree_skb_any(hb);
 848        }
  849        if (error >= 12) {
  850                pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, card->rsq.org, card->rsq.dma);
  851        }
  852        if (error >= 11) {
  853                pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, card->tsq.org, card->tsq.dma);
  854        }
 855        if (error >= 10) {
 856                free_irq(card->pcidev->irq, card);
 857        }
 858        if (error >= 4) {
 859                iounmap(card->membase);
 860        }
 861        if (error >= 3) {
 862                pci_disable_device(card->pcidev);
 863                kfree(card);
 864        }
 865}
 866
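/*
 * Allocate an SCQ.  Twice the requested size is taken from coherent memory
 * so that the ring base can be aligned to the SCQ size with PTR_ALIGN()
 * (the alignment the hardware appears to expect).  Returns NULL if any
 * allocation fails.
 */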
 867static scq_info *get_scq(ns_dev *card, int size, u32 scd)
 868{
 869        scq_info *scq;
 870        int i;
 871
 872        if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
 873                return NULL;
 874
 875        scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
 876        if (!scq)
 877                return NULL;
 878        scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma);
 879        if (!scq->org) {
 880                kfree(scq);
 881                return NULL;
 882        }
 883        scq->skb = kmalloc(sizeof(struct sk_buff *) *
 884                           (size / NS_SCQE_SIZE), GFP_KERNEL);
 885        if (!scq->skb) {
  886                pci_free_consistent(card->pcidev, 2 * size, scq->org, scq->dma);
 887                kfree(scq);
 888                return NULL;
 889        }
 890        scq->num_entries = size / NS_SCQE_SIZE;
 891        scq->base = PTR_ALIGN(scq->org, size);
 892        scq->next = scq->base;
 893        scq->last = scq->base + (scq->num_entries - 1);
 894        scq->tail = scq->last;
 895        scq->scd = scd;
 897        scq->tbd_count = 0;
 898        init_waitqueue_head(&scq->scqfull_waitq);
 899        scq->full = 0;
 900        spin_lock_init(&scq->lock);
 901
 902        for (i = 0; i < scq->num_entries; i++)
 903                scq->skb[i] = NULL;
 904
 905        return scq;
 906}
 907
 908/* For variable rate SCQ vcc must be NULL */
 909static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
 910{
 911        int i;
 912
 913        if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
 914                for (i = 0; i < scq->num_entries; i++) {
 915                        if (scq->skb[i] != NULL) {
 916                                vcc = ATM_SKB(scq->skb[i])->vcc;
 917                                if (vcc->pop != NULL)
 918                                        vcc->pop(vcc, scq->skb[i]);
 919                                else
 920                                        dev_kfree_skb_any(scq->skb[i]);
 921                        }
 922        } else {                /* vcc must be != NULL */
 923
 924                if (vcc == NULL) {
 925                        printk
 926                            ("nicstar: free_scq() called with vcc == NULL for fixed rate scq.");
 927                        for (i = 0; i < scq->num_entries; i++)
 928                                dev_kfree_skb_any(scq->skb[i]);
 929                } else
 930                        for (i = 0; i < scq->num_entries; i++) {
 931                                if (scq->skb[i] != NULL) {
 932                                        if (vcc->pop != NULL)
 933                                                vcc->pop(vcc, scq->skb[i]);
 934                                        else
 935                                                dev_kfree_skb_any(scq->skb[i]);
 936                                }
 937                        }
 938        }
 939        kfree(scq->skb);
 940        pci_free_consistent(card->pcidev,
 941                            2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
 942                                 VBR_SCQSIZE : CBR_SCQSIZE),
 943                            scq->org, scq->dma);
 944        kfree(scq);
 945}
 946
 947/* The handles passed must be pointers to the sk_buff containing the small
 948   or large buffer(s) cast to u32. */
 949static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
 950{
 951        struct sk_buff *handle1, *handle2;
 952        int id1, id2;
 953        u32 addr1, addr2;
 954        u32 stat;
 955        unsigned long flags;
 956
 957        /* *BARF* */
 958        handle2 = NULL;
 959        addr2 = 0;
 960        handle1 = skb;
 961        addr1 = pci_map_single(card->pcidev,
 962                               skb->data,
 963                               (NS_PRV_BUFTYPE(skb) == BUF_SM
 964                                ? NS_SMSKBSIZE : NS_LGSKBSIZE),
 965                               PCI_DMA_TODEVICE);
 966        NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */
 967
 968#ifdef GENERAL_DEBUG
 969        if (!addr1)
 970                printk("nicstar%d: push_rxbufs called with addr1 = 0.\n",
 971                       card->index);
 972#endif /* GENERAL_DEBUG */
 973
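        /*
         * The free-buffer-queue command hands buffers to the card in pairs
         * (DR0/DR1 and DR2/DR3 below), so a lone small or large buffer is
         * parked in card->sm_addr/sm_handle (or lg_addr/lg_handle) until
         * its partner arrives.
         */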
 974        stat = readl(card->membase + STAT);
 975        card->sbfqc = ns_stat_sfbqc_get(stat);
 976        card->lbfqc = ns_stat_lfbqc_get(stat);
 977        if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
 978                if (!addr2) {
 979                        if (card->sm_addr) {
 980                                addr2 = card->sm_addr;
 981                                handle2 = card->sm_handle;
 982                                card->sm_addr = 0x00000000;
 983                                card->sm_handle = 0x00000000;
 984                        } else {        /* (!sm_addr) */
 985
 986                                card->sm_addr = addr1;
 987                                card->sm_handle = handle1;
 988                        }
 989                }
 990        } else {                /* buf_type == BUF_LG */
 991
 992                if (!addr2) {
 993                        if (card->lg_addr) {
 994                                addr2 = card->lg_addr;
 995                                handle2 = card->lg_handle;
 996                                card->lg_addr = 0x00000000;
 997                                card->lg_handle = 0x00000000;
 998                        } else {        /* (!lg_addr) */
 999
1000                                card->lg_addr = addr1;
1001                                card->lg_handle = handle1;
1002                        }
1003                }
1004        }
1005
1006        if (addr2) {
1007                if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
1008                        if (card->sbfqc >= card->sbnr.max) {
1009                                skb_unlink(handle1, &card->sbpool.queue);
1010                                dev_kfree_skb_any(handle1);
1011                                skb_unlink(handle2, &card->sbpool.queue);
1012                                dev_kfree_skb_any(handle2);
1013                                return;
1014                        } else
1015                                card->sbfqc += 2;
1016                } else {        /* (buf_type == BUF_LG) */
1017
1018                        if (card->lbfqc >= card->lbnr.max) {
1019                                skb_unlink(handle1, &card->lbpool.queue);
1020                                dev_kfree_skb_any(handle1);
1021                                skb_unlink(handle2, &card->lbpool.queue);
1022                                dev_kfree_skb_any(handle2);
1023                                return;
1024                        } else
1025                                card->lbfqc += 2;
1026                }
1027
1028                id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC);
1029                if (id1 < 0)
1030                        goto out;
1031
1032                id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC);
1033                if (id2 < 0)
1034                        goto out;
1035
1036                spin_lock_irqsave(&card->res_lock, flags);
1037                while (CMD_BUSY(card)) ;
1038                writel(addr2, card->membase + DR3);
1039                writel(id2, card->membase + DR2);
1040                writel(addr1, card->membase + DR1);
1041                writel(id1, card->membase + DR0);
1042                writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb),
1043                       card->membase + CMD);
1044                spin_unlock_irqrestore(&card->res_lock, flags);
1045
1046                XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n",
1047                        card->index,
1048                        (NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"),
1049                        addr1, addr2);
1050        }
1051
1052        if (!card->efbie && card->sbfqc >= card->sbnr.min &&
1053            card->lbfqc >= card->lbnr.min) {
1054                card->efbie = 1;
1055                writel((readl(card->membase + CFG) | NS_CFG_EFBIE),
1056                       card->membase + CFG);
1057        }
1058
1059out:
1060        return;
1061}
1062
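/*
 * Interrupt handler.  Runs with int_lock held; every STAT condition is
 * acknowledged by writing the same bit back to STAT, and the TSQ/RSQ
 * related conditions additionally drain the corresponding queue via
 * process_tsq()/process_rsq().
 */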
1063static irqreturn_t ns_irq_handler(int irq, void *dev_id)
1064{
1065        u32 stat_r;
1066        ns_dev *card;
1067        struct atm_dev *dev;
1068        unsigned long flags;
1069
1070        card = (ns_dev *) dev_id;
1071        dev = card->atmdev;
1072        card->intcnt++;
1073
1074        PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);
1075
1076        spin_lock_irqsave(&card->int_lock, flags);
1077
1078        stat_r = readl(card->membase + STAT);
1079
1080        /* Transmit Status Indicator has been written to T. S. Queue */
1081        if (stat_r & NS_STAT_TSIF) {
1082                TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
1083                process_tsq(card);
1084                writel(NS_STAT_TSIF, card->membase + STAT);
1085        }
1086
1087        /* Incomplete CS-PDU has been transmitted */
1088        if (stat_r & NS_STAT_TXICP) {
1089                writel(NS_STAT_TXICP, card->membase + STAT);
1090                TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
1091                         card->index);
1092        }
1093
1094        /* Transmit Status Queue 7/8 full */
1095        if (stat_r & NS_STAT_TSQF) {
1096                writel(NS_STAT_TSQF, card->membase + STAT);
1097                PRINTK("nicstar%d: TSQ full.\n", card->index);
1098                process_tsq(card);
1099        }
1100
1101        /* Timer overflow */
1102        if (stat_r & NS_STAT_TMROF) {
1103                writel(NS_STAT_TMROF, card->membase + STAT);
1104                PRINTK("nicstar%d: Timer overflow.\n", card->index);
1105        }
1106
1107        /* PHY device interrupt signal active */
1108        if (stat_r & NS_STAT_PHYI) {
1109                writel(NS_STAT_PHYI, card->membase + STAT);
1110                PRINTK("nicstar%d: PHY interrupt.\n", card->index);
1111                if (dev->phy && dev->phy->interrupt) {
1112                        dev->phy->interrupt(dev);
1113                }
1114        }
1115
1116        /* Small Buffer Queue is full */
1117        if (stat_r & NS_STAT_SFBQF) {
1118                writel(NS_STAT_SFBQF, card->membase + STAT);
1119                printk("nicstar%d: Small free buffer queue is full.\n",
1120                       card->index);
1121        }
1122
1123        /* Large Buffer Queue is full */
1124        if (stat_r & NS_STAT_LFBQF) {
1125                writel(NS_STAT_LFBQF, card->membase + STAT);
1126                printk("nicstar%d: Large free buffer queue is full.\n",
1127                       card->index);
1128        }
1129
1130        /* Receive Status Queue is full */
1131        if (stat_r & NS_STAT_RSQF) {
1132                writel(NS_STAT_RSQF, card->membase + STAT);
1133                printk("nicstar%d: RSQ full.\n", card->index);
1134                process_rsq(card);
1135        }
1136
1137        /* Complete CS-PDU received */
1138        if (stat_r & NS_STAT_EOPDU) {
1139                RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
1140                process_rsq(card);
1141                writel(NS_STAT_EOPDU, card->membase + STAT);
1142        }
1143
1144        /* Raw cell received */
1145        if (stat_r & NS_STAT_RAWCF) {
1146                writel(NS_STAT_RAWCF, card->membase + STAT);
1147#ifndef RCQ_SUPPORT
1148                printk("nicstar%d: Raw cell received and no support yet...\n",
1149                       card->index);
1150#endif /* RCQ_SUPPORT */
1151                /* NOTE: the following procedure may keep a raw cell pending until the
1152                   next interrupt. As this preliminary support is only meant to
1153                   avoid buffer leakage, this is not an issue. */
1154                while (readl(card->membase + RAWCT) != card->rawch) {
1155
1156                        if (ns_rcqe_islast(card->rawcell)) {
1157                                struct sk_buff *oldbuf;
1158
1159                                oldbuf = card->rcbuf;
1160                                card->rcbuf = idr_find(&card->idr,
1161                                                       ns_rcqe_nextbufhandle(card->rawcell));
1162                                card->rawch = NS_PRV_DMA(card->rcbuf);
1163                                card->rawcell = (struct ns_rcqe *)
1164                                                card->rcbuf->data;
1165                                recycle_rx_buf(card, oldbuf);
1166                        } else {
1167                                card->rawch += NS_RCQE_SIZE;
1168                                card->rawcell++;
1169                        }
1170                }
1171        }
1172
1173        /* Small buffer queue is empty */
1174        if (stat_r & NS_STAT_SFBQE) {
1175                int i;
1176                struct sk_buff *sb;
1177
1178                writel(NS_STAT_SFBQE, card->membase + STAT);
1179                printk("nicstar%d: Small free buffer queue empty.\n",
1180                       card->index);
1181                for (i = 0; i < card->sbnr.min; i++) {
1182                        sb = dev_alloc_skb(NS_SMSKBSIZE);
1183                        if (sb == NULL) {
1184                                writel(readl(card->membase + CFG) &
1185                                       ~NS_CFG_EFBIE, card->membase + CFG);
1186                                card->efbie = 0;
1187                                break;
1188                        }
1189                        NS_PRV_BUFTYPE(sb) = BUF_SM;
1190                        skb_queue_tail(&card->sbpool.queue, sb);
1191                        skb_reserve(sb, NS_AAL0_HEADER);
1192                        push_rxbufs(card, sb);
1193                }
1194                card->sbfqc = i;
1195                process_rsq(card);
1196        }
1197
1198        /* Large buffer queue empty */
1199        if (stat_r & NS_STAT_LFBQE) {
1200                int i;
1201                struct sk_buff *lb;
1202
1203                writel(NS_STAT_LFBQE, card->membase + STAT);
1204                printk("nicstar%d: Large free buffer queue empty.\n",
1205                       card->index);
1206                for (i = 0; i < card->lbnr.min; i++) {
1207                        lb = dev_alloc_skb(NS_LGSKBSIZE);
1208                        if (lb == NULL) {
1209                                writel(readl(card->membase + CFG) &
1210                                       ~NS_CFG_EFBIE, card->membase + CFG);
1211                                card->efbie = 0;
1212                                break;
1213                        }
1214                        NS_PRV_BUFTYPE(lb) = BUF_LG;
1215                        skb_queue_tail(&card->lbpool.queue, lb);
1216                        skb_reserve(lb, NS_SMBUFSIZE);
1217                        push_rxbufs(card, lb);
1218                }
1219                card->lbfqc = i;
1220                process_rsq(card);
1221        }
1222
1223        /* Receive Status Queue is 7/8 full */
1224        if (stat_r & NS_STAT_RSQAF) {
1225                writel(NS_STAT_RSQAF, card->membase + STAT);
1226                RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
1227                process_rsq(card);
1228        }
1229
1230        spin_unlock_irqrestore(&card->int_lock, flags);
1231        PRINTK("nicstar%d: end of interrupt service\n", card->index);
1232        return IRQ_HANDLED;
1233}
1234
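/*
 * Open a VC.  CBR connections get a fixed rate SCD, their own SCQ and a
 * share of TST entries proportional to the requested cell rate; UBR
 * connections share scq0.  On the receive side the matching RCT entry is
 * written to open the connection in hardware.
 */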
1235static int ns_open(struct atm_vcc *vcc)
1236{
1237        ns_dev *card;
1238        vc_map *vc;
1239        unsigned long tmpl, modl;
1240        int tcr, tcra;          /* target cell rate, and absolute value */
1241        int n = 0;              /* Number of entries in the TST. Initialized to remove
1242                                   the compiler warning. */
1243        u32 u32d[4];
1244        int frscdi = 0;         /* Index of the SCD. Initialized to remove the compiler
1245                                   warning. How I wish compilers were clever enough to
1246                                   tell which variables can truly be used
1247                                   uninitialized... */
1248        int inuse;              /* tx or rx vc already in use by another vcc */
1249        short vpi = vcc->vpi;
1250        int vci = vcc->vci;
1251
1252        card = (ns_dev *) vcc->dev->dev_data;
1253        PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi,
1254               vci);
1255        if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
1256                PRINTK("nicstar%d: unsupported AAL.\n", card->index);
1257                return -EINVAL;
1258        }
1259
1260        vc = &(card->vcmap[vpi << card->vcibits | vci]);
1261        vcc->dev_data = vc;
1262
1263        inuse = 0;
1264        if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
1265                inuse = 1;
1266        if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
1267                inuse += 2;
1268        if (inuse) {
1269                printk("nicstar%d: %s vci already in use.\n", card->index,
1270                       inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
1271                return -EINVAL;
1272        }
1273
1274        set_bit(ATM_VF_ADDR, &vcc->flags);
1275
1276        /* NOTE: You are not allowed to modify an open connection's QOS. To change
1277           that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
1278           needed to do that. */
1279        if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
1280                scq_info *scq;
1281
1282                set_bit(ATM_VF_PARTIAL, &vcc->flags);
1283                if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1284                        /* Check requested cell rate and availability of SCD */
1285                        if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0
1286                            && vcc->qos.txtp.min_pcr == 0) {
1287                                PRINTK
1288                                    ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
1289                                     card->index);
1290                                clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1291                                clear_bit(ATM_VF_ADDR, &vcc->flags);
1292                                return -EINVAL;
1293                        }
1294
1295                        tcr = atm_pcr_goal(&(vcc->qos.txtp));
1296                        tcra = tcr >= 0 ? tcr : -tcr;
1297
1298                        PRINTK("nicstar%d: target cell rate = %d.\n",
1299                               card->index, vcc->qos.txtp.max_pcr);
1300
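                        /*
                         * TST entries needed: each of the
                         * NS_TST_NUM_ENTRIES slots in one TST rotation
                         * carries 1/NS_TST_NUM_ENTRIES of the link rate,
                         * so n = tcra * NS_TST_NUM_ENTRIES / max_pcr,
                         * rounded up (when tcr > 0) so the granted rate
                         * is not below the goal.
                         */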
1301                        tmpl =
1302                            (unsigned long)tcra *(unsigned long)
1303                            NS_TST_NUM_ENTRIES;
1304                        modl = tmpl % card->max_pcr;
1305
1306                        n = (int)(tmpl / card->max_pcr);
1307                        if (tcr > 0) {
1308                                if (modl > 0)
1309                                        n++;
1310                        } else if (tcr == 0) {
1311                                if ((n =
1312                                     (card->tst_free_entries -
1313                                      NS_TST_RESERVED)) <= 0) {
1314                                        PRINTK
1315                                            ("nicstar%d: no CBR bandwidth free.\n",
1316                                             card->index);
1317                                        clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1318                                        clear_bit(ATM_VF_ADDR, &vcc->flags);
1319                                        return -EINVAL;
1320                                }
1321                        }
1322
1323                        if (n == 0) {
1324                                printk
1325                                    ("nicstar%d: selected bandwidth < granularity.\n",
1326                                     card->index);
1327                                clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1328                                clear_bit(ATM_VF_ADDR, &vcc->flags);
1329                                return -EINVAL;
1330                        }
1331
1332                        if (n > (card->tst_free_entries - NS_TST_RESERVED)) {
1333                                PRINTK
1334                                    ("nicstar%d: not enough free CBR bandwidth.\n",
1335                                     card->index);
1336                                clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1337                                clear_bit(ATM_VF_ADDR, &vcc->flags);
1338                                return -EINVAL;
1339                        } else
1340                                card->tst_free_entries -= n;
1341
1342                        XPRINTK("nicstar%d: writing %d tst entries.\n",
1343                                card->index, n);
1344                        for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) {
1345                                if (card->scd2vc[frscdi] == NULL) {
1346                                        card->scd2vc[frscdi] = vc;
1347                                        break;
1348                                }
1349                        }
1350                        if (frscdi == NS_FRSCD_NUM) {
1351                                PRINTK
1352                                    ("nicstar%d: no SCD available for CBR channel.\n",
1353                                     card->index);
1354                                card->tst_free_entries += n;
1355                                clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1356                                clear_bit(ATM_VF_ADDR, &vcc->flags);
1357                                return -EBUSY;
1358                        }
1359
1360                        vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
1361
1362                        scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd);
1363                        if (scq == NULL) {
1364                                PRINTK("nicstar%d: can't get fixed rate SCQ.\n",
1365                                       card->index);
1366                                card->scd2vc[frscdi] = NULL;
1367                                card->tst_free_entries += n;
1368                                clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1369                                clear_bit(ATM_VF_ADDR, &vcc->flags);
1370                                return -ENOMEM;
1371                        }
1372                        vc->scq = scq;
1373                        u32d[0] = scq_virt_to_bus(scq, scq->base);
1374                        u32d[1] = (u32) 0x00000000;
1375                        u32d[2] = (u32) 0xffffffff;
1376                        u32d[3] = (u32) 0x00000000;
1377                        ns_write_sram(card, vc->cbr_scd, u32d, 4);
1378
1379                        fill_tst(card, n, vc);
1380                } else if (vcc->qos.txtp.traffic_class == ATM_UBR) {
1381                        vc->cbr_scd = 0x00000000;
1382                        vc->scq = card->scq0;
1383                }
1384
1385                if (vcc->qos.txtp.traffic_class != ATM_NONE) {
1386                        vc->tx = 1;
1387                        vc->tx_vcc = vcc;
1388                        vc->tbd_count = 0;
1389                }
1390                if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
1391                        u32 status;
1392
1393                        vc->rx = 1;
1394                        vc->rx_vcc = vcc;
1395                        vc->rx_iov = NULL;
1396
1397                        /* Open the connection in hardware */
1398                        if (vcc->qos.aal == ATM_AAL5)
1399                                status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
1400                        else    /* vcc->qos.aal == ATM_AAL0 */
1401                                status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
1402#ifdef RCQ_SUPPORT
1403                        status |= NS_RCTE_RAWCELLINTEN;
1404#endif /* RCQ_SUPPORT */
1405                        ns_write_sram(card,
1406                                      NS_RCT +
1407                                      (vpi << card->vcibits | vci) *
1408                                      NS_RCT_ENTRY_SIZE, &status, 1);
1409                }
1410
1411        }
1412
1413        set_bit(ATM_VF_READY, &vcc->flags);
1414        return 0;
1415}
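
/*
 * A note on the CBR arithmetic in the tx half of ns_open() above: the
 * requested cell rate tcr is turned into a whole number of TST entries,
 * n = ceil(tcr * NS_TST_NUM_ENTRIES / max_pcr), so the granted rate is
 * rounded up to the next multiple of max_pcr / NS_TST_NUM_ENTRIES
 * cells/s (a tcr of 0 simply claims whatever is still free above
 * NS_TST_RESERVED).  Purely as an illustration, with assumed values of
 * 2340 TST entries and an OC-3 max_pcr of 353207 cells/s, asking for
 * 10000 cells/s gives tmpl = 23400000, n = 66 with a nonzero remainder,
 * hence 67 entries, i.e. roughly 10113 cells/s actually scheduled.  The
 * real constants live in nicstar.h and <linux/atm.h>.
 */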
1416
1417static void ns_close(struct atm_vcc *vcc)
1418{
1419        vc_map *vc;
1420        ns_dev *card;
1421        u32 data;
1422        int i;
1423
1424        vc = vcc->dev_data;
1425        card = vcc->dev->dev_data;
1426        PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
1427               (int)vcc->vpi, vcc->vci);
1428
1429        clear_bit(ATM_VF_READY, &vcc->flags);
1430
1431        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
1432                u32 addr;
1433                unsigned long flags;
1434
1435                addr =
1436                    NS_RCT +
1437                    (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
1438                spin_lock_irqsave(&card->res_lock, flags);
1439                while (CMD_BUSY(card)) ;
1440                writel(NS_CMD_CLOSE_CONNECTION | addr << 2,
1441                       card->membase + CMD);
1442                spin_unlock_irqrestore(&card->res_lock, flags);
1443
1444                vc->rx = 0;
1445                if (vc->rx_iov != NULL) {
1446                        struct sk_buff *iovb;
1447                        u32 stat;
1448
1449                        stat = readl(card->membase + STAT);
1450                        card->sbfqc = ns_stat_sfbqc_get(stat);
1451                        card->lbfqc = ns_stat_lfbqc_get(stat);
1452
1453                        PRINTK
1454                            ("nicstar%d: closing a VC with pending rx buffers.\n",
1455                             card->index);
1456                        iovb = vc->rx_iov;
1457                        recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
1458                                              NS_PRV_IOVCNT(iovb));
1459                        NS_PRV_IOVCNT(iovb) = 0;
1460                        spin_lock_irqsave(&card->int_lock, flags);
1461                        recycle_iov_buf(card, iovb);
1462                        spin_unlock_irqrestore(&card->int_lock, flags);
1463                        vc->rx_iov = NULL;
1464                }
1465        }
1466
1467        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
1468                vc->tx = 0;
1469        }
1470
1471        if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1472                unsigned long flags;
1473                ns_scqe *scqep;
1474                scq_info *scq;
1475
1476                scq = vc->scq;
1477
1478                for (;;) {
1479                        spin_lock_irqsave(&scq->lock, flags);
1480                        scqep = scq->next;
1481                        if (scqep == scq->base)
1482                                scqep = scq->last;
1483                        else
1484                                scqep--;
1485                        if (scqep == scq->tail) {
1486                                spin_unlock_irqrestore(&scq->lock, flags);
1487                                break;
1488                        }
1489                        /* If the last entry is not a TSR, place one in the SCQ in order to
1490                           be able to completely drain it and then close. */
1491                        if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) {
1492                                ns_scqe tsr;
1493                                u32 scdi, scqi;
1494                                u32 data;
1495                                int index;
1496
1497                                tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1498                                scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1499                                scqi = scq->next - scq->base;
1500                                tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1501                                tsr.word_3 = 0x00000000;
1502                                tsr.word_4 = 0x00000000;
1503                                *scq->next = tsr;
1504                                index = (int)scqi;
1505                                scq->skb[index] = NULL;
1506                                if (scq->next == scq->last)
1507                                        scq->next = scq->base;
1508                                else
1509                                        scq->next++;
1510                                data = scq_virt_to_bus(scq, scq->next);
1511                                ns_write_sram(card, scq->scd, &data, 1);
1512                        }
1513                        spin_unlock_irqrestore(&scq->lock, flags);
1514                        schedule();
1515                }
1516
1517                /* Free all TST entries */
1518                data = NS_TST_OPCODE_VARIABLE;
1519                for (i = 0; i < NS_TST_NUM_ENTRIES; i++) {
1520                        if (card->tste2vc[i] == vc) {
1521                                ns_write_sram(card, card->tst_addr + i, &data,
1522                                              1);
1523                                card->tste2vc[i] = NULL;
1524                                card->tst_free_entries++;
1525                        }
1526                }
1527
1528                card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
1529                free_scq(card, vc->scq, vcc);
1530        }
1531
1532        /* remove all references to vcc before deleting it */
1533        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
1534                unsigned long flags;
1535                scq_info *scq = card->scq0;
1536
1537                spin_lock_irqsave(&scq->lock, flags);
1538
1539                for (i = 0; i < scq->num_entries; i++) {
1540                        if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) {
1541                                ATM_SKB(scq->skb[i])->vcc = NULL;
1542                                atm_return(vcc, scq->skb[i]->truesize);
1543                                PRINTK
1544                                    ("nicstar: deleted pending vcc mapping\n");
1545                        }
1546                }
1547
1548                spin_unlock_irqrestore(&scq->lock, flags);
1549        }
1550
1551        vcc->dev_data = NULL;
1552        clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1553        clear_bit(ATM_VF_ADDR, &vcc->flags);
1554
1555#ifdef RX_DEBUG
1556        {
1557                u32 stat, cfg;
1558                stat = readl(card->membase + STAT);
1559                cfg = readl(card->membase + CFG);
1560                printk("STAT = 0x%08X  CFG = 0x%08X  \n", stat, cfg);
1561                printk
1562                    ("TSQ: base = 0x%p  next = 0x%p  last = 0x%p  TSQT = 0x%08X \n",
1563                     card->tsq.base, card->tsq.next,
1564                     card->tsq.last, readl(card->membase + TSQT));
1565                printk
1566                    ("RSQ: base = 0x%p  next = 0x%p  last = 0x%p  RSQT = 0x%08X \n",
1567                     card->rsq.base, card->rsq.next,
1568                     card->rsq.last, readl(card->membase + RSQT));
1569                printk("Empty free buffer queue interrupt %s \n",
1570                       card->efbie ? "enabled" : "disabled");
1571                printk("SBCNT = %d  count = %d   LBCNT = %d count = %d \n",
1572                       ns_stat_sfbqc_get(stat), card->sbpool.count,
1573                       ns_stat_lfbqc_get(stat), card->lbpool.count);
1574                printk("hbpool.count = %d  iovpool.count = %d \n",
1575                       card->hbpool.count, card->iovpool.count);
1576        }
1577#endif /* RX_DEBUG */
1578}
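
/*
 * Closing a CBR VC, as done above, happens in three steps: the for (;;)
 * loop first waits until the hardware has consumed every pending entry
 * in the per-VC SCQ, appending an interrupt-enabled TSR whenever the
 * newest entry is not already one so that drain_scq() will eventually
 * move the tail forward; then every TST slot that pointed at this VC is
 * rewritten as a VARIABLE (VBR/UBR) slot and its bandwidth returned to
 * tst_free_entries; finally the SCD-to-VC mapping is cleared and the SCQ
 * freed.  The scq0 sweep at the end runs for every tx-capable VC and
 * only removes dangling skb->vcc references from the shared VBR/UBR
 * queue.
 */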
1579
1580static void fill_tst(ns_dev * card, int n, vc_map * vc)
1581{
1582        u32 new_tst;
1583        unsigned long cl;
1584        int e, r;
1585        u32 data;
1586
1587        /* It would be very complicated to keep the two TSTs synchronized while
1588           ensuring that writes are only made to the inactive TST. So, for now, I
1589           will use only one TST. If problems occur, I will change this again. */
1590
1591        new_tst = card->tst_addr;
1592
1593        /* Fill procedure */
1594
1595        for (e = 0; e < NS_TST_NUM_ENTRIES; e++) {
1596                if (card->tste2vc[e] == NULL)
1597                        break;
1598        }
1599        if (e == NS_TST_NUM_ENTRIES) {
1600                printk("nicstar%d: No free TST entries found. \n", card->index);
1601                return;
1602        }
1603
1604        r = n;
1605        cl = NS_TST_NUM_ENTRIES;
1606        data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);
1607
1608        while (r > 0) {
1609                if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) {
1610                        card->tste2vc[e] = vc;
1611                        ns_write_sram(card, new_tst + e, &data, 1);
1612                        cl -= NS_TST_NUM_ENTRIES;
1613                        r--;
1614                }
1615
1616                if (++e == NS_TST_NUM_ENTRIES) {
1617                        e = 0;
1618                }
1619                cl += n;
1620        }
1621
1622        /* End of fill procedure */
1623
1624        data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
1625        ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
1626        ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
1627        card->tst_addr = new_tst;
1628}
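
/*
 * The fill loop above spreads the n CBR entries as evenly as possible
 * over the table: cl is an error accumulator that gains n per slot
 * visited and loses NS_TST_NUM_ENTRIES each time an entry is written, so
 * consecutive entries end up roughly NS_TST_NUM_ENTRIES / n slots apart
 * (a Bresenham-style spread).  Below is a minimal, purely illustrative
 * sketch of the same idea (hypothetical helper, kept out of the build,
 * and omitting the tste2vc[] occupancy check the real code performs).
 */
#if 0
static void tst_spread_demo(int n)
{
        unsigned long cl = NS_TST_NUM_ENTRIES;
        int e = 0, r = n;

        while (r > 0) {
                if (cl >= NS_TST_NUM_ENTRIES) {
                        printk("CBR entry at TST slot %d\n", e);
                        cl -= NS_TST_NUM_ENTRIES;
                        r--;
                }
                if (++e == NS_TST_NUM_ENTRIES)
                        e = 0;
                cl += n;
        }
}
#endif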
1629
1630static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
1631{
1632        ns_dev *card;
1633        vc_map *vc;
1634        scq_info *scq;
1635        unsigned long buflen;
1636        ns_scqe scqe;
1637        u32 flags;              /* TBD flags, not CPU flags */
1638
1639        card = vcc->dev->dev_data;
1640        TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
1641        if ((vc = (vc_map *) vcc->dev_data) == NULL) {
1642                printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
1643                       card->index);
1644                atomic_inc(&vcc->stats->tx_err);
1645                dev_kfree_skb_any(skb);
1646                return -EINVAL;
1647        }
1648
1649        if (!vc->tx) {
1650                printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
1651                       card->index);
1652                atomic_inc(&vcc->stats->tx_err);
1653                dev_kfree_skb_any(skb);
1654                return -EINVAL;
1655        }
1656
1657        if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
1658                printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
1659                       card->index);
1660                atomic_inc(&vcc->stats->tx_err);
1661                dev_kfree_skb_any(skb);
1662                return -EINVAL;
1663        }
1664
1665        if (skb_shinfo(skb)->nr_frags != 0) {
1666                printk("nicstar%d: No scatter-gather yet.\n", card->index);
1667                atomic_inc(&vcc->stats->tx_err);
1668                dev_kfree_skb_any(skb);
1669                return -EINVAL;
1670        }
1671
1672        ATM_SKB(skb)->vcc = vcc;
1673
1674        NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data,
1675                                         skb->len, PCI_DMA_TODEVICE);
1676
1677        if (vcc->qos.aal == ATM_AAL5) {
1678                buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */
1679                flags = NS_TBD_AAL5;
1680                scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb));
1681                scqe.word_3 = cpu_to_le32(skb->len);
1682                        scqe.word_4 =
1683                            ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
1684                                            ATM_SKB(skb)->atm_options &
1685                                            ATM_ATMOPT_CLP ? 1 : 0);
1686                flags |= NS_TBD_EOPDU;
1687        } else {                /* (vcc->qos.aal == ATM_AAL0) */
1688
1689                buflen = ATM_CELL_PAYLOAD;      /* i.e., 48 bytes */
1690                flags = NS_TBD_AAL0;
1691                scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER);
1692                scqe.word_3 = cpu_to_le32(0x00000000);
1693                if (*skb->data & 0x02)  /* Payload type 1 - end of pdu */
1694                        flags |= NS_TBD_EOPDU;
1695                scqe.word_4 =
1696                    cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
1697                /* Force the VPI/VCI to be the same as in VCC struct */
1698                        scqe.word_4 |=
1699                            cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT |
1700                                         ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) &
1701                                        NS_TBD_VC_MASK);
1703        }
1704
1705        if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1706                scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
1707                scq = ((vc_map *) vcc->dev_data)->scq;
1708        } else {
1709                scqe.word_1 =
1710                    ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
1711                scq = card->scq0;
1712        }
1713
1714        if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
1715                atomic_inc(&vcc->stats->tx_err);
1716                dev_kfree_skb_any(skb);
1717                return -EIO;
1718        }
1719        atomic_inc(&vcc->stats->tx);
1720
1721        return 0;
1722}
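
/*
 * A worked example of the AAL5 sizing in ns_send() above: a 100-byte SDU
 * needs an 8-byte AAL5 trailer, so buflen = (100 + 47 + 8) / 48 * 48 =
 * 144 bytes, i.e. three full cells, with the chip padding the unused
 * bytes.  For AAL0 the skb is expected to start with the NS_AAL0_HEADER
 * cell-header word followed by the 48-byte payload; word_2 points just
 * past that header and the VPI/VCI bits of word_4 are overwritten from
 * the vcc so a misbuilt header cannot transmit on another VC.
 */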
1723
1724static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
1725                     struct sk_buff *skb)
1726{
1727        unsigned long flags;
1728        ns_scqe tsr;
1729        u32 scdi, scqi;
1730        int scq_is_vbr;
1731        u32 data;
1732        int index;
1733
1734        spin_lock_irqsave(&scq->lock, flags);
1735        while (scq->tail == scq->next) {
1736                if (in_interrupt()) {
1737                        spin_unlock_irqrestore(&scq->lock, flags);
1738                        printk("nicstar%d: Error pushing TBD.\n", card->index);
1739                        return 1;
1740                }
1741
1742                scq->full = 1;
1743                spin_unlock_irqrestore(&scq->lock, flags);
1744                interruptible_sleep_on_timeout(&scq->scqfull_waitq,
1745                                               SCQFULL_TIMEOUT);
1746                spin_lock_irqsave(&scq->lock, flags);
1747
1748                if (scq->full) {
1749                        spin_unlock_irqrestore(&scq->lock, flags);
1750                        printk("nicstar%d: Timeout pushing TBD.\n",
1751                               card->index);
1752                        return 1;
1753                }
1754        }
1755        *scq->next = *tbd;
1756        index = (int)(scq->next - scq->base);
1757        scq->skb[index] = skb;
1758        XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n",
1759                card->index, skb, index);
1760        XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
1761                card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
1762                le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4),
1763                scq->next);
1764        if (scq->next == scq->last)
1765                scq->next = scq->base;
1766        else
1767                scq->next++;
1768
1769        vc->tbd_count++;
1770        if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) {
1771                scq->tbd_count++;
1772                scq_is_vbr = 1;
1773        } else
1774                scq_is_vbr = 0;
1775
1776        if (vc->tbd_count >= MAX_TBD_PER_VC
1777            || scq->tbd_count >= MAX_TBD_PER_SCQ) {
1778                int has_run = 0;
1779
1780                while (scq->tail == scq->next) {
1781                        if (in_interrupt()) {
1782                                data = scq_virt_to_bus(scq, scq->next);
1783                                ns_write_sram(card, scq->scd, &data, 1);
1784                                spin_unlock_irqrestore(&scq->lock, flags);
1785                                printk("nicstar%d: Error pushing TSR.\n",
1786                                       card->index);
1787                                return 0;
1788                        }
1789
1790                        scq->full = 1;
1791                        if (has_run++)
1792                                break;
1793                        spin_unlock_irqrestore(&scq->lock, flags);
1794                        interruptible_sleep_on_timeout(&scq->scqfull_waitq,
1795                                                       SCQFULL_TIMEOUT);
1796                        spin_lock_irqsave(&scq->lock, flags);
1797                }
1798
1799                if (!scq->full) {
1800                        tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1801                        if (scq_is_vbr)
1802                                scdi = NS_TSR_SCDISVBR;
1803                        else
1804                                scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1805                        scqi = scq->next - scq->base;
1806                        tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1807                        tsr.word_3 = 0x00000000;
1808                        tsr.word_4 = 0x00000000;
1809
1810                        *scq->next = tsr;
1811                        index = (int)scqi;
1812                        scq->skb[index] = NULL;
1813                        XPRINTK
1814                            ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
1815                             card->index, le32_to_cpu(tsr.word_1),
1816                             le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3),
1817                             le32_to_cpu(tsr.word_4), scq->next);
1818                        if (scq->next == scq->last)
1819                                scq->next = scq->base;
1820                        else
1821                                scq->next++;
1822                        vc->tbd_count = 0;
1823                        scq->tbd_count = 0;
1824                } else
1825                        PRINTK("nicstar%d: Timeout pushing TSR.\n",
1826                               card->index);
1827        }
1828        data = scq_virt_to_bus(scq, scq->next);
1829        ns_write_sram(card, scq->scd, &data, 1);
1830
1831        spin_unlock_irqrestore(&scq->lock, flags);
1832
1833        return 0;
1834}
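
/*
 * push_scqe() is the single writer for both the per-VC CBR SCQs and the
 * shared scq0: under scq->lock it copies the TBD into *scq->next, records
 * the skb so drain_scq() can release it later, and only then announces
 * the new queue position to the adapter by writing it to the SCD in
 * SRAM.  If the queue looks full (tail == next) the caller sleeps on
 * scqfull_waitq unless it is in interrupt context, in which case the
 * push simply fails.  Every MAX_TBD_PER_VC (or MAX_TBD_PER_SCQ)
 * descriptors an interrupt-enabled TSR is interleaved; those TSRs are
 * what ultimately drive TSQ processing and skb reclaim.
 */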
1835
1836static void process_tsq(ns_dev * card)
1837{
1838        u32 scdi;
1839        scq_info *scq;
1840        ns_tsi *previous = NULL, *one_ahead, *two_ahead;
1841        int serviced_entries;   /* flag indicating at least one entry was serviced */
1842
1843        serviced_entries = 0;
1844
1845        if (card->tsq.next == card->tsq.last)
1846                one_ahead = card->tsq.base;
1847        else
1848                one_ahead = card->tsq.next + 1;
1849
1850        if (one_ahead == card->tsq.last)
1851                two_ahead = card->tsq.base;
1852        else
1853                two_ahead = one_ahead + 1;
1854
1855        while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) ||
1856               !ns_tsi_isempty(two_ahead))
1857                /* At most two empty, as stated in the 77201 errata */
1858        {
1859                serviced_entries = 1;
1860
1861                /* Skip the one or two possible empty entries */
1862                while (ns_tsi_isempty(card->tsq.next)) {
1863                        if (card->tsq.next == card->tsq.last)
1864                                card->tsq.next = card->tsq.base;
1865                        else
1866                                card->tsq.next++;
1867                }
1868
1869                if (!ns_tsi_tmrof(card->tsq.next)) {
1870                        scdi = ns_tsi_getscdindex(card->tsq.next);
1871                        if (scdi == NS_TSI_SCDISVBR)
1872                                scq = card->scq0;
1873                        else {
1874                                if (card->scd2vc[scdi] == NULL) {
1875                                        printk
1876                                            ("nicstar%d: could not find VC from SCD index.\n",
1877                                             card->index);
1878                                        ns_tsi_init(card->tsq.next);
1879                                        return;
1880                                }
1881                                scq = card->scd2vc[scdi]->scq;
1882                        }
1883                        drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
1884                        scq->full = 0;
1885                        wake_up_interruptible(&(scq->scqfull_waitq));
1886                }
1887
1888                ns_tsi_init(card->tsq.next);
1889                previous = card->tsq.next;
1890                if (card->tsq.next == card->tsq.last)
1891                        card->tsq.next = card->tsq.base;
1892                else
1893                        card->tsq.next++;
1894
1895                if (card->tsq.next == card->tsq.last)
1896                        one_ahead = card->tsq.base;
1897                else
1898                        one_ahead = card->tsq.next + 1;
1899
1900                if (one_ahead == card->tsq.last)
1901                        two_ahead = card->tsq.base;
1902                else
1903                        two_ahead = one_ahead + 1;
1904        }
1905
1906        if (serviced_entries)
1907                writel(PTR_DIFF(previous, card->tsq.base),
1908                       card->membase + TSQH);
1909}
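
/*
 * process_tsq() works around a 77201 erratum: the adapter may leave up to
 * two TSQ entries temporarily empty ahead of a valid one, so besides
 * card->tsq.next the two following slots are peeked as well and the scan
 * only stops once all three look empty.  Each serviced entry identifies
 * an SCD (or NS_TSI_SCDISVBR for scq0) and the SCQ position up to which
 * transmission has completed, which is handed to drain_scq(); TSQH is
 * written back once at the end to acknowledge everything processed.
 */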
1910
1911static void drain_scq(ns_dev * card, scq_info * scq, int pos)
1912{
1913        struct atm_vcc *vcc;
1914        struct sk_buff *skb;
1915        int i;
1916        unsigned long flags;
1917
1918        XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n",
1919                card->index, scq, pos);
1920        if (pos >= scq->num_entries) {
1921                printk("nicstar%d: Bad index on drain_scq().\n", card->index);
1922                return;
1923        }
1924
1925        spin_lock_irqsave(&scq->lock, flags);
1926        i = (int)(scq->tail - scq->base);
1927        if (++i == scq->num_entries)
1928                i = 0;
1929        while (i != pos) {
1930                skb = scq->skb[i];
1931                XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n",
1932                        card->index, skb, i);
1933                if (skb != NULL) {
1934                        pci_unmap_single(card->pcidev,
1935                                         NS_PRV_DMA(skb),
1936                                         skb->len,
1937                                         PCI_DMA_TODEVICE);
1938                        vcc = ATM_SKB(skb)->vcc;
1939                        if (vcc && vcc->pop != NULL) {
1940                                vcc->pop(vcc, skb);
1941                        } else {
1942                                dev_kfree_skb_irq(skb);
1943                        }
1944                        scq->skb[i] = NULL;
1945                }
1946                if (++i == scq->num_entries)
1947                        i = 0;
1948        }
1949        scq->tail = scq->base + pos;
1950        spin_unlock_irqrestore(&scq->lock, flags);
1951}
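
/*
 * drain_scq() walks the SCQ from just past the current tail up to the
 * position reported by the TSI, DMA-unmapping and releasing every skb it
 * finds (through vcc->pop() when the socket installed one, otherwise
 * dev_kfree_skb_irq()).  Slots that carried a TSR rather than a TBD were
 * stored with a NULL skb and are skipped.  Moving the tail to pos at the
 * end is what makes room for push_scqe() again.
 */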
1952
1953static void process_rsq(ns_dev * card)
1954{
1955        ns_rsqe *previous;
1956
1957        if (!ns_rsqe_valid(card->rsq.next))
1958                return;
1959        do {
1960                dequeue_rx(card, card->rsq.next);
1961                ns_rsqe_init(card->rsq.next);
1962                previous = card->rsq.next;
1963                if (card->rsq.next == card->rsq.last)
1964                        card->rsq.next = card->rsq.base;
1965                else
1966                        card->rsq.next++;
1967        } while (ns_rsqe_valid(card->rsq.next));
1968        writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH);
1969}
1970
1971static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
1972{
1973        u32 vpi, vci;
1974        vc_map *vc;
1975        struct sk_buff *iovb;
1976        struct iovec *iov;
1977        struct atm_vcc *vcc;
1978        struct sk_buff *skb;
1979        unsigned short aal5_len;
1980        int len;
1981        u32 stat;
1982        u32 id;
1983
1984        stat = readl(card->membase + STAT);
1985        card->sbfqc = ns_stat_sfbqc_get(stat);
1986        card->lbfqc = ns_stat_lfbqc_get(stat);
1987
1988        id = le32_to_cpu(rsqe->buffer_handle);
1989        skb = idr_find(&card->idr, id);
1990        if (!skb) {
1991                RXPRINTK(KERN_ERR
1992                         "nicstar%d: idr_find() failed!\n", card->index);
1993                return;
1994        }
1995        idr_remove(&card->idr, id);
1996        pci_dma_sync_single_for_cpu(card->pcidev,
1997                                    NS_PRV_DMA(skb),
1998                                    (NS_PRV_BUFTYPE(skb) == BUF_SM
1999                                     ? NS_SMSKBSIZE : NS_LGSKBSIZE),
2000                                    PCI_DMA_FROMDEVICE);
2001        pci_unmap_single(card->pcidev,
2002                         NS_PRV_DMA(skb),
2003                         (NS_PRV_BUFTYPE(skb) == BUF_SM
2004                          ? NS_SMSKBSIZE : NS_LGSKBSIZE),
2005                         PCI_DMA_FROMDEVICE);
2006        vpi = ns_rsqe_vpi(rsqe);
2007        vci = ns_rsqe_vci(rsqe);
2008        if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) {
2009                printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
2010                       card->index, vpi, vci);
2011                recycle_rx_buf(card, skb);
2012                return;
2013        }
2014
2015        vc = &(card->vcmap[vpi << card->vcibits | vci]);
2016        if (!vc->rx) {
2017                RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
2018                         card->index, vpi, vci);
2019                recycle_rx_buf(card, skb);
2020                return;
2021        }
2022
2023        vcc = vc->rx_vcc;
2024
2025        if (vcc->qos.aal == ATM_AAL0) {
2026                struct sk_buff *sb;
2027                unsigned char *cell;
2028                int i;
2029
2030                cell = skb->data;
2031                for (i = ns_rsqe_cellcount(rsqe); i; i--) {
2032                        if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) {
2033                                printk
2034                                    ("nicstar%d: Can't allocate buffers for aal0.\n",
2035                                     card->index);
2036                                atomic_add(i, &vcc->stats->rx_drop);
2037                                break;
2038                        }
2039                        if (!atm_charge(vcc, sb->truesize)) {
2040                                RXPRINTK
2041                                    ("nicstar%d: atm_charge() dropped aal0 packets.\n",
2042                                     card->index);
2043                                atomic_add(i - 1, &vcc->stats->rx_drop);        /* already increased by 1 */
2044                                dev_kfree_skb_any(sb);
2045                                break;
2046                        }
2047                        /* Rebuild the header */
2048                        *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
2049                            (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
2050                        if (i == 1 && ns_rsqe_eopdu(rsqe))
2051                                *((u32 *) sb->data) |= 0x00000002;
2052                        skb_put(sb, NS_AAL0_HEADER);
2053                        memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
2054                        skb_put(sb, ATM_CELL_PAYLOAD);
2055                        ATM_SKB(sb)->vcc = vcc;
2056                        __net_timestamp(sb);
2057                        vcc->push(vcc, sb);
2058                        atomic_inc(&vcc->stats->rx);
2059                        cell += ATM_CELL_PAYLOAD;
2060                }
2061
2062                recycle_rx_buf(card, skb);
2063                return;
2064        }
2065
2066        /* To reach this point, the AAL layer can only be AAL5 */
2067
2068        if ((iovb = vc->rx_iov) == NULL) {
2069                iovb = skb_dequeue(&(card->iovpool.queue));
2070                if (iovb == NULL) {     /* No buffers in the queue */
2071                        iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
2072                        if (iovb == NULL) {
2073                                printk("nicstar%d: Out of iovec buffers.\n",
2074                                       card->index);
2075                                atomic_inc(&vcc->stats->rx_drop);
2076                                recycle_rx_buf(card, skb);
2077                                return;
2078                        }
2079                        NS_PRV_BUFTYPE(iovb) = BUF_NONE;
2080                } else if (--card->iovpool.count < card->iovnr.min) {
2081                        struct sk_buff *new_iovb;
2082                        if ((new_iovb =
2083                             alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) {
2084                                NS_PRV_BUFTYPE(new_iovb) = BUF_NONE;
2085                                skb_queue_tail(&card->iovpool.queue, new_iovb);
2086                                card->iovpool.count++;
2087                        }
2088                }
2089                vc->rx_iov = iovb;
2090                NS_PRV_IOVCNT(iovb) = 0;
2091                iovb->len = 0;
2092                iovb->data = iovb->head;
2093                skb_reset_tail_pointer(iovb);
2094                /* IMPORTANT: a pointer to the sk_buff containing the small or large
2095                   buffer is stored as iovec base, NOT a pointer to the
2096                   small or large buffer itself. */
2097        } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
2098                printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
2099                atomic_inc(&vcc->stats->rx_err);
2100                recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
2101                                      NS_MAX_IOVECS);
2102                NS_PRV_IOVCNT(iovb) = 0;
2103                iovb->len = 0;
2104                iovb->data = iovb->head;
2105                skb_reset_tail_pointer(iovb);
2106        }
2107        iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++];
2108        iov->iov_base = (void *)skb;
2109        iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
2110        iovb->len += iov->iov_len;
2111
2112#ifdef EXTRA_DEBUG
2113        if (NS_PRV_IOVCNT(iovb) == 1) {
2114                if (NS_PRV_BUFTYPE(skb) != BUF_SM) {
2115                        printk
2116                            ("nicstar%d: Expected a small buffer, and this is not one.\n",
2117                             card->index);
2118                        which_list(card, skb);
2119                        atomic_inc(&vcc->stats->rx_err);
2120                        recycle_rx_buf(card, skb);
2121                        vc->rx_iov = NULL;
2122                        recycle_iov_buf(card, iovb);
2123                        return;
2124                }
2125        } else {                /* NS_PRV_IOVCNT(iovb) >= 2 */
2126
2127                if (NS_PRV_BUFTYPE(skb) != BUF_LG) {
2128                        printk
2129                            ("nicstar%d: Expected a large buffer, and this is not one.\n",
2130                             card->index);
2131                        which_list(card, skb);
2132                        atomic_inc(&vcc->stats->rx_err);
2133                        recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
2134                                              NS_PRV_IOVCNT(iovb));
2135                        vc->rx_iov = NULL;
2136                        recycle_iov_buf(card, iovb);
2137                        return;
2138                }
2139        }
2140#endif /* EXTRA_DEBUG */
2141
2142        if (ns_rsqe_eopdu(rsqe)) {
2143                /* This works correctly regardless of the endianness of the host */
2144                unsigned char *L1L2 = (unsigned char *)
2145                                                (skb->data + iov->iov_len - 6);
2146                aal5_len = L1L2[0] << 8 | L1L2[1];
2147                len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
2148                if (ns_rsqe_crcerr(rsqe) ||
2149                    len + 8 > iovb->len || len + (47 + 8) < iovb->len) {
2150                        printk("nicstar%d: AAL5 CRC error", card->index);
2151                        if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
2152                                printk(" - PDU size mismatch.\n");
2153                        else
2154                                printk(".\n");
2155                        atomic_inc(&vcc->stats->rx_err);
2156                        recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
2157                                              NS_PRV_IOVCNT(iovb));
2158                        vc->rx_iov = NULL;
2159                        recycle_iov_buf(card, iovb);
2160                        return;
2161                }
2162
2163                /* By this point we (hopefully) have a complete SDU without errors. */
2164
2165                if (NS_PRV_IOVCNT(iovb) == 1) { /* Just a small buffer */
2166                        /* skb points to a small buffer */
2167                        if (!atm_charge(vcc, skb->truesize)) {
2168                                push_rxbufs(card, skb);
2169                                atomic_inc(&vcc->stats->rx_drop);
2170                        } else {
2171                                skb_put(skb, len);
2172                                dequeue_sm_buf(card, skb);
2173#ifdef NS_USE_DESTRUCTORS
2174                                skb->destructor = ns_sb_destructor;
2175#endif /* NS_USE_DESTRUCTORS */
2176                                ATM_SKB(skb)->vcc = vcc;
2177                                __net_timestamp(skb);
2178                                vcc->push(vcc, skb);
2179                                atomic_inc(&vcc->stats->rx);
2180                        }
2181                } else if (NS_PRV_IOVCNT(iovb) == 2) {  /* One small plus one large buffer */
2182                        struct sk_buff *sb;
2183
2184                        sb = (struct sk_buff *)(iov - 1)->iov_base;
2185                        /* skb points to a large buffer */
2186
2187                        if (len <= NS_SMBUFSIZE) {
2188                                if (!atm_charge(vcc, sb->truesize)) {
2189                                        push_rxbufs(card, sb);
2190                                        atomic_inc(&vcc->stats->rx_drop);
2191                                } else {
2192                                        skb_put(sb, len);
2193                                        dequeue_sm_buf(card, sb);
2194#ifdef NS_USE_DESTRUCTORS
2195                                        sb->destructor = ns_sb_destructor;
2196#endif /* NS_USE_DESTRUCTORS */
2197                                        ATM_SKB(sb)->vcc = vcc;
2198                                        __net_timestamp(sb);
2199                                        vcc->push(vcc, sb);
2200                                        atomic_inc(&vcc->stats->rx);
2201                                }
2202
2203                                push_rxbufs(card, skb);
2204
2205                        } else {        /* len > NS_SMBUFSIZE, the usual case */
2206
2207                                if (!atm_charge(vcc, skb->truesize)) {
2208                                        push_rxbufs(card, skb);
2209                                        atomic_inc(&vcc->stats->rx_drop);
2210                                } else {
2211                                        dequeue_lg_buf(card, skb);
2212#ifdef NS_USE_DESTRUCTORS
2213                                        skb->destructor = ns_lb_destructor;
2214#endif /* NS_USE_DESTRUCTORS */
2215                                        skb_push(skb, NS_SMBUFSIZE);
2216                                        skb_copy_from_linear_data(sb, skb->data,
2217                                                                  NS_SMBUFSIZE);
2218                                        skb_put(skb, len - NS_SMBUFSIZE);
2219                                        ATM_SKB(skb)->vcc = vcc;
2220                                        __net_timestamp(skb);
2221                                        vcc->push(vcc, skb);
2222                                        atomic_inc(&vcc->stats->rx);
2223                                }
2224
2225                                push_rxbufs(card, sb);
2226
2227                        }
2228
2229                } else {        /* Must push a huge buffer */
2230
2231                        struct sk_buff *hb, *sb, *lb;
2232                        int remaining, tocopy;
2233                        int j;
2234
2235                        hb = skb_dequeue(&(card->hbpool.queue));
2236                        if (hb == NULL) {       /* No buffers in the queue */
2237
2238                                hb = dev_alloc_skb(NS_HBUFSIZE);
2239                                if (hb == NULL) {
2240                                        printk
2241                                            ("nicstar%d: Out of huge buffers.\n",
2242                                             card->index);
2243                                        atomic_inc(&vcc->stats->rx_drop);
2244                                        recycle_iovec_rx_bufs(card,
2245                                                              (struct iovec *)
2246                                                              iovb->data,
2247                                                              NS_PRV_IOVCNT(iovb));
2248                                        vc->rx_iov = NULL;
2249                                        recycle_iov_buf(card, iovb);
2250                                        return;
2251                                } else if (card->hbpool.count < card->hbnr.min) {
2252                                        struct sk_buff *new_hb;
2253                                        if ((new_hb =
2254                                             dev_alloc_skb(NS_HBUFSIZE)) !=
2255                                            NULL) {
2256                                                skb_queue_tail(&card->hbpool.queue,
2257                                                               new_hb);
2258                                                card->hbpool.count++;
2259                                        }
2260                                }
2261                                NS_PRV_BUFTYPE(hb) = BUF_NONE;
2262                        } else if (--card->hbpool.count < card->hbnr.min) {
2263                                struct sk_buff *new_hb;
2264                                if ((new_hb =
2265                                     dev_alloc_skb(NS_HBUFSIZE)) != NULL) {
2266                                        NS_PRV_BUFTYPE(new_hb) = BUF_NONE;
2267                                        skb_queue_tail(&card->hbpool.queue,
2268                                                       new_hb);
2269                                        card->hbpool.count++;
2270                                }
2271                                if (card->hbpool.count < card->hbnr.min) {
2272                                        if ((new_hb =
2273                                             dev_alloc_skb(NS_HBUFSIZE)) !=
2274                                            NULL) {
2275                                                NS_PRV_BUFTYPE(new_hb) =
2276                                                    BUF_NONE;
2277                                                skb_queue_tail(&card->hbpool.queue,
2278                                                               new_hb);
2279                                                card->hbpool.count++;
2280                                        }
2281                                }
2282                        }
2283
2284                        iov = (struct iovec *)iovb->data;
2285
2286                        if (!atm_charge(vcc, hb->truesize)) {
2287                                recycle_iovec_rx_bufs(card, iov,
2288                                                      NS_PRV_IOVCNT(iovb));
2289                                if (card->hbpool.count < card->hbnr.max) {
2290                                        skb_queue_tail(&card->hbpool.queue, hb);
2291                                        card->hbpool.count++;
2292                                } else
2293                                        dev_kfree_skb_any(hb);
2294                                atomic_inc(&vcc->stats->rx_drop);
2295                        } else {
2296                                /* Copy the small buffer to the huge buffer */
2297                                sb = (struct sk_buff *)iov->iov_base;
2298                                skb_copy_from_linear_data(sb, hb->data,
2299                                                          iov->iov_len);
2300                                skb_put(hb, iov->iov_len);
2301                                remaining = len - iov->iov_len;
2302                                iov++;
2303                                /* Free the small buffer */
2304                                push_rxbufs(card, sb);
2305
2306                                /* Copy all large buffers to the huge buffer and free them */
2307                                for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) {
2308                                        lb = (struct sk_buff *)iov->iov_base;
2309                                        tocopy =
2310                                            min_t(int, remaining, iov->iov_len);
2311                                        skb_copy_from_linear_data(lb,
2312                                                                  skb_tail_pointer(hb),
2313                                                                  tocopy);
2314                                        skb_put(hb, tocopy);
2315                                        iov++;
2316                                        remaining -= tocopy;
2317                                        push_rxbufs(card, lb);
2318                                }
2319#ifdef EXTRA_DEBUG
2320                                if (remaining != 0 || hb->len != len)
2321                                        printk
2322                                            ("nicstar%d: Huge buffer len mismatch.\n",
2323                                             card->index);
2324#endif /* EXTRA_DEBUG */
2325                                ATM_SKB(hb)->vcc = vcc;
2326#ifdef NS_USE_DESTRUCTORS
2327                                hb->destructor = ns_hb_destructor;
2328#endif /* NS_USE_DESTRUCTORS */
2329                                __net_timestamp(hb);
2330                                vcc->push(vcc, hb);
2331                                atomic_inc(&vcc->stats->rx);
2332                        }
2333                }
2334
2335                vc->rx_iov = NULL;
2336                recycle_iov_buf(card, iovb);
2337        }
2338
2339}
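
/*
 * AAL5 reassembly bookkeeping: each RSQ entry contributes one iovec
 * (whose base points at the sk_buff of the small or large buffer, not at
 * the data itself) until an end-of-PDU entry arrives.  The PDU length is
 * then read from the AAL5 trailer, whose last eight bytes are UU, CPI, a
 * 16-bit length and the CRC-32, so the two length bytes sit six bytes
 * from the end and a value of zero means 65536.  For the 100-byte SDU
 * used as an example near ns_send(), 144 bytes of cells arrive and the
 * sanity check len + 8 <= iovb->len <= len + 55 (108 <= 144 <= 155)
 * passes.  A hypothetical helper mirroring the in-line extraction, kept
 * out of the build:
 */
#if 0
static int ns_aal5_pdu_len(const unsigned char *end_of_pdu)
{
        /* end_of_pdu points just past the CRC-32 of the last cell */
        int len = end_of_pdu[-6] << 8 | end_of_pdu[-5];

        return len ? len : 0x10000;
}
#endif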
2340
2341#ifdef NS_USE_DESTRUCTORS
2342
2343static void ns_sb_destructor(struct sk_buff *sb)
2344{
2345        ns_dev *card;
2346        u32 stat;
2347
2348        card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
2349        stat = readl(card->membase + STAT);
2350        card->sbfqc = ns_stat_sfbqc_get(stat);
2351        card->lbfqc = ns_stat_lfbqc_get(stat);
2352
2353        do {
2354                sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2355                if (sb == NULL)
2356                        break;
2357                NS_PRV_BUFTYPE(sb) = BUF_SM;
2358                skb_queue_tail(&card->sbpool.queue, sb);
2359                skb_reserve(sb, NS_AAL0_HEADER);
2360                push_rxbufs(card, sb);
2361        } while (card->sbfqc < card->sbnr.min);
2362}
2363
2364static void ns_lb_destructor(struct sk_buff *lb)
2365{
2366        ns_dev *card;
2367        u32 stat;
2368
2369        card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
2370        stat = readl(card->membase + STAT);
2371        card->sbfqc = ns_stat_sfbqc_get(stat);
2372        card->lbfqc = ns_stat_lfbqc_get(stat);
2373
2374        do {
2375                lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2376                if (lb == NULL)
2377                        break;
2378                NS_PRV_BUFTYPE(lb) = BUF_LG;
2379                skb_queue_tail(&card->lbpool.queue, lb);
2380                skb_reserve(lb, NS_SMBUFSIZE);
2381                push_rxbufs(card, lb);
2382        } while (card->lbfqc < card->lbnr.min);
2383}
2384
2385static void ns_hb_destructor(struct sk_buff *hb)
2386{
2387        ns_dev *card;
2388
2389        card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
2390
2391        while (card->hbpool.count < card->hbnr.init) {
2392                hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2393                if (hb == NULL)
2394                        break;
2395                NS_PRV_BUFTYPE(hb) = BUF_NONE;
2396                skb_queue_tail(&card->hbpool.queue, hb);
2397                card->hbpool.count++;
2398        }
2399}
2400
2401#endif /* NS_USE_DESTRUCTORS */
2402
2403static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
2404{
2405        if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
2406                printk("nicstar%d: What kind of rx buffer is this?\n",
2407                       card->index);
2408                dev_kfree_skb_any(skb);
2409        } else
2410                push_rxbufs(card, skb);
2411}
2412
2413static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count)
2414{
2415        while (count-- > 0)
2416                recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base);
2417}
2418
2419static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb)
2420{
2421        if (card->iovpool.count < card->iovnr.max) {
2422                skb_queue_tail(&card->iovpool.queue, iovb);
2423                card->iovpool.count++;
2424        } else
2425                dev_kfree_skb_any(iovb);
2426}
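
/*
 * Recycling paths: recycle_rx_buf() hands a small or large data buffer
 * back to the card's free buffer queues via push_rxbufs() (complaining if
 * it is given a BUF_NONE skb), recycle_iovec_rx_bufs() does that for each
 * buffer of a partially reassembled PDU, and recycle_iov_buf() returns
 * the iovec skb itself to the software iovpool, or frees it once the
 * pool already holds iovnr.max buffers.
 */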
2427
2428static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
2429{
2430        skb_unlink(sb, &card->sbpool.queue);
2431#ifdef NS_USE_DESTRUCTORS
2432        if (card->sbfqc < card->sbnr.min)
2433#else
2434        if (card->sbfqc < card->sbnr.init) {
2435                struct sk_buff *new_sb;
2436                if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
2437                        NS_PRV_BUFTYPE(new_sb) = BUF_SM;
2438                        skb_queue_tail(&card->sbpool.queue, new_sb);
2439                        skb_reserve(new_sb, NS_AAL0_HEADER);
2440                        push_rxbufs(card, new_sb);
2441                }
2442        }
2443        if (card->sbfqc < card->sbnr.init)
2444#endif /* NS_USE_DESTRUCTORS */
2445        {
2446                struct sk_buff *new_sb;
2447                if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
2448                        NS_PRV_BUFTYPE(new_sb) = BUF_SM;
2449                        skb_queue_tail(&card->sbpool.queue, new_sb);
2450                        skb_reserve(new_sb, NS_AAL0_HEADER);
2451                        push_rxbufs(card, new_sb);
2452                }
2453        }
2454}
2455
2456static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
2457{
2458        skb_unlink(lb, &card->lbpool.queue);
2459#ifdef NS_USE_DESTRUCTORS
2460        if (card->lbfqc < card->lbnr.min)
2461#else
2462        if (card->lbfqc < card->lbnr.init) {
2463                struct sk_buff *new_lb;
2464                if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
2465                        NS_PRV_BUFTYPE(new_lb) = BUF_LG;
2466                        skb_queue_tail(&card->lbpool.queue, new_lb);
2467                        skb_reserve(new_lb, NS_SMBUFSIZE);
2468                        push_rxbufs(card, new_lb);
2469                }
2470        }
2471        if (card->lbfqc < card->lbnr.init)
2472#endif /* NS_USE_DESTRUCTORS */
2473        {
2474                struct sk_buff *new_lb;
2475                if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
2476                        NS_PRV_BUFTYPE(new_lb) = BUF_LG;
2477                        skb_queue_tail(&card->lbpool.queue, new_lb);
2478                        skb_reserve(new_lb, NS_SMBUFSIZE);
2479                        push_rxbufs(card, new_lb);
2480                }
2481        }
2482}
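
/*
 * dequeue_sm_buf() and dequeue_lg_buf() detach a buffer that is about to
 * be pushed up the stack from the driver's sbpool/lbpool bookkeeping
 * queue and, whenever the free buffer queue count (sbfqc/lbfqc) has
 * dropped below the configured level (init here, min in the
 * NS_USE_DESTRUCTORS build), allocate and post replacements with
 * push_rxbufs() right away instead of waiting for the card's low-buffer
 * interrupt.
 */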
2483
2484static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page)
2485{
2486        u32 stat;
2487        ns_dev *card;
2488        int left;
2489
2490        left = (int)*pos;
2491        card = (ns_dev *) dev->dev_data;
2492        stat = readl(card->membase + STAT);
2493        if (!left--)
2494                return sprintf(page, "Pool   count    min   init    max \n");
2495        if (!left--)
2496                return sprintf(page, "Small  %5d  %5d  %5d  %5d \n",
2497                               ns_stat_sfbqc_get(stat), card->sbnr.min,
2498                               card->sbnr.init, card->sbnr.max);
2499        if (!left--)
2500                return sprintf(page, "Large  %5d  %5d  %5d  %5d \n",
2501                               ns_stat_lfbqc_get(stat), card->lbnr.min,
2502                               card->lbnr.init, card->lbnr.max);
2503        if (!left--)
2504                return sprintf(page, "Huge   %5d  %5d  %5d  %5d \n",
2505                               card->hbpool.count, card->hbnr.min,
2506                               card->hbnr.init, card->hbnr.max);
2507        if (!left--)
2508                return sprintf(page, "Iovec  %5d  %5d  %5d  %5d \n",
2509                               card->iovpool.count, card->iovnr.min,
2510                               card->iovnr.init, card->iovnr.max);
2511        if (!left--) {
2512                int retval;
2513                retval =
2514                    sprintf(page, "Interrupt counter: %u \n", card->intcnt);
2515                card->intcnt = 0;
2516                return retval;
2517        }
2518#if 0
2519        /* Dump 25.6 Mbps PHY registers */
2520        /* Now that there's a 25.6 Mbps PHY driver, this code isn't needed. I left
2521           it here just in case it's needed for debugging. */
2522        if (card->max_pcr == ATM_25_PCR && !left--) {
2523                u32 phy_regs[4];
2524                u32 i;
2525
2526                for (i = 0; i < 4; i++) {
2527                        while (CMD_BUSY(card)) ;
2528                        writel(NS_CMD_READ_UTILITY | 0x00000200 | i,
2529                               card->membase + CMD);
2530                        while (CMD_BUSY(card)) ;
2531                        phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
2532                }
2533
2534                return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
2535                               phy_regs[0], phy_regs[1], phy_regs[2],
2536                               phy_regs[3]);
2537        }
2538#endif /* 0 - Dump 25.6 Mbps PHY registers */
2539#if 0
2540        /* Dump TST */
2541        if (left-- < NS_TST_NUM_ENTRIES) {
2542                if (card->tste2vc[left + 1] == NULL)
2543                        return sprintf(page, "%5d - VBR/UBR \n", left + 1);
2544                else
2545                        return sprintf(page, "%5d - %d %d \n", left + 1,
2546                                       card->tste2vc[left + 1]->tx_vcc->vpi,
2547                                       card->tste2vc[left + 1]->tx_vcc->vci);
2548        }
2549#endif /* 0 */
2550        return 0;
2551}
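
/*
 * ns_proc_read() follows the one-line-per-call convention: *pos selects
 * the line, each "if (!left--)" claims the next line number, and a
 * return of 0 ends the file.  The first six lines report a header, the
 * four buffer pool levels and the interrupt counter (which is reset
 * whenever it is read); the #if 0 blocks above can be re-enabled to dump
 * the 25.6 Mbps PHY registers or the whole TST while debugging.
 */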
2552
2553static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg)
2554{
2555        ns_dev *card;
2556        pool_levels pl;
2557        long btype;
2558        unsigned long flags;
2559
2560        card = dev->dev_data;
2561        switch (cmd) {
2562        case NS_GETPSTAT:
2563                if (get_user
2564                    (pl.buftype, &((pool_levels __user *) arg)->buftype))
2565                        return -EFAULT;
2566                switch (pl.buftype) {
2567                case NS_BUFTYPE_SMALL:
2568                        pl.count =
2569                            ns_stat_sfbqc_get(readl(card->membase + STAT));
2570                        pl.level.min = card->sbnr.min;
2571                        pl.level.init = card->sbnr.init;
2572                        pl.level.max = card->sbnr.max;
2573                        break;
2574
2575                case NS_BUFTYPE_LARGE:
2576                        pl.count =
2577                            ns_stat_lfbqc_get(readl(card->membase + STAT));
2578                        pl.level.min = card->lbnr.min;
2579                        pl.level.init = card->lbnr.init;
2580                        pl.level.max = card->lbnr.max;
2581                        break;
2582
2583                case NS_BUFTYPE_HUGE:
2584                        pl.count = card->hbpool.count;
2585                        pl.level.min = card->hbnr.min;
2586                        pl.level.init = card->hbnr.init;
2587                        pl.level.max = card->hbnr.max;
2588                        break;
2589
2590                case NS_BUFTYPE_IOVEC:
2591                        pl.count = card->iovpool.count;
2592                        pl.level.min = card->iovnr.min;
2593                        pl.level.init = card->iovnr.init;
2594                        pl.level.max = card->iovnr.max;
2595                        break;
2596
2597                default:
2598                        return -ENOIOCTLCMD;
2599
2600                }
2601                if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl)))
2602                        return (sizeof(pl));
2603                else
2604                        return -EFAULT;
2605
2606        case NS_SETBUFLEV:
2607                if (!capable(CAP_NET_ADMIN))
2608                        return -EPERM;
2609                if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl)))
2610                        return -EFAULT;
2611                if (pl.level.min >= pl.level.init
2612                    || pl.level.init >= pl.level.max)
2613                        return -EINVAL;
2614                if (pl.level.min == 0)
2615                        return -EINVAL;
2616                switch (pl.buftype) {
2617                case NS_BUFTYPE_SMALL:
2618                        if (pl.level.max > TOP_SB)
2619                                return -EINVAL;
2620                        card->sbnr.min = pl.level.min;
2621                        card->sbnr.init = pl.level.init;
2622                        card->sbnr.max = pl.level.max;
2623                        break;
2624
2625                case NS_BUFTYPE_LARGE:
2626                        if (pl.level.max > TOP_LB)
2627                                return -EINVAL;
2628                        card->lbnr.min = pl.level.min;
2629                        card->lbnr.init = pl.level.init;
2630                        card->lbnr.max = pl.level.max;
2631                        break;
2632
2633                case NS_BUFTYPE_HUGE:
2634                        if (pl.level.max > TOP_HB)
2635                                return -EINVAL;
2636                        card->hbnr.min = pl.level.min;
2637                        card->hbnr.init = pl.level.init;
2638                        card->hbnr.max = pl.level.max;
2639                        break;
2640
2641                case NS_BUFTYPE_IOVEC:
2642                        if (pl.level.max > TOP_IOVB)
2643                                return -EINVAL;
2644                        card->iovnr.min = pl.level.min;
2645                        card->iovnr.init = pl.level.init;
2646                        card->iovnr.max = pl.level.max;
2647                        break;
2648
2649                default:
2650                        return -EINVAL;
2651
2652                }
2653                return 0;
2654
2655        case NS_ADJBUFLEV:
2656                if (!capable(CAP_NET_ADMIN))
2657                        return -EPERM;
2658        btype = (long)arg;      /* a long is at least as wide as a pointer, so the cast is safe */
2659                switch (btype) {
2660                case NS_BUFTYPE_SMALL:
2661                        while (card->sbfqc < card->sbnr.init) {
2662                                struct sk_buff *sb;
2663
2664                                sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2665                                if (sb == NULL)
2666                                        return -ENOMEM;
2667                                NS_PRV_BUFTYPE(sb) = BUF_SM;
2668                                skb_queue_tail(&card->sbpool.queue, sb);
2669                                skb_reserve(sb, NS_AAL0_HEADER);
2670                                push_rxbufs(card, sb);
2671                        }
2672                        break;
2673
2674                case NS_BUFTYPE_LARGE:
2675                        while (card->lbfqc < card->lbnr.init) {
2676                                struct sk_buff *lb;
2677
2678                                lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2679                                if (lb == NULL)
2680                                        return -ENOMEM;
2681                                NS_PRV_BUFTYPE(lb) = BUF_LG;
2682                                skb_queue_tail(&card->lbpool.queue, lb);
2683                                skb_reserve(lb, NS_SMBUFSIZE);
2684                                push_rxbufs(card, lb);
2685                        }
2686                        break;
2687
2688                case NS_BUFTYPE_HUGE:
2689                        while (card->hbpool.count > card->hbnr.init) {
2690                                struct sk_buff *hb;
2691
2692                                spin_lock_irqsave(&card->int_lock, flags);
2693                                hb = skb_dequeue(&card->hbpool.queue);
2694                                card->hbpool.count--;
2695                                spin_unlock_irqrestore(&card->int_lock, flags);
2696                                if (hb == NULL)
2697                                        printk
2698                                            ("nicstar%d: huge buffer count inconsistent.\n",
2699                                             card->index);
2700                                else
2701                                        dev_kfree_skb_any(hb);
2702
2703                        }
2704                        while (card->hbpool.count < card->hbnr.init) {
2705                                struct sk_buff *hb;
2706
2707                                hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2708                                if (hb == NULL)
2709                                        return -ENOMEM;
2710                                NS_PRV_BUFTYPE(hb) = BUF_NONE;
2711                                spin_lock_irqsave(&card->int_lock, flags);
2712                                skb_queue_tail(&card->hbpool.queue, hb);
2713                                card->hbpool.count++;
2714                                spin_unlock_irqrestore(&card->int_lock, flags);
2715                        }
2716                        break;
2717
2718                case NS_BUFTYPE_IOVEC:
2719                        while (card->iovpool.count > card->iovnr.init) {
2720                                struct sk_buff *iovb;
2721
2722                                spin_lock_irqsave(&card->int_lock, flags);
2723                                iovb = skb_dequeue(&card->iovpool.queue);
2724                                card->iovpool.count--;
2725                                spin_unlock_irqrestore(&card->int_lock, flags);
2726                                if (iovb == NULL)
2727                                        printk
2728                                            ("nicstar%d: iovec buffer count inconsistent.\n",
2729                                             card->index);
2730                                else
2731                                        dev_kfree_skb_any(iovb);
2732
2733                        }
2734                        while (card->iovpool.count < card->iovnr.init) {
2735                                struct sk_buff *iovb;
2736
2737                                iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
2738                                if (iovb == NULL)
2739                                        return -ENOMEM;
2740                                NS_PRV_BUFTYPE(iovb) = BUF_NONE;
2741                                spin_lock_irqsave(&card->int_lock, flags);
2742                                skb_queue_tail(&card->iovpool.queue, iovb);
2743                                card->iovpool.count++;
2744                                spin_unlock_irqrestore(&card->int_lock, flags);
2745                        }
2746                        break;
2747
2748                default:
2749                        return -EINVAL;
2750
2751                }
2752                return 0;
2753
2754        default:
2755                if (dev->phy && dev->phy->ioctl) {
2756                        return dev->phy->ioctl(dev, cmd, arg);
2757                } else {
2758                        printk("nicstar%d: %s == NULL \n", card->index,
2759                               dev->phy ? "dev->phy->ioctl" : "dev->phy");
2760                        return -ENOIOCTLCMD;
2761                }
2762        }
2763}
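/*
 * Hedged example (not part of the driver): a minimal user-space sketch of
 * how the NS_GETPSTAT command handled above might be issued, assuming the
 * usual ATM device-ioctl path (an ATM socket plus struct atmif_sioc from
 * <linux/atmdev.h>).  The interface number and the PF_ATMPVC/SOCK_DGRAM
 * socket are illustrative assumptions, not something this file defines.
 */
#if 0	/* user-space sketch, never compiled with the driver */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/atmdev.h>
#include <linux/atm_nicstar.h>

static int ns_get_pool_level(int itf, int buftype, pool_levels *pl)
{
	struct atmif_sioc sioc;
	int s, ret;

	s = socket(PF_ATMPVC, SOCK_DGRAM, 0);
	if (s < 0)
		return -1;
	memset(pl, 0, sizeof(*pl));
	pl->buftype = buftype;		/* e.g. NS_BUFTYPE_SMALL */
	sioc.number = itf;		/* ATM interface number */
	sioc.arg = pl;
	sioc.length = sizeof(*pl);
	/* On success ns_ioctl() returns sizeof(pool_levels) and fills *pl. */
	ret = ioctl(s, NS_GETPSTAT, &sioc);
	close(s);
	return ret;
}
#endif /* 0 - user-space NS_GETPSTAT sketch */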
2764
2765#ifdef EXTRA_DEBUG
2766static void which_list(ns_dev * card, struct sk_buff *skb)
2767{
2768        printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb));
2769}
2770#endif /* EXTRA_DEBUG */
2771
2772static void ns_poll(unsigned long arg)
2773{
2774        int i;
2775        ns_dev *card;
2776        unsigned long flags;
2777        u32 stat_r, stat_w;
2778
2779        PRINTK("nicstar: Entering ns_poll().\n");
2780        for (i = 0; i < num_cards; i++) {
2781                card = cards[i];
2782                if (spin_is_locked(&card->int_lock)) {
2783                        /* Most likely the IRQ handler is already servicing this card; not worth spinning */
2784                        continue;
2785                }
2786                spin_lock_irqsave(&card->int_lock, flags);
2787
2788                stat_w = 0;
2789                stat_r = readl(card->membase + STAT);
2790                if (stat_r & NS_STAT_TSIF)
2791                        stat_w |= NS_STAT_TSIF;
2792                if (stat_r & NS_STAT_EOPDU)
2793                        stat_w |= NS_STAT_EOPDU;
2794
2795                process_tsq(card);
2796                process_rsq(card);
2797
2798                writel(stat_w, card->membase + STAT);
2799                spin_unlock_irqrestore(&card->int_lock, flags);
2800        }
2801        mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
2802        PRINTK("nicstar: Leaving ns_poll().\n");
2803}
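/*
 * Hedged note: ns_poll() above only re-arms ns_timer; the timer still has to
 * be armed once at start-up.  With the old timer_list API implied by the
 * unsigned long callback argument, that set-up would look roughly like the
 * sketch below.  The exact location (nicstar_init()/ns_init_card()) and the
 * data value are assumptions, not taken from this excerpt.
 */
#if 0	/* initialisation sketch, not compiled */
	init_timer(&ns_timer);
	ns_timer.function = ns_poll;
	ns_timer.data = 0UL;			/* unused by ns_poll() */
	ns_timer.expires = jiffies + NS_POLL_PERIOD;
	add_timer(&ns_timer);
#endif /* 0 - ns_timer set-up sketch */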
2804
2805static int ns_parse_mac(char *mac, unsigned char *esi)
2806{
2807        int i, j;
2808        short byte1, byte0;
2809
2810        if (mac == NULL || esi == NULL)
2811                return -1;
2812        j = 0;
2813        for (i = 0; i < 6; i++) {
2814                if ((byte1 = hex_to_bin(mac[j++])) < 0)
2815                        return -1;
2816                if ((byte0 = hex_to_bin(mac[j++])) < 0)
2817                        return -1;
2818                esi[i] = (unsigned char)(byte1 * 16 + byte0);
2819                if (i < 5) {
2820                        if (mac[j++] != ':')
2821                                return -1;
2822                }
2823        }
2824        return 0;
2825}
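/*
 * Hedged usage example: ns_parse_mac() expects six colon-separated hex
 * octets ("xx:xx:xx:xx:xx:xx") and fills a 6-byte ESI, returning 0 on
 * success and -1 on any malformed input.  The literal address below and the
 * copy into card->atmdev->esi are illustrative assumptions.
 */
#if 0	/* usage sketch, not compiled */
	unsigned char esi[6];

	if (ns_parse_mac("00:20:EA:00:12:34", esi) == 0)
		/* esi[] now holds 0x00 0x20 0xEA 0x00 0x12 0x34 */
		memcpy(card->atmdev->esi, esi, 6);
	else
		printk("nicstar%d: badly formed mac parameter.\n", card->index);
#endif /* 0 - ns_parse_mac() usage sketch */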
2826
2827
2828static void ns_phy_put(struct atm_dev *dev, unsigned char value,
2829                       unsigned long addr)
2830{
2831        ns_dev *card;
2832        unsigned long flags;
2833
2834        card = dev->dev_data;
2835        spin_lock_irqsave(&card->res_lock, flags);
2836        while (CMD_BUSY(card)) ;
2837        writel((u32) value, card->membase + DR0);
2838        writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
2839               card->membase + CMD);
2840        spin_unlock_irqrestore(&card->res_lock, flags);
2841}
2842
2843static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr)
2844{
2845        ns_dev *card;
2846        unsigned long flags;
2847        u32 data;
2848
2849        card = dev->dev_data;
2850        spin_lock_irqsave(&card->res_lock, flags);
2851        while (CMD_BUSY(card)) ;
2852        writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF),
2853               card->membase + CMD);
2854        while (CMD_BUSY(card)) ;
2855        data = readl(card->membase + DR0) & 0x000000FF;
2856        spin_unlock_irqrestore(&card->res_lock, flags);
2857        return (unsigned char)data;
2858}
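/*
 * Hedged note: ns_phy_put()/ns_phy_get() implement utility-bus access to the
 * PHY (SUNI or IDT77105) registers.  They are presumably exported through
 * the struct atmdev_ops .phy_put/.phy_get entries elsewhere in this file, so
 * a PHY driver would reach them as sketched below; the register number is
 * illustrative only.
 */
#if 0	/* PHY-side usage sketch, not compiled */
	unsigned char reg;

	reg = dev->ops->phy_get(dev, 0x00);		/* read one PHY register */
	dev->ops->phy_put(dev, reg | 0x01, 0x00);	/* write it back with bit 0 set */
#endif /* 0 - phy_get/phy_put usage sketch */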
2859
2860module_init(nicstar_init);
2861module_exit(nicstar_cleanup);
2862