linux/drivers/media/rc/nuvoton-cir.c
   1/*
   2 * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
   3 *
   4 * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
   5 * Copyright (C) 2009 Nuvoton PS Team
   6 *
   7 * Special thanks to Nuvoton for providing hardware, spec sheets and
   8 * sample code upon which portions of this driver are based. Indirect
   9 * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
  10 * modeled after.
  11 *
  12 * This program is free software; you can redistribute it and/or
  13 * modify it under the terms of the GNU General Public License as
  14 * published by the Free Software Foundation; either version 2 of the
  15 * License, or (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful, but
  18 * WITHOUT ANY WARRANTY; without even the implied warranty of
  19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  20 * General Public License for more details.
  21 *
  22 * You should have received a copy of the GNU General Public License
  23 * along with this program; if not, write to the Free Software
  24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  25 * USA
  26 */
  27
  28#include <linux/kernel.h>
  29#include <linux/module.h>
  30#include <linux/pnp.h>
  31#include <linux/io.h>
  32#include <linux/interrupt.h>
  33#include <linux/sched.h>
  34#include <linux/slab.h>
  35#include <media/rc-core.h>
  36#include <linux/pci_ids.h>
  37
  38#include "nuvoton-cir.h"
  39
  40/* write val to config reg */
  41static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
  42{
  43        outb(reg, nvt->cr_efir);
  44        outb(val, nvt->cr_efdr);
  45}
  46
  47/* read val from config reg */
  48static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
  49{
  50        outb(reg, nvt->cr_efir);
  51        return inb(nvt->cr_efdr);
  52}
  53
  54/* update config register bit without changing other bits */
  55static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
  56{
  57        u8 tmp = nvt_cr_read(nvt, reg) | val;
  58        nvt_cr_write(nvt, tmp, reg);
  59}
  60
  61/* clear config register bit without changing other bits */
  62static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
  63{
  64        u8 tmp = nvt_cr_read(nvt, reg) & ~val;
  65        nvt_cr_write(nvt, tmp, reg);
  66}
  67
  68/* enter extended function mode */
  69static inline void nvt_efm_enable(struct nvt_dev *nvt)
  70{
  71        /* Enabling Extended Function Mode explicitly requires writing 2x */
  72        outb(EFER_EFM_ENABLE, nvt->cr_efir);
  73        outb(EFER_EFM_ENABLE, nvt->cr_efir);
  74}
  75
  76/* exit extended function mode */
  77static inline void nvt_efm_disable(struct nvt_dev *nvt)
  78{
  79        outb(EFER_EFM_DISABLE, nvt->cr_efir);
  80}
  81
  82/*
  83 * When you want to address a specific logical device, write its logical
  84 * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
  85 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
  86 */
  87static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
  88{
  89        outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir);
  90        outb(ldev, nvt->cr_efdr);
  91}
  92
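/*
 * A minimal usage sketch (illustration only, not part of the driver): to
 * bring a logical device up, select it and then write the enable bit, just
 * as the init paths below do for LOGICAL_DEV_CIR and LOGICAL_DEV_CIR_WAKE.
 */
#if 0
static inline void nvt_example_enable_ldev(struct nvt_dev *nvt, u8 ldev)
{
        /* point CR_LOGICAL_DEV_SEL at the device of interest */
        nvt_select_logical_dev(nvt, ldev);
        /* then write 0x1 (LOGICAL_DEV_ENABLE) to CR_LOGICAL_DEV_EN */
        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
}
#endif
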
  93/* write val to cir config register */
  94static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
  95{
  96        outb(val, nvt->cir_addr + offset);
  97}
  98
  99/* read val from cir config register */
 100static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
 101{
 102        u8 val;
 103
 104        val = inb(nvt->cir_addr + offset);
 105
 106        return val;
 107}
 108
 109/* write val to cir wake register */
 110static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
 111                                          u8 val, u8 offset)
 112{
 113        outb(val, nvt->cir_wake_addr + offset);
 114}
 115
 116/* read val from cir wake config register */
 117static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
 118{
 119        u8 val;
 120
 121        val = inb(nvt->cir_wake_addr + offset);
 122
 123        return val;
 124}
 125
 126#define pr_reg(text, ...) \
 127        printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__)
 128
 129/* dump current cir register contents */
 130static void cir_dump_regs(struct nvt_dev *nvt)
 131{
 132        nvt_efm_enable(nvt);
 133        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 134
 135        pr_reg("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
 136        pr_reg(" * CR CIR ACTIVE :   0x%x\n",
 137               nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
 138        pr_reg(" * CR CIR BASE ADDR: 0x%x\n",
 139               (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
 140                nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
 141        pr_reg(" * CR CIR IRQ NUM:   0x%x\n",
 142               nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
 143
 144        nvt_efm_disable(nvt);
 145
 146        pr_reg("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
 147        pr_reg(" * IRCON:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
 148        pr_reg(" * IRSTS:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
 149        pr_reg(" * IREN:      0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
 150        pr_reg(" * RXFCONT:   0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
 151        pr_reg(" * CP:        0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
 152        pr_reg(" * CC:        0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
 153        pr_reg(" * SLCH:      0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
 154        pr_reg(" * SLCL:      0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
 155        pr_reg(" * FIFOCON:   0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
 156        pr_reg(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
 157        pr_reg(" * SRXFIFO:   0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
 158        pr_reg(" * TXFCONT:   0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
 159        pr_reg(" * STXFIFO:   0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
 160        pr_reg(" * FCCH:      0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
 161        pr_reg(" * FCCL:      0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
 162        pr_reg(" * IRFSM:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
 163}
 164
 165/* dump current cir wake register contents */
 166static void cir_wake_dump_regs(struct nvt_dev *nvt)
 167{
 168        u8 i, fifo_len;
 169
 170        nvt_efm_enable(nvt);
 171        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
 172
 173        pr_reg("%s: Dump CIR WAKE logical device registers:\n",
 174               NVT_DRIVER_NAME);
 175        pr_reg(" * CR CIR WAKE ACTIVE :   0x%x\n",
 176               nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
 177        pr_reg(" * CR CIR WAKE BASE ADDR: 0x%x\n",
 178               (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
 179                nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
 180        pr_reg(" * CR CIR WAKE IRQ NUM:   0x%x\n",
 181               nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
 182
 183        nvt_efm_disable(nvt);
 184
 185        pr_reg("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
 186        pr_reg(" * IRCON:          0x%x\n",
 187               nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
 188        pr_reg(" * IRSTS:          0x%x\n",
 189               nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
 190        pr_reg(" * IREN:           0x%x\n",
 191               nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
 192        pr_reg(" * FIFO CMP DEEP:  0x%x\n",
 193               nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
 194        pr_reg(" * FIFO CMP TOL:   0x%x\n",
 195               nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
 196        pr_reg(" * FIFO COUNT:     0x%x\n",
 197               nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
 198        pr_reg(" * SLCH:           0x%x\n",
 199               nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
 200        pr_reg(" * SLCL:           0x%x\n",
 201               nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
 202        pr_reg(" * FIFOCON:        0x%x\n",
 203               nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
 204        pr_reg(" * SRXFSTS:        0x%x\n",
 205               nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
 206        pr_reg(" * SAMPLE RX FIFO: 0x%x\n",
 207               nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
 208        pr_reg(" * WR FIFO DATA:   0x%x\n",
 209               nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
 210        pr_reg(" * RD FIFO ONLY:   0x%x\n",
 211               nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
 212        pr_reg(" * RD FIFO ONLY IDX: 0x%x\n",
 213               nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
 214        pr_reg(" * FIFO IGNORE:    0x%x\n",
 215               nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
 216        pr_reg(" * IRFSM:          0x%x\n",
 217               nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));
 218
 219        fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
 220        pr_reg("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
 221        pr_reg("* Contents = ");
 222        for (i = 0; i < fifo_len; i++)
 223                printk(KERN_CONT "%02x ",
 224                       nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
 225        printk(KERN_CONT "\n");
 226}
 227
 228/* detect hardware features */
 229static int nvt_hw_detect(struct nvt_dev *nvt)
 230{
 231        unsigned long flags;
 232        u8 chip_major, chip_minor;
 233        int ret = 0;
 234        char chip_id[12];
 235        bool chip_unknown = false;
 236
 237        nvt_efm_enable(nvt);
 238
 239        /* Check if we're wired for the alternate EFER setup */
 240        chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
 241        if (chip_major == 0xff) {
 242                nvt->cr_efir = CR_EFIR2;
 243                nvt->cr_efdr = CR_EFDR2;
 244                nvt_efm_enable(nvt);
 245                chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
 246        }
 247
 248        chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);
 249
 250        /* these are the known working chip revisions... */
 251        switch (chip_major) {
 252        case CHIP_ID_HIGH_667:
 253                strcpy(chip_id, "w83667hg\0");
 254                if (chip_minor != CHIP_ID_LOW_667)
 255                        chip_unknown = true;
 256                break;
 257        case CHIP_ID_HIGH_677B:
 258                strcpy(chip_id, "w83677hg\0");
 259                if (chip_minor != CHIP_ID_LOW_677B2 &&
 260                    chip_minor != CHIP_ID_LOW_677B3)
 261                        chip_unknown = true;
 262                break;
 263        case CHIP_ID_HIGH_677C:
 264                strcpy(chip_id, "w83677hg-c\0");
 265                if (chip_minor != CHIP_ID_LOW_677C)
 266                        chip_unknown = true;
 267                break;
 268        default:
 269                strcpy(chip_id, "w836x7hg\0");
 270                chip_unknown = true;
 271                break;
 272        }
 273
 274        /* warn, but still let the driver load, if we don't know this chip */
 275        if (chip_unknown)
 276                nvt_pr(KERN_WARNING, "%s: unknown chip, id: 0x%02x 0x%02x, "
 277                       "it may not work...", chip_id, chip_major, chip_minor);
 278        else
 279                nvt_dbg("%s: chip id: 0x%02x 0x%02x",
 280                        chip_id, chip_major, chip_minor);
 281
 282        nvt_efm_disable(nvt);
 283
 284        spin_lock_irqsave(&nvt->nvt_lock, flags);
 285        nvt->chip_major = chip_major;
 286        nvt->chip_minor = chip_minor;
 287        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 288
 289        return ret;
 290}
 291
 292static void nvt_cir_ldev_init(struct nvt_dev *nvt)
 293{
 294        u8 val, psreg, psmask, psval;
 295
 296        if (nvt->chip_major == CHIP_ID_HIGH_667) {
 297                psreg = CR_MULTIFUNC_PIN_SEL;
 298                psmask = MULTIFUNC_PIN_SEL_MASK;
 299                psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
 300        } else {
 301                psreg = CR_OUTPUT_PIN_SEL;
 302                psmask = OUTPUT_PIN_SEL_MASK;
 303                psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
 304        }
 305
 306        /* output pin selection: enable CIR, with WB sensor enabled */
 307        val = nvt_cr_read(nvt, psreg);
 308        val &= psmask;
 309        val |= psval;
 310        nvt_cr_write(nvt, val, psreg);
 311
 312        /* Select CIR logical device and enable */
 313        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 314        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 315
 316        nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI);
 317        nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO);
 318
 319        nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);
 320
 321        nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
 322                nvt->cir_addr, nvt->cir_irq);
 323}
 324
 325static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
 326{
 327        /* Select ACPI logical device, enable it and CIR Wake */
 328        nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
 329        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 330
 331        /* Enable CIR Wake via PSOUT# (Pin60) */
 332        nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
 333
 334        /* enable cir interrupt of mouse/keyboard IRQ event */
 335        nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);
 336
 337        /* enable pme interrupt of cir wakeup event */
 338        nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
 339
 340        /* Select CIR Wake logical device and enable */
 341        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
 342        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 343
 344        nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI);
 345        nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO);
 346
 347        nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);
 348
 349        nvt_dbg("CIR Wake initialized, base io port address: 0x%lx, irq: %d",
 350                nvt->cir_wake_addr, nvt->cir_wake_irq);
 351}
 352
 353/* clear out the hardware's cir rx fifo */
 354static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
 355{
 356        u8 val;
 357
 358        val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
 359        nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
 360}
 361
 362/* clear out the hardware's cir wake rx fifo */
 363static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
 364{
 365        u8 val;
 366
 367        val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
 368        nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
 369                               CIR_WAKE_FIFOCON);
 370}
 371
 372/* clear out the hardware's cir tx fifo */
 373static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
 374{
 375        u8 val;
 376
 377        val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
 378        nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
 379}
 380
 381/* enable RX Trigger Level Reach and Packet End interrupts */
 382static void nvt_set_cir_iren(struct nvt_dev *nvt)
 383{
 384        u8 iren;
 385
 386        iren = CIR_IREN_RTR | CIR_IREN_PE;
 387        nvt_cir_reg_write(nvt, iren, CIR_IREN);
 388}
 389
 390static void nvt_cir_regs_init(struct nvt_dev *nvt)
 391{
 392        /* set sample limit count (PE interrupt raised when reached) */
 393        nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
 394        nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);
 395
 396        /* set fifo irq trigger levels */
 397        nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
 398                          CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);
 399
 400        /*
 401         * Enable TX and RX, specify carrier on = low, off = high, and set
 402         * sample period (currently 50us)
 403         */
 404        nvt_cir_reg_write(nvt,
 405                          CIR_IRCON_TXEN | CIR_IRCON_RXEN |
 406                          CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
 407                          CIR_IRCON);
 408
 409        /* clear hardware rx and tx fifos */
 410        nvt_clear_cir_fifo(nvt);
 411        nvt_clear_tx_fifo(nvt);
 412
 413        /* clear any and all stray interrupts */
 414        nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
 415
 416        /* and finally, enable interrupts */
 417        nvt_set_cir_iren(nvt);
 418}
 419
 420static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
 421{
 422        /* set number of bytes needed for wake from s3 (default 65) */
 423        nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES,
 424                               CIR_WAKE_FIFO_CMP_DEEP);
 425
 426        /* set tolerance/variance allowed per byte during wake compare */
 427        nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
 428                               CIR_WAKE_FIFO_CMP_TOL);
 429
 430        /* set sample limit count (PE interrupt raised when reached) */
 431        nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_WAKE_SLCH);
 432        nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_WAKE_SLCL);
 433
 434        /* set cir wake fifo rx trigger level (currently 67) */
 435        nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFOCON_RX_TRIGGER_LEV,
 436                               CIR_WAKE_FIFOCON);
 437
 438        /*
  439         * Enable TX and RX, specify carrier on = low, off = high, and set
 440         * sample period (currently 50us)
 441         */
 442        nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
 443                               CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
 444                               CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
 445                               CIR_WAKE_IRCON);
 446
 447        /* clear cir wake rx fifo */
 448        nvt_clear_cir_wake_fifo(nvt);
 449
 450        /* clear any and all stray interrupts */
 451        nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
 452}
 453
 454static void nvt_enable_wake(struct nvt_dev *nvt)
 455{
 456        nvt_efm_enable(nvt);
 457
 458        nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
 459        nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
 460        nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);
 461        nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
 462
 463        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
 464        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 465
 466        nvt_efm_disable(nvt);
 467
 468        nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
 469                               CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
 470                               CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
 471                               CIR_WAKE_IRCON);
 472        nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
 473        nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
 474}
 475
 476/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
 477static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
 478{
 479        u32 count, carrier, duration = 0;
 480        int i;
 481
 482        count = nvt_cir_reg_read(nvt, CIR_FCCL) |
 483                nvt_cir_reg_read(nvt, CIR_FCCH) << 8;
 484
 485        for (i = 0; i < nvt->pkts; i++) {
 486                if (nvt->buf[i] & BUF_PULSE_BIT)
 487                        duration += nvt->buf[i] & BUF_LEN_MASK;
 488        }
 489
 490        duration *= SAMPLE_PERIOD;
 491
 492        if (!count || !duration) {
 493                nvt_pr(KERN_NOTICE, "Unable to determine carrier! (c:%u, d:%u)",
 494                       count, duration);
 495                return 0;
 496        }
 497
 498        carrier = MS_TO_NS(count) / duration;
 499
 500        if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
 501                nvt_dbg("WTF? Carrier frequency out of range!");
 502
 503        nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
 504                carrier, count, duration);
 505
 506        return carrier;
 507}
 508
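/*
 * Worked example of the carrier math above (a sketch with made-up numbers):
 * with a cycle count of 400 in FCCH/FCCL and 10500us of accumulated pulse
 * time, carrier = MS_TO_NS(400) / 10500 = 400000000 / 10500 ~= 38095 Hz,
 * i.e. a typical ~38kHz remote.
 */
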
 509/*
 510 * set carrier frequency
 511 *
 512 * set carrier on 2 registers: CP & CC
 513 * always set CP as 0x81
 514 * set CC by SPEC, CC = 3MHz/carrier - 1
 515 */
 516static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
 517{
 518        struct nvt_dev *nvt = dev->priv;
 519        u16 val;
 520
 521        nvt_cir_reg_write(nvt, 1, CIR_CP);
 522        val = 3000000 / (carrier) - 1;
 523        nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);
 524
 525        nvt_dbg("cp: 0x%x cc: 0x%x\n",
 526                nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));
 527
 528        return 0;
 529}
 530
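/*
 * Worked example of the CC formula above (a sketch, assuming the common
 * 38kHz MCE carrier): val = 3000000 / 38000 - 1 = 77 (0x4d), which the
 * hardware turns back into 3MHz / (77 + 1) ~= 38.5kHz.
 */
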
 531/*
 532 * nvt_tx_ir
 533 *
 534 * 1) clean TX fifo first (handled by AP)
 535 * 2) copy data from user space
 536 * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
 537 * 4) send 9 packets to TX FIFO to open TTR
 538 * in interrupt_handler:
 539 * 5) send all data out
 540 * go back to write():
  541 * 6) disable TX interrupts, re-enable RX interrupts
 542 *
  543 * The key problem here is that the user space data may be larger than the
  544 * driver's data buffer. So nvt_tx_ir() only copies up to TX_BUF_LEN bytes
  545 * into buf, and tracks the current position in cur_buf_num. The driver's
  546 * buffer count may also be larger than TXFCONT (0xff), so the interrupt
  547 * handler keeps TXFCONT at 0xff until buf_count drops below 0xff.
 548 */
 549static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
 550{
 551        struct nvt_dev *nvt = dev->priv;
 552        unsigned long flags;
 553        unsigned int i;
 554        u8 iren;
 555        int ret;
 556
 557        spin_lock_irqsave(&nvt->tx.lock, flags);
 558
 559        ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
 560        nvt->tx.buf_count = (ret * sizeof(unsigned));
 561
 562        memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);
 563
 564        nvt->tx.cur_buf_num = 0;
 565
 566        /* save currently enabled interrupts */
 567        iren = nvt_cir_reg_read(nvt, CIR_IREN);
 568
  569        /* now disable all interrupts except TFU & TTR */
 570        nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);
 571
 572        nvt->tx.tx_state = ST_TX_REPLY;
 573
 574        nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
 575                          CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
 576
 577        /* trigger TTR interrupt by writing out ones, (yes, it's ugly) */
 578        for (i = 0; i < 9; i++)
 579                nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);
 580
 581        spin_unlock_irqrestore(&nvt->tx.lock, flags);
 582
 583        wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);
 584
 585        spin_lock_irqsave(&nvt->tx.lock, flags);
 586        nvt->tx.tx_state = ST_TX_NONE;
 587        spin_unlock_irqrestore(&nvt->tx.lock, flags);
 588
 589        /* restore enabled interrupts to prior state */
 590        nvt_cir_reg_write(nvt, iren, CIR_IREN);
 591
 592        return ret;
 593}
 594
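/*
 * A worked example of the copy/capping above (a sketch, assuming TX_BUF_LEN
 * is 256 bytes and sizeof(unsigned) is 4): a caller passing n = 100 samples
 * gets min(256 / 4, 100) = 64 of them copied, nvt_tx_ir() returns 64, and
 * the remaining samples have to be submitted in a later write.
 */
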
 595/* dump contents of the last rx buffer we got from the hw rx fifo */
 596static void nvt_dump_rx_buf(struct nvt_dev *nvt)
 597{
 598        int i;
 599
 600        printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
 601        for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
 602                printk(KERN_CONT "0x%02x ", nvt->buf[i]);
 603        printk(KERN_CONT "\n");
 604}
 605
 606/*
 607 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 608 * trigger decode when appropriate.
 609 *
  610 * We get IR data samples one byte at a time. If the msb is set, it's a pulse,
  611 * otherwise it's a space. The lower 7 bits are the count of SAMPLE_PERIOD
 612 * (default 50us) intervals for that pulse/space. A discrete signal is
 613 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 614 * to signal more IR coming (repeats) or end of IR, respectively. We store
 615 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 616 * or 0x80, at which time, we trigger a decode operation.
 617 */
 618static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
 619{
 620        DEFINE_IR_RAW_EVENT(rawir);
 621        u32 carrier;
 622        u8 sample;
 623        int i;
 624
 625        nvt_dbg_verbose("%s firing", __func__);
 626
 627        if (debug)
 628                nvt_dump_rx_buf(nvt);
 629
 630        if (nvt->carrier_detect_enabled)
 631                carrier = nvt_rx_carrier_detect(nvt);
 632
 633        nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
 634
 635        init_ir_raw_event(&rawir);
 636
 637        for (i = 0; i < nvt->pkts; i++) {
 638                sample = nvt->buf[i];
 639
 640                rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
 641                rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
 642                                          * SAMPLE_PERIOD);
 643
 644                nvt_dbg("Storing %s with duration %d",
 645                        rawir.pulse ? "pulse" : "space", rawir.duration);
 646
 647                ir_raw_event_store_with_filter(nvt->rdev, &rawir);
 648
 649                /*
 650                 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
 651                 * indicates end of IR signal, but new data incoming. In both
 652                 * cases, it means we're ready to call ir_raw_event_handle
 653                 */
 654                if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
 655                        nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
 656                        ir_raw_event_handle(nvt->rdev);
 657                }
 658        }
 659
 660        nvt->pkts = 0;
 661
 662        nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
 663        ir_raw_event_handle(nvt->rdev);
 664
 665        nvt_dbg_verbose("%s done", __func__);
 666}
 667
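/*
 * Worked decode example for the sample format above (a sketch, assuming the
 * default 50us SAMPLE_PERIOD): a byte of 0x8a has BUF_PULSE_BIT set, so it
 * is a pulse lasting (0x8a & BUF_LEN_MASK) = 10 periods = 500us; the byte
 * 0x0a would be a 500us space, and a bare 0x80 marks the end of the IR data.
 */
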
 668static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
 669{
 670        nvt_pr(KERN_WARNING, "RX FIFO overrun detected, flushing data!");
 671
 672        nvt->pkts = 0;
 673        nvt_clear_cir_fifo(nvt);
 674        ir_raw_event_reset(nvt->rdev);
 675}
 676
 677/* copy data from hardware rx fifo into driver buffer */
 678static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
 679{
 680        unsigned long flags;
 681        u8 fifocount, val;
 682        unsigned int b_idx;
 683        bool overrun = false;
 684        int i;
 685
 686        /* Get count of how many bytes to read from RX FIFO */
 687        fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);
 688        /* if we get 0xff, probably means the logical dev is disabled */
 689        if (fifocount == 0xff)
 690                return;
 691        /* watch out for a fifo overrun condition */
 692        else if (fifocount > RX_BUF_LEN) {
 693                overrun = true;
 694                fifocount = RX_BUF_LEN;
 695        }
 696
 697        nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);
 698
 699        spin_lock_irqsave(&nvt->nvt_lock, flags);
 700
 701        b_idx = nvt->pkts;
 702
  703        /* This should never happen, but let's check anyway... */
 704        if (b_idx + fifocount > RX_BUF_LEN) {
 705                nvt_process_rx_ir_data(nvt);
 706                b_idx = 0;
 707        }
 708
 709        /* Read fifocount bytes from CIR Sample RX FIFO register */
 710        for (i = 0; i < fifocount; i++) {
 711                val = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
 712                nvt->buf[b_idx + i] = val;
 713        }
 714
 715        nvt->pkts += fifocount;
 716        nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);
 717
 718        nvt_process_rx_ir_data(nvt);
 719
 720        if (overrun)
 721                nvt_handle_rx_fifo_overrun(nvt);
 722
 723        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 724}
 725
 726static void nvt_cir_log_irqs(u8 status, u8 iren)
 727{
 728        nvt_pr(KERN_INFO, "IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
 729                status, iren,
 730                status & CIR_IRSTS_RDR  ? " RDR"        : "",
 731                status & CIR_IRSTS_RTR  ? " RTR"        : "",
 732                status & CIR_IRSTS_PE   ? " PE"         : "",
 733                status & CIR_IRSTS_RFO  ? " RFO"        : "",
 734                status & CIR_IRSTS_TE   ? " TE"         : "",
 735                status & CIR_IRSTS_TTR  ? " TTR"        : "",
 736                status & CIR_IRSTS_TFU  ? " TFU"        : "",
 737                status & CIR_IRSTS_GH   ? " GH"         : "",
 738                status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
 739                           CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
 740                           CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
 741}
 742
 743static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
 744{
 745        unsigned long flags;
 746        bool tx_inactive;
 747        u8 tx_state;
 748
 749        spin_lock_irqsave(&nvt->tx.lock, flags);
 750        tx_state = nvt->tx.tx_state;
 751        spin_unlock_irqrestore(&nvt->tx.lock, flags);
 752
 753        tx_inactive = (tx_state == ST_TX_NONE);
 754
 755        return tx_inactive;
 756}
 757
 758/* interrupt service routine for incoming and outgoing CIR data */
 759static irqreturn_t nvt_cir_isr(int irq, void *data)
 760{
 761        struct nvt_dev *nvt = data;
 762        u8 status, iren, cur_state;
 763        unsigned long flags;
 764
 765        nvt_dbg_verbose("%s firing", __func__);
 766
 767        nvt_efm_enable(nvt);
 768        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 769        nvt_efm_disable(nvt);
 770
 771        /*
 772         * Get IR Status register contents. Write 1 to ack/clear
 773         *
 774         * bit: reg name      - description
 775         *   7: CIR_IRSTS_RDR - RX Data Ready
 776         *   6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
 777         *   5: CIR_IRSTS_PE  - Packet End
 778         *   4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
 779         *   3: CIR_IRSTS_TE  - TX FIFO Empty
 780         *   2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
 781         *   1: CIR_IRSTS_TFU - TX FIFO Underrun
 782         *   0: CIR_IRSTS_GH  - Min Length Detected
 783         */
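        /*
         * Example (a sketch): a status of 0x60 means RTR (bit 6) and PE
         * (bit 5) both fired, e.g. at the end of a received IR burst.
         */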
 784        status = nvt_cir_reg_read(nvt, CIR_IRSTS);
 785        if (!status) {
 786                nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
 787                nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
 788                return IRQ_RETVAL(IRQ_NONE);
 789        }
 790
 791        /* ack/clear all irq flags we've got */
 792        nvt_cir_reg_write(nvt, status, CIR_IRSTS);
 793        nvt_cir_reg_write(nvt, 0, CIR_IRSTS);
 794
 795        /* Interrupt may be shared with CIR Wake, bail if CIR not enabled */
 796        iren = nvt_cir_reg_read(nvt, CIR_IREN);
 797        if (!iren) {
 798                nvt_dbg_verbose("%s exiting, CIR not enabled", __func__);
 799                return IRQ_RETVAL(IRQ_NONE);
 800        }
 801
 802        if (debug)
 803                nvt_cir_log_irqs(status, iren);
 804
 805        if (status & CIR_IRSTS_RTR) {
 806                /* FIXME: add code for study/learn mode */
 807                /* We only do rx if not tx'ing */
 808                if (nvt_cir_tx_inactive(nvt))
 809                        nvt_get_rx_ir_data(nvt);
 810        }
 811
 812        if (status & CIR_IRSTS_PE) {
 813                if (nvt_cir_tx_inactive(nvt))
 814                        nvt_get_rx_ir_data(nvt);
 815
 816                spin_lock_irqsave(&nvt->nvt_lock, flags);
 817
 818                cur_state = nvt->study_state;
 819
 820                spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 821
 822                if (cur_state == ST_STUDY_NONE)
 823                        nvt_clear_cir_fifo(nvt);
 824        }
 825
 826        if (status & CIR_IRSTS_TE)
 827                nvt_clear_tx_fifo(nvt);
 828
 829        if (status & CIR_IRSTS_TTR) {
 830                unsigned int pos, count;
 831                u8 tmp;
 832
 833                spin_lock_irqsave(&nvt->tx.lock, flags);
 834
 835                pos = nvt->tx.cur_buf_num;
 836                count = nvt->tx.buf_count;
 837
 838                /* Write data into the hardware tx fifo while pos < count */
 839                if (pos < count) {
 840                        nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
 841                        nvt->tx.cur_buf_num++;
 842                /* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
 843                } else {
 844                        tmp = nvt_cir_reg_read(nvt, CIR_IREN);
 845                        nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
 846                }
 847
 848                spin_unlock_irqrestore(&nvt->tx.lock, flags);
 849
 850        }
 851
 852        if (status & CIR_IRSTS_TFU) {
 853                spin_lock_irqsave(&nvt->tx.lock, flags);
 854                if (nvt->tx.tx_state == ST_TX_REPLY) {
 855                        nvt->tx.tx_state = ST_TX_REQUEST;
 856                        wake_up(&nvt->tx.queue);
 857                }
 858                spin_unlock_irqrestore(&nvt->tx.lock, flags);
 859        }
 860
 861        nvt_dbg_verbose("%s done", __func__);
 862        return IRQ_RETVAL(IRQ_HANDLED);
 863}
 864
 865/* Interrupt service routine for CIR Wake */
 866static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
 867{
 868        u8 status, iren, val;
 869        struct nvt_dev *nvt = data;
 870        unsigned long flags;
 871
 872        nvt_dbg_wake("%s firing", __func__);
 873
 874        status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS);
 875        if (!status)
 876                return IRQ_RETVAL(IRQ_NONE);
 877
 878        if (status & CIR_WAKE_IRSTS_IR_PENDING)
 879                nvt_clear_cir_wake_fifo(nvt);
 880
 881        nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS);
 882        nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS);
 883
 884        /* Interrupt may be shared with CIR, bail if Wake not enabled */
 885        iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
 886        if (!iren) {
 887                nvt_dbg_wake("%s exiting, wake not enabled", __func__);
 888                return IRQ_RETVAL(IRQ_HANDLED);
 889        }
 890
 891        if ((status & CIR_WAKE_IRSTS_PE) &&
 892            (nvt->wake_state == ST_WAKE_START)) {
 893                while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) {
 894                        val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
 895                        nvt_dbg("setting wake up key: 0x%x", val);
 896                }
 897
 898                nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
 899                spin_lock_irqsave(&nvt->nvt_lock, flags);
 900                nvt->wake_state = ST_WAKE_FINISH;
 901                spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 902        }
 903
 904        nvt_dbg_wake("%s done", __func__);
 905        return IRQ_RETVAL(IRQ_HANDLED);
 906}
 907
 908static void nvt_enable_cir(struct nvt_dev *nvt)
 909{
 910        /* set function enable flags */
 911        nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
 912                          CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
 913                          CIR_IRCON);
 914
 915        nvt_efm_enable(nvt);
 916
 917        /* enable the CIR logical device */
 918        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 919        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 920
 921        nvt_efm_disable(nvt);
 922
 923        /* clear all pending interrupts */
 924        nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
 925
 926        /* enable interrupts */
 927        nvt_set_cir_iren(nvt);
 928}
 929
 930static void nvt_disable_cir(struct nvt_dev *nvt)
 931{
 932        /* disable CIR interrupts */
 933        nvt_cir_reg_write(nvt, 0, CIR_IREN);
 934
 935        /* clear any and all pending interrupts */
 936        nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
 937
 938        /* clear all function enable flags */
 939        nvt_cir_reg_write(nvt, 0, CIR_IRCON);
 940
 941        /* clear hardware rx and tx fifos */
 942        nvt_clear_cir_fifo(nvt);
 943        nvt_clear_tx_fifo(nvt);
 944
 945        nvt_efm_enable(nvt);
 946
 947        /* disable the CIR logical device */
 948        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
 949        nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
 950
 951        nvt_efm_disable(nvt);
 952}
 953
 954static int nvt_open(struct rc_dev *dev)
 955{
 956        struct nvt_dev *nvt = dev->priv;
 957        unsigned long flags;
 958
 959        spin_lock_irqsave(&nvt->nvt_lock, flags);
 960        nvt_enable_cir(nvt);
 961        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 962
 963        return 0;
 964}
 965
 966static void nvt_close(struct rc_dev *dev)
 967{
 968        struct nvt_dev *nvt = dev->priv;
 969        unsigned long flags;
 970
 971        spin_lock_irqsave(&nvt->nvt_lock, flags);
 972        nvt_disable_cir(nvt);
 973        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 974}
 975
 976/* Allocate memory, probe hardware, and initialize everything */
 977static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 978{
 979        struct nvt_dev *nvt;
 980        struct rc_dev *rdev;
 981        int ret = -ENOMEM;
 982
 983        nvt = kzalloc(sizeof(struct nvt_dev), GFP_KERNEL);
 984        if (!nvt)
 985                return ret;
 986
 987        /* input device for IR remote (and tx) */
 988        rdev = rc_allocate_device();
 989        if (!rdev)
 990                goto failure;
 991
 992        ret = -ENODEV;
 993        /* validate pnp resources */
 994        if (!pnp_port_valid(pdev, 0) ||
 995            pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
 996                dev_err(&pdev->dev, "IR PNP Port not valid!\n");
 997                goto failure;
 998        }
 999
1000        if (!pnp_irq_valid(pdev, 0)) {
1001                dev_err(&pdev->dev, "PNP IRQ not valid!\n");
1002                goto failure;
1003        }
1004
1005        if (!pnp_port_valid(pdev, 1) ||
1006            pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
1007                dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
1008                goto failure;
1009        }
1010
1011        nvt->cir_addr = pnp_port_start(pdev, 0);
1012        nvt->cir_irq  = pnp_irq(pdev, 0);
1013
1014        nvt->cir_wake_addr = pnp_port_start(pdev, 1);
1015        /* irq is always shared between cir and cir wake */
1016        nvt->cir_wake_irq  = nvt->cir_irq;
1017
1018        nvt->cr_efir = CR_EFIR;
1019        nvt->cr_efdr = CR_EFDR;
1020
1021        spin_lock_init(&nvt->nvt_lock);
1022        spin_lock_init(&nvt->tx.lock);
1023
1024        ret = -EBUSY;
1025        /* now claim resources */
1026        if (!request_region(nvt->cir_addr,
1027                            CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
1028                goto failure;
1029
1030        if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
1031                        NVT_DRIVER_NAME, (void *)nvt))
1032                goto failure;
1033
1034        if (!request_region(nvt->cir_wake_addr,
1035                            CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
1036                goto failure;
1037
1038        if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
1039                        NVT_DRIVER_NAME, (void *)nvt))
1040                goto failure;
1041
1042        pnp_set_drvdata(pdev, nvt);
1043        nvt->pdev = pdev;
1044
1045        init_waitqueue_head(&nvt->tx.queue);
1046
1047        ret = nvt_hw_detect(nvt);
1048        if (ret)
1049                goto failure;
1050
1051        /* Initialize CIR & CIR Wake Logical Devices */
1052        nvt_efm_enable(nvt);
1053        nvt_cir_ldev_init(nvt);
1054        nvt_cir_wake_ldev_init(nvt);
1055        nvt_efm_disable(nvt);
1056
1057        /* Initialize CIR & CIR Wake Config Registers */
1058        nvt_cir_regs_init(nvt);
1059        nvt_cir_wake_regs_init(nvt);
1060
1061        /* Set up the rc device */
1062        rdev->priv = nvt;
1063        rdev->driver_type = RC_DRIVER_IR_RAW;
1064        rdev->allowed_protos = RC_TYPE_ALL;
1065        rdev->open = nvt_open;
1066        rdev->close = nvt_close;
1067        rdev->tx_ir = nvt_tx_ir;
1068        rdev->s_tx_carrier = nvt_set_tx_carrier;
1069        rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
1070        rdev->input_phys = "nuvoton/cir0";
1071        rdev->input_id.bustype = BUS_HOST;
1072        rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
1073        rdev->input_id.product = nvt->chip_major;
1074        rdev->input_id.version = nvt->chip_minor;
1075        rdev->dev.parent = &pdev->dev;
1076        rdev->driver_name = NVT_DRIVER_NAME;
1077        rdev->map_name = RC_MAP_RC6_MCE;
1078        rdev->timeout = MS_TO_NS(100);
1079        /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
1080        rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
1081#if 0
1082        rdev->min_timeout = XYZ;
1083        rdev->max_timeout = XYZ;
1084        /* tx bits */
1085        rdev->tx_resolution = XYZ;
1086#endif
1087
1088        ret = rc_register_device(rdev);
1089        if (ret)
1090                goto failure;
1091
1092        device_init_wakeup(&pdev->dev, true);
1093        nvt->rdev = rdev;
1094        nvt_pr(KERN_NOTICE, "driver has been successfully loaded\n");
1095        if (debug) {
1096                cir_dump_regs(nvt);
1097                cir_wake_dump_regs(nvt);
1098        }
1099
1100        return 0;
1101
1102failure:
1103        if (nvt->cir_irq)
1104                free_irq(nvt->cir_irq, nvt);
1105        if (nvt->cir_addr)
1106                release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
1107
1108        if (nvt->cir_wake_irq)
1109                free_irq(nvt->cir_wake_irq, nvt);
1110        if (nvt->cir_wake_addr)
1111                release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
1112
1113        rc_free_device(rdev);
1114        kfree(nvt);
1115
1116        return ret;
1117}
1118
1119static void __devexit nvt_remove(struct pnp_dev *pdev)
1120{
1121        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
1122        unsigned long flags;
1123
1124        spin_lock_irqsave(&nvt->nvt_lock, flags);
1125        /* disable CIR */
1126        nvt_cir_reg_write(nvt, 0, CIR_IREN);
1127        nvt_disable_cir(nvt);
1128        /* enable CIR Wake (for IR power-on) */
1129        nvt_enable_wake(nvt);
1130        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
1131
1132        /* free resources */
1133        free_irq(nvt->cir_irq, nvt);
1134        free_irq(nvt->cir_wake_irq, nvt);
1135        release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
1136        release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
1137
1138        rc_unregister_device(nvt->rdev);
1139
1140        kfree(nvt);
1141}
1142
1143static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
1144{
1145        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
1146        unsigned long flags;
1147
1148        nvt_dbg("%s called", __func__);
1149
1150        /* zero out misc state tracking */
1151        spin_lock_irqsave(&nvt->nvt_lock, flags);
1152        nvt->study_state = ST_STUDY_NONE;
1153        nvt->wake_state = ST_WAKE_NONE;
1154        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
1155
1156        spin_lock_irqsave(&nvt->tx.lock, flags);
1157        nvt->tx.tx_state = ST_TX_NONE;
1158        spin_unlock_irqrestore(&nvt->tx.lock, flags);
1159
1160        /* disable all CIR interrupts */
1161        nvt_cir_reg_write(nvt, 0, CIR_IREN);
1162
1163        nvt_efm_enable(nvt);
1164
1165        /* disable cir logical dev */
1166        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
1167        nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
1168
1169        nvt_efm_disable(nvt);
1170
1171        /* make sure wake is enabled */
1172        nvt_enable_wake(nvt);
1173
1174        return 0;
1175}
1176
1177static int nvt_resume(struct pnp_dev *pdev)
1178{
1179        int ret = 0;
1180        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
1181
1182        nvt_dbg("%s called", __func__);
1183
1184        /* open interrupt */
1185        nvt_set_cir_iren(nvt);
1186
1187        /* Enable CIR logical device */
1188        nvt_efm_enable(nvt);
1189        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
1190        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
1191
1192        nvt_efm_disable(nvt);
1193
1194        nvt_cir_regs_init(nvt);
1195        nvt_cir_wake_regs_init(nvt);
1196
1197        return ret;
1198}
1199
1200static void nvt_shutdown(struct pnp_dev *pdev)
1201{
1202        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
1203        nvt_enable_wake(nvt);
1204}
1205
1206static const struct pnp_device_id nvt_ids[] = {
1207        { "WEC0530", 0 },   /* CIR */
 1208        { "NTN0530", 0 },   /* CIR for new chip's pnp id */
1209        { "", 0 },
1210};
1211
1212static struct pnp_driver nvt_driver = {
1213        .name           = NVT_DRIVER_NAME,
1214        .id_table       = nvt_ids,
1215        .flags          = PNP_DRIVER_RES_DO_NOT_CHANGE,
1216        .probe          = nvt_probe,
1217        .remove         = __devexit_p(nvt_remove),
1218        .suspend        = nvt_suspend,
1219        .resume         = nvt_resume,
1220        .shutdown       = nvt_shutdown,
1221};
1222
1223int nvt_init(void)
1224{
1225        return pnp_register_driver(&nvt_driver);
1226}
1227
1228void nvt_exit(void)
1229{
1230        pnp_unregister_driver(&nvt_driver);
1231}
1232
1233module_param(debug, int, S_IRUGO | S_IWUSR);
1234MODULE_PARM_DESC(debug, "Enable debugging output");
1235
1236MODULE_DEVICE_TABLE(pnp, nvt_ids);
1237MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");
1238
1239MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
1240MODULE_LICENSE("GPL");
1241
1242module_init(nvt_init);
1243module_exit(nvt_exit);
1244