linux/net/nfc/nci/core.c
/*
 *  The NFC Controller Interface is the communication protocol between an
 *  NFC Controller (NFCC) and a Device Host (DH).
 *
 *  Copyright (C) 2011 Texas Instruments, Inc.
 *
 *  Written by Ilan Elias <ilane@ti.com>
 *
 *  Acknowledgements:
 *  This file is based on hci_core.c, which was written
 *  by Maxim Krasnyansky.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>

static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

/* ---- NCI requests ---- */

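/* Complete the currently pending request: record the result and wake up
 * the waiter in __nci_request(). */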
void nci_req_complete(struct nci_dev *ndev, int result)
{
        if (ndev->req_status == NCI_REQ_PEND) {
                ndev->req_result = result;
                ndev->req_status = NCI_REQ_DONE;
                complete(&ndev->req_completion);
        }
}

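/* Cancel the currently pending request (e.g. on device teardown) and wake
 * up the waiter with the given error. */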
static void nci_req_cancel(struct nci_dev *ndev, int err)
{
        if (ndev->req_status == NCI_REQ_PEND) {
                ndev->req_result = err;
                ndev->req_status = NCI_REQ_CANCELED;
                complete(&ndev->req_completion);
        }
}

/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
        void (*req)(struct nci_dev *ndev, unsigned long opt),
        unsigned long opt,
        __u32 timeout)
{
        int rc = 0;
        long completion_rc;

        ndev->req_status = NCI_REQ_PEND;

        init_completion(&ndev->req_completion);
        req(ndev, opt);
        completion_rc = wait_for_completion_interruptible_timeout(
                                                        &ndev->req_completion,
                                                        timeout);

        pr_debug("wait_for_completion return %ld\n", completion_rc);

        if (completion_rc > 0) {
                switch (ndev->req_status) {
                case NCI_REQ_DONE:
                        rc = nci_to_errno(ndev->req_result);
                        break;

                case NCI_REQ_CANCELED:
                        rc = -ndev->req_result;
                        break;

                default:
                        rc = -ETIMEDOUT;
                        break;
                }
        } else {
                pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
                       completion_rc);

                rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
        }

        ndev->req_status = ndev->req_result = 0;

        return rc;
}

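/* Execute a request on an NCI_UP device, serialized against other requests. */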
static inline int nci_request(struct nci_dev *ndev,
                void (*req)(struct nci_dev *ndev, unsigned long opt),
                unsigned long opt, __u32 timeout)
{
        int rc;

        if (!test_bit(NCI_UP, &ndev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        mutex_lock(&ndev->req_lock);
        rc = __nci_request(ndev, req, opt, timeout);
        mutex_unlock(&ndev->req_lock);

        return rc;
}

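/* The request builders below are run as the req callback of __nci_request(). */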
static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
        struct nci_core_reset_cmd cmd;

        cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
        nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}

static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
        nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}

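/* Last init step: map the ISO-DEP and NFC-DEP protocols to their matching
 * RF interfaces; all other protocols keep the default frame interface. */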
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
        struct nci_rf_disc_map_cmd cmd;
        struct disc_map_config *cfg = cmd.mapping_configs;
        __u8 *num = &cmd.num_mapping_configs;
        int i;

        /* set rf mapping configurations */
        *num = 0;

        /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
        for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
                if (ndev->supported_rf_interfaces[i] ==
                        NCI_RF_INTERFACE_ISO_DEP) {
                        cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
                        cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
                                NCI_DISC_MAP_MODE_LISTEN;
                        cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
                        (*num)++;
                } else if (ndev->supported_rf_interfaces[i] ==
                        NCI_RF_INTERFACE_NFC_DEP) {
                        cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
                        cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
                                NCI_DISC_MAP_MODE_LISTEN;
                        cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
                        (*num)++;
                }

                if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
                        break;
        }

        nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
                (1 + ((*num)*sizeof(struct disc_map_config))),
                &cmd);
}

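/* Build an RF_DISCOVER_CMD from the protocol bitmask passed in opt. */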
static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
        struct nci_rf_disc_cmd cmd;
        __u32 protocols = opt;

        cmd.num_disc_configs = 0;

        if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
                (protocols & NFC_PROTO_JEWEL_MASK
                || protocols & NFC_PROTO_MIFARE_MASK
                || protocols & NFC_PROTO_ISO14443_MASK
                || protocols & NFC_PROTO_NFC_DEP_MASK)) {
                cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
                NCI_NFC_A_PASSIVE_POLL_MODE;
                cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
                cmd.num_disc_configs++;
        }

        if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
                (protocols & NFC_PROTO_ISO14443_MASK)) {
                cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
                NCI_NFC_B_PASSIVE_POLL_MODE;
                cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
                cmd.num_disc_configs++;
        }

        if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
                (protocols & NFC_PROTO_FELICA_MASK
                || protocols & NFC_PROTO_NFC_DEP_MASK)) {
                cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
                NCI_NFC_F_PASSIVE_POLL_MODE;
                cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
                cmd.num_disc_configs++;
        }

        nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
                (1 + (cmd.num_disc_configs*sizeof(struct disc_config))),
                &cmd);
}

static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
        struct nci_rf_deactivate_cmd cmd;

        cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;

        nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
                        sizeof(struct nci_rf_deactivate_cmd),
                        &cmd);
}

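/* Bring the device up: open the driver, then run CORE_RESET, CORE_INIT and
 * RF_DISCOVER_MAP before declaring the device NCI_UP. */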
static int nci_open_device(struct nci_dev *ndev)
{
        int rc = 0;

        mutex_lock(&ndev->req_lock);

        if (test_bit(NCI_UP, &ndev->flags)) {
                rc = -EALREADY;
                goto done;
        }

        if (ndev->ops->open(ndev)) {
                rc = -EIO;
                goto done;
        }

        atomic_set(&ndev->cmd_cnt, 1);

        set_bit(NCI_INIT, &ndev->flags);

        rc = __nci_request(ndev, nci_reset_req, 0,
                                msecs_to_jiffies(NCI_RESET_TIMEOUT));

        if (!rc) {
                rc = __nci_request(ndev, nci_init_req, 0,
                                msecs_to_jiffies(NCI_INIT_TIMEOUT));
        }

        if (!rc) {
                rc = __nci_request(ndev, nci_init_complete_req, 0,
                                msecs_to_jiffies(NCI_INIT_TIMEOUT));
        }

        clear_bit(NCI_INIT, &ndev->flags);

        if (!rc) {
                set_bit(NCI_UP, &ndev->flags);
        } else {
                /* Init failed, cleanup */
                skb_queue_purge(&ndev->cmd_q);
                skb_queue_purge(&ndev->rx_q);
                skb_queue_purge(&ndev->tx_q);

                ndev->ops->close(ndev);
                ndev->flags = 0;
        }

done:
        mutex_unlock(&ndev->req_lock);
        return rc;
}

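/* Bring the device down: drop the queues, flush the workqueues, reset the
 * controller and close the driver. */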
static int nci_close_device(struct nci_dev *ndev)
{
        nci_req_cancel(ndev, ENODEV);
        mutex_lock(&ndev->req_lock);

        if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
                del_timer_sync(&ndev->cmd_timer);
                mutex_unlock(&ndev->req_lock);
                return 0;
        }

        /* Drop RX and TX queues */
        skb_queue_purge(&ndev->rx_q);
        skb_queue_purge(&ndev->tx_q);

        /* Flush RX and TX wq */
        flush_workqueue(ndev->rx_wq);
        flush_workqueue(ndev->tx_wq);

        /* Reset device */
        skb_queue_purge(&ndev->cmd_q);
        atomic_set(&ndev->cmd_cnt, 1);

        set_bit(NCI_INIT, &ndev->flags);
        __nci_request(ndev, nci_reset_req, 0,
                                msecs_to_jiffies(NCI_RESET_TIMEOUT));
        clear_bit(NCI_INIT, &ndev->flags);

        /* Flush cmd wq */
        flush_workqueue(ndev->cmd_wq);

        /* After this point our queues are empty
         * and no work is scheduled. */
        ndev->ops->close(ndev);

        /* Clear flags */
        ndev->flags = 0;

        mutex_unlock(&ndev->req_lock);

        return 0;
}

/* NCI command timer function */
static void nci_cmd_timer(unsigned long arg)
{
        struct nci_dev *ndev = (void *) arg;

        atomic_set(&ndev->cmd_cnt, 1);
        queue_work(ndev->cmd_wq, &ndev->cmd_work);
}

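/* ---- Interface to the NFC core (hooked up via nci_nfc_ops below) ---- */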
static int nci_dev_up(struct nfc_dev *nfc_dev)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

        return nci_open_device(ndev);
}

static int nci_dev_down(struct nfc_dev *nfc_dev)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

        return nci_close_device(ndev);
}

static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
        int rc;

        if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
                pr_err("unable to start poll, since poll is already active\n");
                return -EBUSY;
        }

        if (ndev->target_active_prot) {
                pr_err("there is an active target\n");
                return -EBUSY;
        }

        if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
                pr_debug("target is active, implicitly deactivate...\n");

                rc = nci_request(ndev, nci_rf_deactivate_req, 0,
                        msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
                if (rc)
                        return -EBUSY;
        }

        rc = nci_request(ndev, nci_rf_discover_req, protocols,
                msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

        if (!rc)
                ndev->poll_prots = protocols;

        return rc;
}

static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

        if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
                pr_err("unable to stop poll, since poll is not active\n");
                return;
        }

        nci_request(ndev, nci_rf_deactivate_req, 0,
                msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}

static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
                                __u32 protocol)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

        pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);

        if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
                pr_err("there is no available target to activate\n");
                return -EINVAL;
        }

        if (ndev->target_active_prot) {
                pr_err("there is already an active target\n");
                return -EBUSY;
        }

        if (!(ndev->target_available_prots & (1 << protocol))) {
                pr_err("target does not support the requested protocol 0x%x\n",
                       protocol);
                return -EINVAL;
        }

        ndev->target_active_prot = protocol;
        ndev->target_available_prots = 0;

        return 0;
}

static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

        pr_debug("target_idx %d\n", target_idx);

        if (!ndev->target_active_prot) {
                pr_err("unable to deactivate target, no active target\n");
                return;
        }

        ndev->target_active_prot = 0;

        if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
                nci_request(ndev, nci_rf_deactivate_req, 0,
                        msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
        }
}

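/* Send data to the active target; cb and cb_context are stored and used
 * when the exchanged data is received. */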
static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
                                                struct sk_buff *skb,
                                                data_exchange_cb_t cb,
                                                void *cb_context)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
        int rc;

        pr_debug("target_idx %d, len %d\n", target_idx, skb->len);

        if (!ndev->target_active_prot) {
                pr_err("unable to exchange data, no active target\n");
                return -EINVAL;
        }

        if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
                return -EBUSY;

        /* store cb and context to be used on receiving data */
        ndev->data_exchange_cb = cb;
        ndev->data_exchange_cb_context = cb_context;

        rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
        if (rc)
                clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);

        return rc;
}

static struct nfc_ops nci_nfc_ops = {
        .dev_up = nci_dev_up,
        .dev_down = nci_dev_down,
        .start_poll = nci_start_poll,
        .stop_poll = nci_stop_poll,
        .activate_target = nci_activate_target,
        .deactivate_target = nci_deactivate_target,
        .data_exchange = nci_data_exchange,
};

/* ---- Interface to NCI drivers ---- */

/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 * @tx_headroom: reserved driver tx headroom
 * @tx_tailroom: reserved driver tx tailroom
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
                                        __u32 supported_protocols,
                                        int tx_headroom,
                                        int tx_tailroom)
{
        struct nci_dev *ndev;

        pr_debug("supported_protocols 0x%x\n", supported_protocols);

        if (!ops->open || !ops->close || !ops->send)
                return NULL;

        if (!supported_protocols)
                return NULL;

        ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
        if (!ndev)
                return NULL;

        ndev->ops = ops;
        ndev->tx_headroom = tx_headroom;
        ndev->tx_tailroom = tx_tailroom;

        ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
                                                supported_protocols,
                                                tx_headroom + NCI_DATA_HDR_SIZE,
                                                tx_tailroom);
        if (!ndev->nfc_dev)
                goto free_exit;

        nfc_set_drvdata(ndev->nfc_dev, ndev);

        return ndev;

free_exit:
        kfree(ndev);
        return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);

/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
        nfc_free_device(ndev->nfc_dev);
        kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);

/**
 * nci_register_device - register an nci device in the nfc subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
        int rc;
        struct device *dev = &ndev->nfc_dev->dev;
        char name[32];

        rc = nfc_register_device(ndev->nfc_dev);
        if (rc)
                goto exit;

        ndev->flags = 0;

        INIT_WORK(&ndev->cmd_work, nci_cmd_work);
        snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
        ndev->cmd_wq = create_singlethread_workqueue(name);
        if (!ndev->cmd_wq) {
                rc = -ENOMEM;
                goto unreg_exit;
        }

        INIT_WORK(&ndev->rx_work, nci_rx_work);
        snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
        ndev->rx_wq = create_singlethread_workqueue(name);
        if (!ndev->rx_wq) {
                rc = -ENOMEM;
                goto destroy_cmd_wq_exit;
        }

        INIT_WORK(&ndev->tx_work, nci_tx_work);
        snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
        ndev->tx_wq = create_singlethread_workqueue(name);
        if (!ndev->tx_wq) {
                rc = -ENOMEM;
                goto destroy_rx_wq_exit;
        }

        skb_queue_head_init(&ndev->cmd_q);
        skb_queue_head_init(&ndev->rx_q);
        skb_queue_head_init(&ndev->tx_q);

        setup_timer(&ndev->cmd_timer, nci_cmd_timer,
                        (unsigned long) ndev);

        mutex_init(&ndev->req_lock);

        goto exit;

destroy_rx_wq_exit:
        destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
        destroy_workqueue(ndev->cmd_wq);

unreg_exit:
        nfc_unregister_device(ndev->nfc_dev);

exit:
        return rc;
}
EXPORT_SYMBOL(nci_register_device);

/**
 * nci_unregister_device - unregister an nci device in the nfc subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
        nci_close_device(ndev);

        destroy_workqueue(ndev->cmd_wq);
        destroy_workqueue(ndev->rx_wq);
        destroy_workqueue(ndev->tx_wq);

        nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);

/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct sk_buff *skb)
{
        struct nci_dev *ndev = (struct nci_dev *) skb->dev;

        pr_debug("len %d\n", skb->len);

        if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
                && !test_bit(NCI_INIT, &ndev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Queue frame for rx worker thread */
        skb_queue_tail(&ndev->rx_q, skb);
        queue_work(ndev->rx_wq, &ndev->rx_work);

        return 0;
}
EXPORT_SYMBOL(nci_recv_frame);

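/* Hand a frame to the driver; called from the cmd and tx data workers. */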
static int nci_send_frame(struct sk_buff *skb)
{
        struct nci_dev *ndev = (struct nci_dev *) skb->dev;

        pr_debug("len %d\n", skb->len);

        if (!ndev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return ndev->ops->send(skb);
}

/* Send NCI command */
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
        struct nci_ctrl_hdr *hdr;
        struct sk_buff *skb;

        pr_debug("opcode 0x%x, plen %d\n", opcode, plen);

        skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
        if (!skb) {
                pr_err("no memory for command\n");
                return -ENOMEM;
        }

        hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
        hdr->gid = nci_opcode_gid(opcode);
        hdr->oid = nci_opcode_oid(opcode);
        hdr->plen = plen;

        nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
        nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

        if (plen)
                memcpy(skb_put(skb, plen), payload, plen);

        skb->dev = (void *) ndev;

        skb_queue_tail(&ndev->cmd_q, skb);
        queue_work(ndev->cmd_wq, &ndev->cmd_work);

        return 0;
}

/* ---- NCI TX Data worker thread ---- */

static void nci_tx_work(struct work_struct *work)
{
        struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
        struct sk_buff *skb;

        pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));

        /* Send queued tx data */
        while (atomic_read(&ndev->credits_cnt)) {
                skb = skb_dequeue(&ndev->tx_q);
                if (!skb)
                        return;

                /* Check if data flow control is used */
                if (atomic_read(&ndev->credits_cnt) !=
                                NCI_DATA_FLOW_CONTROL_NOT_USED)
                        atomic_dec(&ndev->credits_cnt);

                pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
                         nci_pbf(skb->data),
                         nci_conn_id(skb->data),
                         nci_plen(skb->data));

                nci_send_frame(skb);
        }
}

/* ----- NCI RX worker thread (data & control) ----- */

static void nci_rx_work(struct work_struct *work)
{
        struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&ndev->rx_q))) {
                /* Process frame */
                switch (nci_mt(skb->data)) {
                case NCI_MT_RSP_PKT:
                        nci_rsp_packet(ndev, skb);
                        break;

                case NCI_MT_NTF_PKT:
                        nci_ntf_packet(ndev, skb);
                        break;

                case NCI_MT_DATA_PKT:
                        nci_rx_data_packet(ndev, skb);
                        break;

                default:
                        pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
                        kfree_skb(skb);
                        break;
                }
        }
}

/* ----- NCI TX CMD worker thread ----- */

static void nci_cmd_work(struct work_struct *work)
{
        struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
        struct sk_buff *skb;

        pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));

        /* Send queued command */
        if (atomic_read(&ndev->cmd_cnt)) {
                skb = skb_dequeue(&ndev->cmd_q);
                if (!skb)
                        return;

                atomic_dec(&ndev->cmd_cnt);

                pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
                         nci_pbf(skb->data),
                         nci_opcode_gid(nci_opcode(skb->data)),
                         nci_opcode_oid(nci_opcode(skb->data)),
                         nci_plen(skb->data));

                nci_send_frame(skb);

                mod_timer(&ndev->cmd_timer,
                        jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
        }
}