linux/drivers/crypto/hisilicon/qm.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */
#ifndef HISI_ACC_QM_H
#define HISI_ACC_QM_H

#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>

#define QM_QNUM_V1                      4096
#define QM_QNUM_V2                      1024
#define QM_MAX_VFS_NUM_V2               63

/* qm user domain */
#define QM_ARUSER_M_CFG_1               0x100088
#define AXUSER_SNOOP_ENABLE             BIT(30)
#define AXUSER_CMD_TYPE                 GENMASK(14, 12)
#define AXUSER_CMD_SMMU_NORMAL          1
#define AXUSER_NS                       BIT(6)
#define AXUSER_NO                       BIT(5)
#define AXUSER_FP                       BIT(4)
#define AXUSER_SSV                      BIT(0)
#define AXUSER_BASE                     (AXUSER_SNOOP_ENABLE |          \
                                        FIELD_PREP(AXUSER_CMD_TYPE,     \
                                        AXUSER_CMD_SMMU_NORMAL) |       \
                                        AXUSER_NS | AXUSER_NO | AXUSER_FP)
#define QM_ARUSER_M_CFG_ENABLE          0x100090
#define ARUSER_M_CFG_ENABLE             0xfffffffe
#define QM_AWUSER_M_CFG_1               0x100098
#define QM_AWUSER_M_CFG_ENABLE          0x1000a0
#define AWUSER_M_CFG_ENABLE             0xfffffffe
#define QM_WUSER_M_CFG_ENABLE           0x1000a8
#define WUSER_M_CFG_ENABLE              0xffffffff

/* qm cache */
#define QM_CACHE_CTL                    0x100050
#define SQC_CACHE_ENABLE                BIT(0)
#define CQC_CACHE_ENABLE                BIT(1)
#define SQC_CACHE_WB_ENABLE             BIT(4)
#define SQC_CACHE_WB_THRD               GENMASK(10, 5)
#define CQC_CACHE_WB_ENABLE             BIT(11)
#define CQC_CACHE_WB_THRD               GENMASK(17, 12)
#define QM_AXI_M_CFG                    0x1000ac
#define AXI_M_CFG                       0xffff
#define QM_AXI_M_CFG_ENABLE             0x1000b0
#define AM_CFG_SINGLE_PORT_MAX_TRANS    0x300014
#define AXI_M_CFG_ENABLE                0xffffffff
#define QM_PEH_AXUSER_CFG               0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE        0x1000d0
#define PEH_AXUSER_CFG                  0x401001
#define PEH_AXUSER_CFG_ENABLE           0xffffffff

#define QM_AXI_RRESP                    BIT(0)
#define QM_AXI_BRESP                    BIT(1)
#define QM_ECC_MBIT                     BIT(2)
#define QM_ECC_1BIT                     BIT(3)
#define QM_ACC_GET_TASK_TIMEOUT         BIT(4)
#define QM_ACC_DO_TASK_TIMEOUT          BIT(5)
#define QM_ACC_WB_NOT_READY_TIMEOUT     BIT(6)
#define QM_SQ_CQ_VF_INVALID             BIT(7)
#define QM_CQ_VF_INVALID                BIT(8)
#define QM_SQ_VF_INVALID                BIT(9)
#define QM_DB_TIMEOUT                   BIT(10)
#define QM_OF_FIFO_OF                   BIT(11)
#define QM_DB_RANDOM_INVALID            BIT(12)
#define QM_MAILBOX_TIMEOUT              BIT(13)
#define QM_FLR_TIMEOUT                  BIT(14)

#define QM_BASE_NFE     (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
                         QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
                         QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \
                         QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT)
#define QM_BASE_CE                      QM_ECC_1BIT

#define QM_Q_DEPTH                      1024
#define QM_MIN_QNUM                     2
#define HISI_ACC_SGL_SGE_NR_MAX         255
#define QM_SHAPER_CFG                   0x100164
#define QM_SHAPER_ENABLE                BIT(30)
#define QM_SHAPER_TYPE1_OFFSET          10

/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR             1

/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE              0 /* don't use uacce */
#define UACCE_MODE_SVA                  1 /* use uacce sva mode */
#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means both register to crypto and uacce"

enum qm_stop_reason {
        QM_NORMAL,
        QM_SOFT_RESET,
        QM_FLR,
};

enum qm_state {
        QM_INIT = 0,
        QM_START,
        QM_CLOSE,
        QM_STOP,
};

enum qp_state {
        QP_INIT = 1,
        QP_START,
        QP_STOP,
        QP_CLOSE,
};

enum qm_hw_ver {
        QM_HW_UNKNOWN = -1,
        QM_HW_V1 = 0x20,
        QM_HW_V2 = 0x21,
        QM_HW_V3 = 0x30,
};

enum qm_fun_type {
        QM_HW_PF,
        QM_HW_VF,
};

enum qm_debug_file {
        CURRENT_QM,
        CURRENT_Q,
        CLEAR_ENABLE,
        DEBUG_FILE_NUM,
};

struct qm_dfx {
        atomic64_t err_irq_cnt;
        atomic64_t aeq_irq_cnt;
        atomic64_t abnormal_irq_cnt;
        atomic64_t create_qp_err_cnt;
        atomic64_t mb_err_cnt;
};

struct debugfs_file {
        enum qm_debug_file index;
        struct mutex lock;
        struct qm_debug *debug;
};

struct qm_debug {
        u32 curr_qm_qp_num;
        u32 sqe_mask_offset;
        u32 sqe_mask_len;
        struct qm_dfx dfx;
        struct dentry *debug_root;
        struct dentry *qm_d;
        struct debugfs_file files[DEBUG_FILE_NUM];
};

struct qm_shaper_factor {
        u32 func_qos;
        u64 cir_b;
        u64 cir_u;
        u64 cir_s;
        u64 cbs_s;
};

struct qm_dma {
        void *va;
        dma_addr_t dma;
        size_t size;
};

struct hisi_qm_status {
        u32 eq_head;
        bool eqc_phase;
        u32 aeq_head;
        bool aeqc_phase;
        atomic_t flags;
        int stop_reason;
};

struct hisi_qm;

struct hisi_qm_err_info {
        char *acpi_rst;
        u32 msi_wr_port;
        u32 ecc_2bits_mask;
        u32 dev_ce_mask;
        u32 ce;
        u32 nfe;
        u32 fe;
};

struct hisi_qm_err_status {
        u32 is_qm_ecc_mbit;
        u32 is_dev_ecc_mbit;
};

struct hisi_qm_err_ini {
        int (*hw_init)(struct hisi_qm *qm);
        void (*hw_err_enable)(struct hisi_qm *qm);
        void (*hw_err_disable)(struct hisi_qm *qm);
        u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
        void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
        void (*open_axi_master_ooo)(struct hisi_qm *qm);
        void (*close_axi_master_ooo)(struct hisi_qm *qm);
        void (*open_sva_prefetch)(struct hisi_qm *qm);
        void (*close_sva_prefetch)(struct hisi_qm *qm);
        void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
        void (*err_info_init)(struct hisi_qm *qm);
};
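
/*
 * Each accelerator driver is expected to supply its own hisi_qm_err_ini and
 * point qm->err_ini at it before enabling device error reporting with
 * hisi_qm_dev_err_init(). A minimal sketch, assuming hypothetical my_acc_*
 * helpers that program, read and clear the device-specific error registers:
 *
 *      static const struct hisi_qm_err_ini my_acc_err_ini = {
 *              .hw_err_enable           = my_acc_hw_error_enable,
 *              .hw_err_disable          = my_acc_hw_error_disable,
 *              .get_dev_hw_err_status   = my_acc_get_hw_err_status,
 *              .clear_dev_hw_err_status = my_acc_clear_hw_err_status,
 *              .log_dev_hw_err          = my_acc_log_hw_error,
 *              .err_info_init           = my_acc_err_info_init,
 *      };
 *
 *      qm->err_ini = &my_acc_err_ini;
 *      hisi_qm_dev_err_init(qm);
 */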

struct hisi_qm_list {
        struct mutex lock;
        struct list_head list;
        int (*register_to_crypto)(struct hisi_qm *qm);
        void (*unregister_from_crypto)(struct hisi_qm *qm);
};

struct hisi_qm {
        enum qm_hw_ver ver;
        enum qm_fun_type fun_type;
        const char *dev_name;
        struct pci_dev *pdev;
        void __iomem *io_base;
        void __iomem *db_io_base;
        u32 sqe_size;
        u32 qp_base;
        u32 qp_num;
        u32 qp_in_used;
        u32 ctrl_qp_num;
        u32 max_qp_num;
        u32 vfs_num;
        u32 db_interval;
        struct list_head list;
        struct hisi_qm_list *qm_list;

        struct qm_dma qdma;
        struct qm_sqc *sqc;
        struct qm_cqc *cqc;
        struct qm_eqe *eqe;
        struct qm_aeqe *aeqe;
        dma_addr_t sqc_dma;
        dma_addr_t cqc_dma;
        dma_addr_t eqe_dma;
        dma_addr_t aeqe_dma;

        struct hisi_qm_status status;
        const struct hisi_qm_err_ini *err_ini;
        struct hisi_qm_err_info err_info;
        struct hisi_qm_err_status err_status;
        unsigned long misc_ctl; /* driver removing and reset sched */

        struct rw_semaphore qps_lock;
        struct idr qp_idr;
        struct hisi_qp *qp_array;

        struct mutex mailbox_lock;

        const struct hisi_qm_hw_ops *ops;

        struct qm_debug debug;

        u32 error_mask;

        struct workqueue_struct *wq;
        struct work_struct work;
        struct work_struct rst_work;
        struct work_struct cmd_process;

        const char *algs;
        bool use_sva;
        bool is_frozen;

        /* doorbell isolation enable */
        bool use_db_isolation;
        resource_size_t phys_base;
        resource_size_t db_phys_base;
        struct uacce_device *uacce;
        int mode;
        struct qm_shaper_factor *factor;
        u32 mb_qos;
        u32 type_rate;
};
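
/*
 * Accelerator drivers typically embed struct hisi_qm inside their own device
 * structure and recover the container with container_of(). A minimal sketch,
 * assuming a hypothetical my_acc device:
 *
 *      struct my_acc {
 *              struct hisi_qm qm;
 *              // device-specific fields follow
 *      };
 *
 *      static inline struct my_acc *to_my_acc(struct hisi_qm *qm)
 *      {
 *              return container_of(qm, struct my_acc, qm);
 *      }
 */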

struct hisi_qp_status {
        atomic_t used;
        u16 sq_tail;
        u16 cq_head;
        bool cqc_phase;
        atomic_t flags;
};

struct hisi_qp_ops {
        int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};

struct hisi_qp {
        u32 qp_id;
        u8 alg_type;
        u8 req_type;

        struct qm_dma qdma;
        void *sqe;
        struct qm_cqe *cqe;
        dma_addr_t sqe_dma;
        dma_addr_t cqe_dma;

        struct hisi_qp_status qp_status;
        struct hisi_qp_ops *hw_ops;
        void *qp_ctx;
        void (*req_cb)(struct hisi_qp *qp, void *data);
        void (*event_cb)(struct hisi_qp *qp);

        struct hisi_qm *qm;
        bool is_resetting;
        bool is_in_kernel;
        u16 pasid;
        struct uacce_queue *uacce_q;
};

static inline int q_num_set(const char *val, const struct kernel_param *kp,
                            unsigned int device)
{
        struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
                                              device, NULL);
        u32 n, q_num;
        int ret;

        if (!val)
                return -EINVAL;

        if (!pdev) {
                q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
                pr_info("No device found currently, assuming queue number is %u\n",
                        q_num);
        } else {
                if (pdev->revision == QM_HW_V1)
                        q_num = QM_QNUM_V1;
                else
                        q_num = QM_QNUM_V2;

                /* drop the reference taken by pci_get_device() */
                pci_dev_put(pdev);
        }

        ret = kstrtou32(val, 10, &n);
        if (ret || n < QM_MIN_QNUM || n > q_num)
                return -EINVAL;

        return param_set_int(val, kp);
}
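
/*
 * q_num_set() backs the per-driver "queue number" module parameter: each
 * driver wraps it with the PCI device ID of its own PF and hooks the wrapper
 * up through module_param_cb(). A minimal sketch, assuming a hypothetical
 * MY_ACC_PF_DEVICE_ID and parameter variable:
 *
 *      static int my_acc_q_num_set(const char *val, const struct kernel_param *kp)
 *      {
 *              return q_num_set(val, kp, MY_ACC_PF_DEVICE_ID);
 *      }
 *
 *      static const struct kernel_param_ops my_acc_q_num_ops = {
 *              .set = my_acc_q_num_set,
 *              .get = param_get_int,
 *      };
 *
 *      static u32 my_acc_q_num = QM_QNUM_V2;
 *      module_param_cb(q_num, &my_acc_q_num_ops, &my_acc_q_num, 0444);
 *      MODULE_PARM_DESC(q_num, "Number of queues in PF (2 - 1024)");
 */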

static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
{
        u32 n;
        int ret;

        if (!val)
                return -EINVAL;

        ret = kstrtou32(val, 10, &n);
        if (ret < 0)
                return ret;

        if (n > QM_MAX_VFS_NUM_V2)
                return -EINVAL;

        return param_set_int(val, kp);
}

static inline int mode_set(const char *val, const struct kernel_param *kp)
{
        u32 n;
        int ret;

        if (!val)
                return -EINVAL;

        ret = kstrtou32(val, 10, &n);
        if (ret != 0 || (n != UACCE_MODE_SVA &&
                         n != UACCE_MODE_NOUACCE))
                return -EINVAL;

        return param_set_int(val, kp);
}

static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
        return mode_set(val, kp);
}
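
/*
 * vfs_num_set() and uacce_mode_set() are likewise meant to be plugged straight
 * into module_param_cb() as parameter setters. A minimal sketch, assuming
 * hypothetical parameter variables in an accelerator driver:
 *
 *      static const struct kernel_param_ops vfs_num_ops = {
 *              .set = vfs_num_set,
 *              .get = param_get_int,
 *      };
 *
 *      static u32 vfs_num;
 *      module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
 *      MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (0 - 63), 0(default)");
 *
 *      static const struct kernel_param_ops uacce_mode_ops = {
 *              .set = uacce_mode_set,
 *              .get = param_get_int,
 *      };
 *
 *      static int uacce_mode = UACCE_MODE_NOUACCE;
 *      module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
 *      MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
 */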

static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
        INIT_LIST_HEAD(&qm_list->list);
        mutex_init(&qm_list->lock);
}
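
/*
 * Each driver keeps one global hisi_qm_list of its probed devices;
 * hisi_qm_alg_register() adds a QM to the list and, for the first device,
 * invokes the register_to_crypto() callback, while hisi_qm_alg_unregister()
 * does the reverse. A minimal sketch, assuming hypothetical my_acc crypto
 * registration helpers:
 *
 *      static struct hisi_qm_list my_acc_devices = {
 *              .register_to_crypto     = my_acc_register_to_crypto,
 *              .unregister_from_crypto = my_acc_unregister_from_crypto,
 *      };
 *
 *      static int __init my_acc_init(void)
 *      {
 *              hisi_qm_init_list(&my_acc_devices);
 *              return pci_register_driver(&my_acc_pci_driver);
 *      }
 */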

int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type);
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
void hisi_qm_release_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
void hisi_qm_debug_init(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
                                          pci_channel_state_t state);
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
void hisi_qm_reset_prepare(struct pci_dev *pdev);
void hisi_qm_reset_done(struct pci_dev *pdev);
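
/*
 * Typical kernel-mode queue pair life cycle, sketched against the interfaces
 * above (error handling trimmed; the my_acc_* names are hypothetical):
 *
 *      struct hisi_qp *qp;
 *      int ret;
 *
 *      qp = hisi_qm_create_qp(qm, my_alg_type);
 *      if (IS_ERR(qp))
 *              return PTR_ERR(qp);
 *
 *      qp->qp_ctx = my_ctx;
 *      qp->req_cb = my_acc_req_done;   // called for each completed CQE
 *
 *      ret = hisi_qm_start_qp(qp, 0);  // in-kernel users pass no PASID
 *      if (ret < 0)
 *              goto out_release;
 *
 *      ret = hisi_qp_send(qp, sqe);    // sqe points to one qm->sqe_size message
 *
 *      hisi_qm_stop_qp(qp);
 * out_release:
 *      hisi_qm_release_qp(qp);
 */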

struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
        struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
        u32 index, dma_addr_t *hw_sgl_dma);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
                           struct hisi_acc_hw_sgl *hw_sgl);
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
                                                   u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
                            struct hisi_acc_sgl_pool *pool);
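
/*
 * The hardware SGL helpers translate a scatterlist into the device's hardware
 * SGL format, drawing entries from a preallocated pool. A minimal sketch
 * (error handling trimmed; pool_size and idx are hypothetical):
 *
 *      struct hisi_acc_sgl_pool *pool;
 *      struct hisi_acc_hw_sgl *hw_sgl;
 *      dma_addr_t hw_sgl_dma;
 *
 *      pool = hisi_acc_create_sgl_pool(dev, pool_size, HISI_ACC_SGL_SGE_NR_MAX);
 *
 *      hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, sgl, pool, idx, &hw_sgl_dma);
 *      // hw_sgl_dma is the address the driver places in its SQE
 *
 *      hisi_acc_sg_buf_unmap(dev, sgl, hw_sgl);
 *      hisi_acc_free_sgl_pool(dev, pool);
 */
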
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
                           u8 alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
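
/*
 * hisi_qm_alloc_qps_node() hands out qp_num queue pairs from the registered
 * QM closest to the given NUMA node. A minimal sketch for a context that
 * needs a single QP (my_acc_devices is the driver's hisi_qm_list, my_alg_type
 * is hypothetical):
 *
 *      struct hisi_qp *qps[1];
 *      int ret;
 *
 *      ret = hisi_qm_alloc_qps_node(&my_acc_devices, 1, my_alg_type,
 *                                   numa_node_id(), qps);
 *      if (ret)
 *              return ret;
 *
 *      hisi_qm_free_qps(qps, 1);
 */
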
#endif