linux/drivers/net/ipa/ipa_cmd.c
   1// SPDX-License-Identifier: GPL-2.0
   2
   3/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2019-2021 Linaro Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <linux/device.h>
   9#include <linux/slab.h>
  10#include <linux/bitfield.h>
  11#include <linux/dma-direction.h>
  12
  13#include "gsi.h"
  14#include "gsi_trans.h"
  15#include "ipa.h"
  16#include "ipa_endpoint.h"
  17#include "ipa_table.h"
  18#include "ipa_cmd.h"
  19#include "ipa_mem.h"
  20
  21/**
  22 * DOC:  IPA Immediate Commands
  23 *
  24 * The AP command TX endpoint is used to issue immediate commands to the IPA.
  25 * An immediate command is generally used to request the IPA do something
  26 * other than data transfer to another endpoint.
  27 *
  28 * Immediate commands are represented by GSI transactions just like other
  29 * transfer requests, represented by a single GSI TRE.  Each immediate
  30 * command has a well-defined format, having a payload of a known length.
  31 * This allows the transfer element's length field to be used to hold an
  32 * immediate command's opcode.  The payload for a command resides in DRAM
  33 * and is described by a single scatterlist entry in its transaction.
  34 * Commands do not require a transaction completion callback.  To commit
  35 * an immediate command transaction, either gsi_trans_commit_wait() or
  36 * gsi_trans_commit_wait_timeout() is used.
  37 */
  38
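/* Example (a minimal usage sketch, not part of the driver): issuing an
 * immediate command means allocating a command transaction, adding one or
 * more commands to it, then committing it.  The "ipa", "offset", "size",
 * and "addr" values below are assumed for illustration only; passing
 * false as the last argument copies IPA-local memory out to "addr".
 *
 *      struct gsi_trans *trans;
 *
 *      trans = ipa_cmd_trans_alloc(ipa, 1);
 *      if (!trans)
 *              return;
 *
 *      ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, false);
 *      gsi_trans_commit_wait(trans);
 */
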
  39/* Some commands can wait until indicated pipeline stages are clear */
  40enum pipeline_clear_options {
  41        pipeline_clear_hps              = 0x0,
  42        pipeline_clear_src_grp          = 0x1,
  43        pipeline_clear_full             = 0x2,
  44};
  45
  46/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */
  47
  48struct ipa_cmd_hw_ip_fltrt_init {
  49        __le64 hash_rules_addr;
  50        __le64 flags;
  51        __le64 nhash_rules_addr;
  52};
  53
  54/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
  55#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK                  GENMASK_ULL(11, 0)
  56#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK                  GENMASK_ULL(27, 12)
  57#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK                 GENMASK_ULL(39, 28)
  58#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK                 GENMASK_ULL(55, 40)
  59
  60/* IPA_CMD_HDR_INIT_LOCAL */
  61
  62struct ipa_cmd_hw_hdr_init_local {
  63        __le64 hdr_table_addr;
  64        __le32 flags;
  65        __le32 reserved;
  66};
  67
  68/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
  69#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK           GENMASK(11, 0)
  70#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK             GENMASK(27, 12)
  71
  72/* IPA_CMD_REGISTER_WRITE */
  73
  74/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
  75#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK          GENMASK(8, 8)
  76#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK        GENMASK(10, 9)
  77
  78struct ipa_cmd_register_write {
  79        __le16 flags;           /* Unused/reserved prior to IPA v4.0 */
  80        __le16 offset;
  81        __le32 value;
  82        __le32 value_mask;
  83        __le32 clear_options;   /* Unused/reserved for IPA v4.0+ */
  84};
  85
  86/* Field masks for ipa_cmd_register_write structure fields */
  87/* The next field is present for IPA v4.0+ */
  88#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK          GENMASK(14, 11)
  89/* The next field is not present for IPA v4.0+ */
  90#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK           GENMASK(15, 15)
  91
  92/* The next field and its values are not present for IPA v4.0+ */
  93#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK              GENMASK(1, 0)
  94
  95/* IPA_CMD_IP_PACKET_INIT */
  96
  97struct ipa_cmd_ip_packet_init {
  98        u8 dest_endpoint;
  99        u8 reserved[7];
 100};
 101
 102/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
 103#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK             GENMASK(4, 0)
 104
 105/* IPA_CMD_DMA_SHARED_MEM */
 106
 107/* For IPA v4.0+, this opcode gets modified with pipeline clear options */
 108
 109#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK          GENMASK(8, 8)
 110#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK        GENMASK(10, 9)
 111
 112struct ipa_cmd_hw_dma_mem_mem {
 113        __le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
 114        __le16 size;
 115        __le16 local_addr;
 116        __le16 flags;
 117        __le64 system_addr;
 118};
 119
  120/* Flag allowing atomic clear of target region after reading data (v4.0+) */
 121#define DMA_SHARED_MEM_CLEAR_AFTER_READ                 GENMASK(15, 15)
 122
 123/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
 124#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK            GENMASK(0, 0)
 125/* The next two fields are not present for IPA v4.0+ */
 126#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK           GENMASK(1, 1)
 127#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK        GENMASK(3, 2)
 128
 129/* IPA_CMD_IP_PACKET_TAG_STATUS */
 130
 131struct ipa_cmd_ip_packet_tag_status {
 132        __le64 tag;
 133};
 134
 135#define IP_PACKET_TAG_STATUS_TAG_FMASK                  GENMASK_ULL(63, 16)
 136
 137/* Immediate command payload */
 138union ipa_cmd_payload {
 139        struct ipa_cmd_hw_ip_fltrt_init table_init;
 140        struct ipa_cmd_hw_hdr_init_local hdr_init_local;
 141        struct ipa_cmd_register_write register_write;
 142        struct ipa_cmd_ip_packet_init ip_packet_init;
 143        struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
 144        struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
 145};
 146
 147static void ipa_cmd_validate_build(void)
 148{
  149        /* The sizes of filter and route tables need to fit into fields
 150         * in the ipa_cmd_hw_ip_fltrt_init structure.  Although hashed tables
 151         * might not be used, non-hashed and hashed tables have the same
 152         * maximum size.  IPv4 and IPv6 filter tables have the same number
  153         * of entries, and IPv4 and IPv6 route tables have the same number
 154         * of entries.
 155         */
 156#define TABLE_SIZE      (TABLE_COUNT_MAX * sizeof(__le64))
 157#define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
 158        BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
 159        BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
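        /* For example, field_max(GENMASK_ULL(11, 0)) is 0xfff, so with 8-byte
         * entries these checks limit each table to 511 entries.
         */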
 160#undef TABLE_COUNT_MAX
 161#undef TABLE_SIZE
 162}
 163
 164#ifdef IPA_VALIDATE
 165
 166/* Validate a memory region holding a table */
 167bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
 168                         bool route, bool ipv6, bool hashed)
 169{
 170        struct device *dev = &ipa->pdev->dev;
 171        u32 offset_max;
 172
 173        offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
 174                            : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
 175        if (mem->offset > offset_max ||
 176            ipa->mem_offset > offset_max - mem->offset) {
 177                dev_err(dev, "IPv%c %s%s table region offset too large\n",
 178                        ipv6 ? '6' : '4', hashed ? "hashed " : "",
 179                        route ? "route" : "filter");
 180                dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
 181                        ipa->mem_offset, mem->offset, offset_max);
 182
 183                return false;
 184        }
 185
 186        if (mem->offset > ipa->mem_size ||
 187            mem->size > ipa->mem_size - mem->offset) {
 188                dev_err(dev, "IPv%c %s%s table region out of range\n",
 189                        ipv6 ? '6' : '4', hashed ? "hashed " : "",
 190                        route ? "route" : "filter");
 191                dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
 192                        mem->offset, mem->size, ipa->mem_size);
 193
 194                return false;
 195        }
 196
 197        return true;
 198}
 199
 200/* Validate the memory region that holds headers */
 201static bool ipa_cmd_header_valid(struct ipa *ipa)
 202{
 203        const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
 204        struct device *dev = &ipa->pdev->dev;
 205        u32 offset_max;
 206        u32 size_max;
 207        u32 size;
 208
 209        /* In ipa_cmd_hdr_init_local_add() we record the offset and size
 210         * of the header table memory area.  Make sure the offset and size
 211         * fit in the fields that need to hold them, and that the entire
 212         * range is within the overall IPA memory range.
 213         */
 214        offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
 215        if (mem->offset > offset_max ||
 216            ipa->mem_offset > offset_max - mem->offset) {
 217                dev_err(dev, "header table region offset too large\n");
 218                dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
 219                        ipa->mem_offset, mem->offset, offset_max);
 220
 221                return false;
 222        }
 223
 224        size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
 225        size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
 226        size += ipa->mem[IPA_MEM_AP_HEADER].size;
 227
 228        if (size > size_max) {
 229                dev_err(dev, "header table region size too large\n");
 230                dev_err(dev, "    (0x%04x > 0x%08x)\n", size, size_max);
 231
 232                return false;
 233        }
 234        if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) {
 235                dev_err(dev, "header table region out of range\n");
 236                dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
 237                        mem->offset, size, ipa->mem_size);
 238
 239                return false;
 240        }
 241
 242        return true;
 243}
 244
 245/* Indicate whether an offset can be used with a register_write command */
 246static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
 247                                                const char *name, u32 offset)
 248{
 249        struct ipa_cmd_register_write *payload;
 250        struct device *dev = &ipa->pdev->dev;
 251        u32 offset_max;
 252        u32 bit_count;
 253
 254        /* The maximum offset in a register_write immediate command depends
 255         * on the version of IPA.  A 16 bit offset is always supported,
 256         * but starting with IPA v4.0 some additional high-order bits are
 257         * allowed.
 258         */
 259        bit_count = BITS_PER_BYTE * sizeof(payload->offset);
 260        if (ipa->version >= IPA_VERSION_4_0)
 261                bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
 262        BUILD_BUG_ON(bit_count > 32);
 263        offset_max = ~0U >> (32 - bit_count);
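        /* Worked example: payload->offset is 16 bits wide; for IPA v4.0+ the
         * OFFSET_HIGH field adds hweight32(GENMASK(14, 11)) = 4 more bits, so
         * bit_count is 20 and offset_max is 0xfffff (0xffff before v4.0).
         */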
 264
 265        /* Make sure the offset can be represented by the field(s)
 266         * that holds it.  Also make sure the offset is not outside
 267         * the overall IPA memory range.
 268         */
 269        if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
  270                dev_err(dev, "%s offset too large (0x%04x + 0x%04x > 0x%04x)\n",
 271                        name, ipa->mem_offset, offset, offset_max);
 272                return false;
 273        }
 274
 275        return true;
 276}
 277
 278/* Check whether offsets passed to register_write are valid */
 279static bool ipa_cmd_register_write_valid(struct ipa *ipa)
 280{
 281        const char *name;
 282        u32 offset;
 283
 284        /* If hashed tables are supported, ensure the hash flush register
 285         * offset will fit in a register write IPA immediate command.
 286         */
 287        if (ipa_table_hash_support(ipa)) {
 288                offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
 289                name = "filter/route hash flush";
 290                if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
 291                        return false;
 292        }
 293
 294        /* Each endpoint can have a status endpoint associated with it,
 295         * and this is recorded in an endpoint register.  If the modem
 296         * crashes, we reset the status endpoint for all modem endpoints
 297         * using a register write IPA immediate command.  Make sure the
 298         * worst case (highest endpoint number) offset of that endpoint
 299         * fits in the register write command field(s) that must hold it.
 300         */
 301        offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
 302        name = "maximal endpoint status";
 303        if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
 304                return false;
 305
 306        return true;
 307}
 308
 309bool ipa_cmd_data_valid(struct ipa *ipa)
 310{
 311        if (!ipa_cmd_header_valid(ipa))
 312                return false;
 313
 314        if (!ipa_cmd_register_write_valid(ipa))
 315                return false;
 316
 317        return true;
 318}
 319
 320#endif /* IPA_VALIDATE */
 321
 322int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
 323{
 324        struct gsi_trans_info *trans_info = &channel->trans_info;
 325        struct device *dev = channel->gsi->dev;
 326        int ret;
 327
 328        /* This is as good a place as any to validate build constants */
 329        ipa_cmd_validate_build();
 330
 331        /* Even though command payloads are allocated one at a time,
 332         * a single transaction can require up to tlv_count of them,
 333         * so we treat them as if that many can be allocated at once.
 334         */
 335        ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
 336                                      sizeof(union ipa_cmd_payload),
 337                                      tre_max, channel->tlv_count);
 338        if (ret)
 339                return ret;
 340
 341        /* Each TRE needs a command info structure */
 342        ret = gsi_trans_pool_init(&trans_info->info_pool,
 343                                   sizeof(struct ipa_cmd_info),
 344                                   tre_max, channel->tlv_count);
 345        if (ret)
 346                gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
 347
 348        return ret;
 349}
 350
 351void ipa_cmd_pool_exit(struct gsi_channel *channel)
 352{
 353        struct gsi_trans_info *trans_info = &channel->trans_info;
 354        struct device *dev = channel->gsi->dev;
 355
 356        gsi_trans_pool_exit(&trans_info->info_pool);
 357        gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
 358}
 359
 360static union ipa_cmd_payload *
 361ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
 362{
 363        struct gsi_trans_info *trans_info;
 364        struct ipa_endpoint *endpoint;
 365
 366        endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
 367        trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;
 368
 369        return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
 370}
 371
  372/* If hash_size is 0, hash_offset and hash_addr are ignored. */
 373void ipa_cmd_table_init_add(struct gsi_trans *trans,
 374                            enum ipa_cmd_opcode opcode, u16 size, u32 offset,
 375                            dma_addr_t addr, u16 hash_size, u32 hash_offset,
 376                            dma_addr_t hash_addr)
 377{
 378        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
 379        enum dma_data_direction direction = DMA_TO_DEVICE;
 380        struct ipa_cmd_hw_ip_fltrt_init *payload;
 381        union ipa_cmd_payload *cmd_payload;
 382        dma_addr_t payload_addr;
 383        u64 val;
 384
 385        /* Record the non-hash table offset and size */
 386        offset += ipa->mem_offset;
 387        val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
 388        val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
 389
 390        /* The hash table offset and address are zero if its size is 0 */
 391        if (hash_size) {
 392                /* Record the hash table offset and size */
 393                hash_offset += ipa->mem_offset;
 394                val |= u64_encode_bits(hash_offset,
 395                                       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
 396                val |= u64_encode_bits(hash_size,
 397                                       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
 398        }
 399
 400        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
 401        payload = &cmd_payload->table_init;
 402
 403        /* Fill in all offsets and sizes and the non-hash table address */
 404        if (hash_size)
 405                payload->hash_rules_addr = cpu_to_le64(hash_addr);
 406        payload->flags = cpu_to_le64(val);
 407        payload->nhash_rules_addr = cpu_to_le64(addr);
 408
 409        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
 410                          direction, opcode);
 411}
 412
 413/* Initialize header space in IPA-local memory */
 414void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
 415                                dma_addr_t addr)
 416{
 417        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
 418        enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
 419        enum dma_data_direction direction = DMA_TO_DEVICE;
 420        struct ipa_cmd_hw_hdr_init_local *payload;
 421        union ipa_cmd_payload *cmd_payload;
 422        dma_addr_t payload_addr;
 423        u32 flags;
 424
 425        offset += ipa->mem_offset;
 426
 427        /* With this command we tell the IPA where in its local memory the
 428         * header tables reside.  The content of the buffer provided is
 429         * also written via DMA into that space.  The IPA hardware owns
 430         * the table, but the AP must initialize it.
 431         */
 432        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
 433        payload = &cmd_payload->hdr_init_local;
 434
 435        payload->hdr_table_addr = cpu_to_le64(addr);
 436        flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
 437        flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
 438        payload->flags = cpu_to_le32(flags);
 439
 440        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
 441                          direction, opcode);
 442}
 443
 444void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
 445                                u32 mask, bool clear_full)
 446{
 447        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
 448        struct ipa_cmd_register_write *payload;
 449        union ipa_cmd_payload *cmd_payload;
 450        u32 opcode = IPA_CMD_REGISTER_WRITE;
 451        dma_addr_t payload_addr;
 452        u32 clear_option;
 453        u32 options;
 454        u16 flags;
 455
 456        /* pipeline_clear_src_grp is not used */
 457        clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;
 458
 459        /* IPA v4.0+ represents the pipeline clear options in the opcode.  It
 460         * also supports a larger offset by encoding additional high-order
 461         * bits in the payload flags field.
 462         */
 463        if (ipa->version >= IPA_VERSION_4_0) {
 464                u16 offset_high;
 465                u32 val;
 466
 467                /* Opcode encodes pipeline clear options */
 468                /* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
 469                val = u16_encode_bits(clear_option,
 470                                      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
 471                opcode |= val;
 472
 473                /* Extract the high 4 bits from the offset */
 474                offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
 475                offset &= (1 << 16) - 1;
 476
  477                /* Encode the high-order offset bits into the flags field */
 478                flags = u16_encode_bits(offset_high,
 479                                REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
 480                options = 0;    /* reserved */
 481
 482        } else {
 483                flags = 0;      /* SKIP_CLEAR flag is always 0 */
 484                options = u16_encode_bits(clear_option,
 485                                          REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
 486        }
 487
 488        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
 489        payload = &cmd_payload->register_write;
 490
 491        payload->flags = cpu_to_le16(flags);
 492        payload->offset = cpu_to_le16((u16)offset);
 493        payload->value = cpu_to_le32(value);
 494        payload->value_mask = cpu_to_le32(mask);
 495        payload->clear_options = cpu_to_le32(options);
 496
 497        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
 498                          DMA_NONE, opcode);
 499}
 500
 501/* Skip IP packet processing on the next data transfer on a TX channel */
 502static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
 503{
 504        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
 505        enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
 506        enum dma_data_direction direction = DMA_TO_DEVICE;
 507        struct ipa_cmd_ip_packet_init *payload;
 508        union ipa_cmd_payload *cmd_payload;
 509        dma_addr_t payload_addr;
 510
 511        /* assert(endpoint_id <
 512                  field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */
 513
 514        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
 515        payload = &cmd_payload->ip_packet_init;
 516
 517        payload->dest_endpoint = u8_encode_bits(endpoint_id,
 518                                        IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
 519
 520        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
 521                          direction, opcode);
 522}
 523
 524/* Use a DMA command to read or write a block of IPA-resident memory */
 525void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
 526                                dma_addr_t addr, bool toward_ipa)
 527{
 528        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
 529        enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
 530        struct ipa_cmd_hw_dma_mem_mem *payload;
 531        union ipa_cmd_payload *cmd_payload;
 532        enum dma_data_direction direction;
 533        dma_addr_t payload_addr;
 534        u16 flags;
 535
 536        /* size and offset must fit in 16 bit fields */
 537        /* assert(size > 0 && size <= U16_MAX); */
 538        /* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */
 539
 540        offset += ipa->mem_offset;
 541
 542        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
 543        payload = &cmd_payload->dma_shared_mem;
 544
 545        /* payload->clear_after_read was reserved prior to IPA v4.0.  It's
 546         * never needed for current code, so it's 0 regardless of version.
 547         */
 548        payload->size = cpu_to_le16(size);
 549        payload->local_addr = cpu_to_le16(offset);
 550        /* payload->flags:
  551         *   direction:         0 = write to IPA, 1 = read from IPA
 552         * Starting at v4.0 these are reserved; either way, all zero:
 553         *   pipeline clear:    0 = wait for pipeline clear (don't skip)
 554         *   clear_options:     0 = pipeline_clear_hps
 555         * Instead, for v4.0+ these are encoded in the opcode.  But again
 556         * since both values are 0 we won't bother OR'ing them in.
 557         */
 558        flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
 559        payload->flags = cpu_to_le16(flags);
 560        payload->system_addr = cpu_to_le64(addr);
 561
 562        direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 563
 564        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
 565                          direction, opcode);
 566}
 567
 568static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
 569{
 570        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
 571        enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
 572        enum dma_data_direction direction = DMA_TO_DEVICE;
 573        struct ipa_cmd_ip_packet_tag_status *payload;
 574        union ipa_cmd_payload *cmd_payload;
 575        dma_addr_t payload_addr;
 576
 577        /* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */
 578
 579        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
 580        payload = &cmd_payload->ip_packet_tag_status;
 581
 582        payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
 583
 584        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
 585                          direction, opcode);
 586}
 587
 588/* Issue a small command TX data transfer */
 589static void ipa_cmd_transfer_add(struct gsi_trans *trans)
 590{
 591        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
 592        enum dma_data_direction direction = DMA_TO_DEVICE;
 593        enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
 594        union ipa_cmd_payload *payload;
 595        dma_addr_t payload_addr;
 596
 597        /* Just transfer a zero-filled payload structure */
 598        payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
 599
 600        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
 601                          direction, opcode);
 602}
 603
 604/* Add immediate commands to a transaction to clear the hardware pipeline */
 605void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
 606{
 607        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
 608        struct ipa_endpoint *endpoint;
 609
 610        /* This will complete when the transfer is received */
 611        reinit_completion(&ipa->completion);
 612
 613        /* Issue a no-op register write command (mask 0 means no write) */
 614        ipa_cmd_register_write_add(trans, 0, 0, 0, true);
 615
 616        /* Send a data packet through the IPA pipeline.  The packet_init
 617         * command says to send the next packet directly to the exception
 618         * endpoint without any other IPA processing.  The tag_status
 619         * command requests that status be generated on completion of
 620         * that transfer, and that it will be tagged with a value.
 621         * Finally, the transfer command sends a small packet of data
 622         * (instead of a command) using the command endpoint.
 623         */
 624        endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
 625        ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
 626        ipa_cmd_ip_tag_status_add(trans);
 627        ipa_cmd_transfer_add(trans);
 628}
 629
 630/* Returns the number of commands required to clear the pipeline */
 631u32 ipa_cmd_pipeline_clear_count(void)
 632{
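        /* One each: register write, packet init, tag status, and the small
         * data transfer added by ipa_cmd_pipeline_clear_add()
         */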
 633        return 4;
 634}
 635
 636void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
 637{
 638        wait_for_completion(&ipa->completion);
 639}
 640
 641void ipa_cmd_pipeline_clear(struct ipa *ipa)
 642{
 643        u32 count = ipa_cmd_pipeline_clear_count();
 644        struct gsi_trans *trans;
 645
 646        trans = ipa_cmd_trans_alloc(ipa, count);
 647        if (trans) {
 648                ipa_cmd_pipeline_clear_add(trans);
 649                gsi_trans_commit_wait(trans);
 650                ipa_cmd_pipeline_clear_wait(ipa);
 651        } else {
 652                dev_err(&ipa->pdev->dev,
 653                        "error allocating %u entry tag transaction\n", count);
 654        }
 655}
 656
 657static struct ipa_cmd_info *
 658ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
 659{
 660        struct gsi_channel *channel;
 661
 662        channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];
 663
 664        return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
 665}
 666
 667/* Allocate a transaction for the command TX endpoint */
 668struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
 669{
 670        struct ipa_endpoint *endpoint;
 671        struct gsi_trans *trans;
 672
 673        endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
 674
 675        trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
 676                                        tre_count, DMA_NONE);
 677        if (trans)
 678                trans->info = ipa_cmd_info_alloc(endpoint, tre_count);
 679
 680        return trans;
 681}
 682