/* linux/drivers/net/ethernet/netronome/nfp/flower/cmsg.c */
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
   3
   4#include <linux/bitfield.h>
   5#include <linux/netdevice.h>
   6#include <linux/skbuff.h>
   7#include <linux/workqueue.h>
   8#include <net/dst_metadata.h>
   9
  10#include "main.h"
  11#include "../nfp_net.h"
  12#include "../nfp_net_repr.h"
  13#include "./cmsg.h"
  14
  15static struct nfp_flower_cmsg_hdr *
  16nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
  17{
  18        return (struct nfp_flower_cmsg_hdr *)skb->data;
  19}
  20
  21struct sk_buff *
  22nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
  23                      enum nfp_flower_cmsg_type_port type, gfp_t flag)
  24{
  25        struct nfp_flower_cmsg_hdr *ch;
  26        struct sk_buff *skb;
  27
  28        size += NFP_FLOWER_CMSG_HLEN;
  29
  30        skb = nfp_app_ctrl_msg_alloc(app, size, flag);
  31        if (!skb)
  32                return NULL;
  33
  34        ch = nfp_flower_cmsg_get_hdr(skb);
  35        ch->pad = 0;
  36        ch->version = NFP_FLOWER_CMSG_VER1;
  37        ch->type = type;
  38        skb_put(skb, size);
  39
  40        return skb;
  41}
  42
  43struct sk_buff *
  44nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
  45{
  46        struct nfp_flower_cmsg_mac_repr *msg;
  47        struct sk_buff *skb;
  48
  49        skb = nfp_flower_cmsg_alloc(app, struct_size(msg, ports, num_ports),
  50                                    NFP_FLOWER_CMSG_TYPE_MAC_REPR, GFP_KERNEL);
  51        if (!skb)
  52                return NULL;
  53
  54        msg = nfp_flower_cmsg_get_data(skb);
  55        memset(msg->reserved, 0, sizeof(msg->reserved));
  56        msg->num_ports = num_ports;
  57
  58        return skb;
  59}
  60
  61void
  62nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
  63                             unsigned int nbi, unsigned int nbi_port,
  64                             unsigned int phys_port)
  65{
  66        struct nfp_flower_cmsg_mac_repr *msg;
  67
  68        msg = nfp_flower_cmsg_get_data(skb);
  69        msg->ports[idx].idx = idx;
  70        msg->ports[idx].info = nbi & NFP_FLOWER_CMSG_MAC_REPR_NBI;
  71        msg->ports[idx].nbi_port = nbi_port;
  72        msg->ports[idx].phys_port = phys_port;
  73}
  74
  75int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok,
  76                            unsigned int mtu, bool mtu_only)
  77{
  78        struct nfp_flower_cmsg_portmod *msg;
  79        struct sk_buff *skb;
  80
  81        skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
  82                                    NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
  83        if (!skb)
  84                return -ENOMEM;
  85
  86        msg = nfp_flower_cmsg_get_data(skb);
  87        msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
  88        msg->reserved = 0;
  89        msg->info = carrier_ok;
  90
  91        if (mtu_only)
  92                msg->info |= NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY;
  93
  94        msg->mtu = cpu_to_be16(mtu);
  95
  96        nfp_ctrl_tx(repr->app->ctrl, skb);
  97
  98        return 0;
  99}
 100
 101int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
 102{
 103        struct nfp_flower_cmsg_portreify *msg;
 104        struct sk_buff *skb;
 105
 106        skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
 107                                    NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
 108                                    GFP_KERNEL);
 109        if (!skb)
 110                return -ENOMEM;
 111
 112        msg = nfp_flower_cmsg_get_data(skb);
 113        msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
 114        msg->reserved = 0;
 115        msg->info = cpu_to_be16(exists);
 116
 117        nfp_ctrl_tx(repr->app->ctrl, skb);
 118
 119        return 0;
 120}
 121
 122static bool
 123nfp_flower_process_mtu_ack(struct nfp_app *app, struct sk_buff *skb)
 124{
 125        struct nfp_flower_priv *app_priv = app->priv;
 126        struct nfp_flower_cmsg_portmod *msg;
 127
 128        msg = nfp_flower_cmsg_get_data(skb);
 129
 130        if (!(msg->info & NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY))
 131                return false;
 132
 133        spin_lock_bh(&app_priv->mtu_conf.lock);
 134        if (!app_priv->mtu_conf.requested_val ||
 135            app_priv->mtu_conf.portnum != be32_to_cpu(msg->portnum) ||
 136            be16_to_cpu(msg->mtu) != app_priv->mtu_conf.requested_val) {
 137                /* Not an ack for requested MTU change. */
 138                spin_unlock_bh(&app_priv->mtu_conf.lock);
 139                return false;
 140        }
 141
 142        app_priv->mtu_conf.ack = true;
 143        app_priv->mtu_conf.requested_val = 0;
 144        wake_up(&app_priv->mtu_conf.wait_q);
 145        spin_unlock_bh(&app_priv->mtu_conf.lock);
 146
 147        return true;
 148}
 149
 150static void
 151nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
 152{
 153        struct nfp_flower_cmsg_portmod *msg;
 154        struct net_device *netdev;
 155        bool link;
 156
 157        msg = nfp_flower_cmsg_get_data(skb);
 158        link = msg->info & NFP_FLOWER_CMSG_PORTMOD_INFO_LINK;
 159
 160        rtnl_lock();
 161        rcu_read_lock();
 162        netdev = nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
 163        rcu_read_unlock();
 164        if (!netdev) {
 165                nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
 166                                     be32_to_cpu(msg->portnum));
 167                rtnl_unlock();
 168                return;
 169        }
 170
 171        if (link) {
 172                u16 mtu = be16_to_cpu(msg->mtu);
 173
 174                netif_carrier_on(netdev);
 175
 176                /* An MTU of 0 from the firmware should be ignored */
 177                if (mtu)
 178                        dev_set_mtu(netdev, mtu);
 179        } else {
 180                netif_carrier_off(netdev);
 181        }
 182        rtnl_unlock();
 183}
 184
 185static void
 186nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
 187{
 188        struct nfp_flower_priv *priv = app->priv;
 189        struct nfp_flower_cmsg_portreify *msg;
 190        bool exists;
 191
 192        msg = nfp_flower_cmsg_get_data(skb);
 193
 194        rcu_read_lock();
 195        exists = !!nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
 196        rcu_read_unlock();
 197        if (!exists) {
 198                nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
 199                                     be32_to_cpu(msg->portnum));
 200                return;
 201        }
 202
 203        atomic_inc(&priv->reify_replies);
 204        wake_up(&priv->reify_wait_queue);
 205}
 206
 207static void
 208nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
 209{
 210        unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
 211        struct nfp_flower_cmsg_merge_hint *msg;
 212        struct nfp_fl_payload *sub_flows[2];
 213        int err, i, flow_cnt;
 214
 215        msg = nfp_flower_cmsg_get_data(skb);
 216        /* msg->count starts at 0 and always assumes at least 1 entry. */
 217        flow_cnt = msg->count + 1;
 218
 219        if (msg_len < struct_size(msg, flow, flow_cnt)) {
 220                nfp_flower_cmsg_warn(app, "Merge hint ctrl msg too short - %d bytes but expect %zd\n",
 221                                     msg_len, struct_size(msg, flow, flow_cnt));
 222                return;
 223        }
 224
 225        if (flow_cnt != 2) {
 226                nfp_flower_cmsg_warn(app, "Merge hint contains %d flows - two are expected\n",
 227                                     flow_cnt);
 228                return;
 229        }
 230
 231        rtnl_lock();
 232        for (i = 0; i < flow_cnt; i++) {
 233                u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
 234
 235                sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
 236                if (!sub_flows[i]) {
 237                        nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
 238                        goto err_rtnl_unlock;
 239                }
 240        }
 241
 242        err = nfp_flower_merge_offloaded_flows(app, sub_flows[0], sub_flows[1]);
 243        /* Only warn on memory fail. Hint veto will not break functionality. */
 244        if (err == -ENOMEM)
 245                nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
 246
 247err_rtnl_unlock:
 248        rtnl_unlock();
 249}
 250
/* Dispatch one received control message to its type-specific handler.
 *
 * Always takes ownership of @skb: it is consumed with dev_consume_skb_any()
 * after successful handling, freed with dev_kfree_skb_any() for unhandled
 * or invalid types, and left alive only when the LAG code stored it for
 * deferred processing (skb_stored == true).
 */
static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_flower_cmsg_hdr *cmsg_hdr;
	enum nfp_flower_cmsg_type_port type;
	bool skb_stored = false;

	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);

	type = cmsg_hdr->type;
	switch (type) {
	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
		nfp_flower_cmsg_portmod_rx(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_MERGE_HINT:
		/* Merge hints are only honoured when the firmware advertised
		 * flow-merge support; otherwise treat as an invalid type.
		 */
		if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE) {
			nfp_flower_cmsg_merge_hint_rx(app, skb);
			break;
		}
		goto err_default;
	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
		nfp_tunnel_request_route_v4(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH_V6:
		nfp_tunnel_request_route_v6(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
		nfp_tunnel_keep_alive(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS_V6:
		nfp_tunnel_keep_alive_v6(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
		nfp_flower_stats_rlim_reply(app, skb);
		break;
	case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
		if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
			/* LAG code may keep the skb for deferred handling;
			 * in that case we must not free it below.
			 */
			skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
			break;
		}
		fallthrough;
	default:
err_default:
		nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
				     type);
		goto out;
	}

	if (!skb_stored)
		dev_consume_skb_any(skb);
	return;
out:
	dev_kfree_skb_any(skb);
}
 306
 307void nfp_flower_cmsg_process_rx(struct work_struct *work)
 308{
 309        struct sk_buff_head cmsg_joined;
 310        struct nfp_flower_priv *priv;
 311        struct sk_buff *skb;
 312
 313        priv = container_of(work, struct nfp_flower_priv, cmsg_work);
 314        skb_queue_head_init(&cmsg_joined);
 315
 316        spin_lock_bh(&priv->cmsg_skbs_high.lock);
 317        skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
 318        spin_unlock_bh(&priv->cmsg_skbs_high.lock);
 319
 320        spin_lock_bh(&priv->cmsg_skbs_low.lock);
 321        skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
 322        spin_unlock_bh(&priv->cmsg_skbs_low.lock);
 323
 324        while ((skb = __skb_dequeue(&cmsg_joined)))
 325                nfp_flower_cmsg_process_one_rx(priv->app, skb);
 326}
 327
 328static void
 329nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
 330{
 331        struct nfp_flower_priv *priv = app->priv;
 332        struct sk_buff_head *skb_head;
 333
 334        if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
 335                skb_head = &priv->cmsg_skbs_high;
 336        else
 337                skb_head = &priv->cmsg_skbs_low;
 338
 339        if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
 340                nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
 341                dev_kfree_skb_any(skb);
 342                return;
 343        }
 344
 345        skb_queue_tail(skb_head, skb);
 346        schedule_work(&priv->cmsg_work);
 347}
 348
 349void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
 350{
 351        struct nfp_flower_cmsg_hdr *cmsg_hdr;
 352
 353        cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
 354
 355        if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
 356                nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
 357                                     cmsg_hdr->version);
 358                dev_kfree_skb_any(skb);
 359                return;
 360        }
 361
 362        if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
 363                /* We need to deal with stats updates from HW asap */
 364                nfp_flower_rx_flow_stats(app, skb);
 365                dev_consume_skb_any(skb);
 366        } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_MOD &&
 367                   nfp_flower_process_mtu_ack(app, skb)) {
 368                /* Handle MTU acks outside wq to prevent RTNL conflict. */
 369                dev_consume_skb_any(skb);
 370        } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH ||
 371                   cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6) {
 372                /* Acks from the NFP that the route is added - ignore. */
 373                dev_consume_skb_any(skb);
 374        } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
 375                /* Handle REIFY acks outside wq to prevent RTNL conflict. */
 376                nfp_flower_cmsg_portreify_rx(app, skb);
 377                dev_consume_skb_any(skb);
 378        } else {
 379                nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
 380        }
 381}
 382