/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 */

#include <net/ipv6.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_tcb.h"
#include "t4_values.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "t4fw_api.h"
#include "cxgb4_filter.h"

static inline bool is_field_set(u32 val, u32 mask)
{
	return val || mask;
}

static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
{
	return !(conf & conf_mask) && is_field_set(val, mask);
}

static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
			 unsigned int ftid, u16 word, u64 mask, u64 val,
			 int no_reply)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
	req->reply_ctrl = htons(REPLY_CHAN_V(0) |
				QUEUENO_V(adap->sge.fw_evtq.abs_id) |
				NO_REPLY_V(no_reply));
	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adap, skb);
	return 0;
}

/* Set one of the t_flags bits in the TCB. */
static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f,
			 unsigned int ftid, unsigned int bit_pos,
			 unsigned int val, int no_reply)
{
	return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos,
			     (unsigned long long)val << bit_pos, no_reply);
}

static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
	txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_req), 16));
	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	sc->len = htonl(sizeof(*abort_req) - sizeof(struct work_request_hdr));
	OPCODE_TID(abort_req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	abort_req->rsvd0 = htonl(0);
	abort_req->rsvd1 = 0;
	abort_req->cmd = CPL_ABORT_NO_RST;
}

static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
	txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	sc->len = htonl(sizeof(*abort_rpl) - sizeof(struct work_request_hdr));
	OPCODE_TID(abort_rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	abort_rpl->rsvd0 = htonl(0);
	abort_rpl->rsvd1 = 0;
	abort_rpl->cmd = CPL_ABORT_NO_RST;
}

static void mk_set_tcb_ulp(struct filter_entry *f,
			   struct cpl_set_tcb_field *req,
			   unsigned int word, u64 mask, u64 val,
			   u8 cookie, int no_reply)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
	txpkt->len = htonl(DIV_ROUND_UP(sizeof(*req), 16));
	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	sc->len = htonl(sizeof(*req) - sizeof(struct work_request_hdr));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
	req->reply_ctrl = htons(NO_REPLY_V(no_reply) | REPLY_CHAN_V(0) |
				QUEUENO_V(0));
	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
	sc->len = htonl(0);
}

static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
{
	int err;

	/* do a set-tcb for smac-sel and CWR bit.. */
	err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
			    TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
			    TCB_SMAC_SEL_V(f->smt->idx), 1);
	if (err)
		goto smac_err;

	err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
	if (!err)
		return 0;

smac_err:
	dev_err(adap->pdev_dev, "filter %u smac config failed with error %u\n",
		f->tid, err);
	return err;
}

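/* Program the NAT rewrite values for a filter into spare TCB words. In
 * NAT mode the hardware reuses otherwise-unused TCB fields to hold the
 * rewritten local/foreign IP addresses and TCP/UDP ports.
 */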
static void set_nat_params(struct adapter *adap, struct filter_entry *f,
			   unsigned int tid, bool dip, bool sip, bool dp,
			   bool sp)
{
	u8 *nat_lp = (u8 *)&f->fs.nat_lport;
	u8 *nat_fp = (u8 *)&f->fs.nat_fport;

	if (dip) {
		if (f->fs.type) {
			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
				      WORD_MASK, f->fs.nat_lip[15] |
				      f->fs.nat_lip[14] << 8 |
				      f->fs.nat_lip[13] << 16 |
				      (u64)f->fs.nat_lip[12] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
				      WORD_MASK, f->fs.nat_lip[11] |
				      f->fs.nat_lip[10] << 8 |
				      f->fs.nat_lip[9] << 16 |
				      (u64)f->fs.nat_lip[8] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
				      WORD_MASK, f->fs.nat_lip[7] |
				      f->fs.nat_lip[6] << 8 |
				      f->fs.nat_lip[5] << 16 |
				      (u64)f->fs.nat_lip[4] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
				      WORD_MASK, f->fs.nat_lip[3] |
				      f->fs.nat_lip[2] << 8 |
				      f->fs.nat_lip[1] << 16 |
				      (u64)f->fs.nat_lip[0] << 24, 1);
		} else {
			set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
				      WORD_MASK, f->fs.nat_lip[3] |
				      f->fs.nat_lip[2] << 8 |
				      f->fs.nat_lip[1] << 16 |
				      (u64)f->fs.nat_lip[0] << 24, 1);
		}
	}

	if (sip) {
		if (f->fs.type) {
			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W,
				      WORD_MASK, f->fs.nat_fip[15] |
				      f->fs.nat_fip[14] << 8 |
				      f->fs.nat_fip[13] << 16 |
				      (u64)f->fs.nat_fip[12] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
				      WORD_MASK, f->fs.nat_fip[11] |
				      f->fs.nat_fip[10] << 8 |
				      f->fs.nat_fip[9] << 16 |
				      (u64)f->fs.nat_fip[8] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
				      WORD_MASK, f->fs.nat_fip[7] |
				      f->fs.nat_fip[6] << 8 |
				      f->fs.nat_fip[5] << 16 |
				      (u64)f->fs.nat_fip[4] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
				      WORD_MASK, f->fs.nat_fip[3] |
				      f->fs.nat_fip[2] << 8 |
				      f->fs.nat_fip[1] << 16 |
				      (u64)f->fs.nat_fip[0] << 24, 1);
		} else {
			set_tcb_field(adap, f, tid,
				      TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W,
				      WORD_MASK, f->fs.nat_fip[3] |
				      f->fs.nat_fip[2] << 8 |
				      f->fs.nat_fip[1] << 16 |
				      (u64)f->fs.nat_fip[0] << 24, 1);
		}
	}

	set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
		      (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
		      (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0),
		      1);
}

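/* Validate filter spec against configuration done on the card. */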
static int validate_filter(struct net_device *dev,
			   struct ch_filter_specification *fs)
{
	struct adapter *adapter = netdev2adap(dev);
	u32 fconf, iconf;

	/* Check for unsupported fields. */
	iconf = adapter->params.tp.ingress_config;
	fconf = fs->hash ? adapter->params.tp.filter_mask :
			   adapter->params.tp.vlan_pri_map;

	if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
	    unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
	    unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
	    unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
			fs->mask.ethtype) ||
	    unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
	    unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
			fs->mask.matchtype) ||
	    unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
	    unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
			fs->mask.pfvf_vld) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
			fs->mask.ovlan_vld) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.encap_vld,
			fs->mask.encap_vld) ||
	    unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
		return -EOPNOTSUPP;

	/* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the
	 * Outer VLAN Tag and the PF/VF/VFvld fields, selected by VNIC_F in
	 * TP INGRESS_CONFIG, so at most one of {pfvf_vld, ovlan_vld,
	 * encap_vld} may be requested in a single filter.
	 */
	if ((is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
	     is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) ||
	    (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
	     is_field_set(fs->val.encap_vld, fs->mask.encap_vld)) ||
	    (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
	     is_field_set(fs->val.encap_vld, fs->mask.encap_vld)))
		return -EOPNOTSUPP;
	if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
	    (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
	     (iconf & VNIC_F)))
		return -EOPNOTSUPP;
	if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
		return -ERANGE;
	fs->mask.pf &= 0x7;
	fs->mask.vf &= 0x7f;

	/* If the user is requesting that the filter action loop
	 * matching packets back out one of our ports, make sure that
	 * the egress port is in range.
	 */
	if (fs->action == FILTER_SWITCH &&
	    fs->eport >= adapter->params.nports)
		return -ERANGE;

	/* Don't allow various trivially obvious bogus out-of-range values. */
	if (fs->val.iport >= adapter->params.nports)
		return -ERANGE;

	/* T4 doesn't support removing VLAN Tags for loop back filters. */
	if (is_t4(adapter->params.chip) &&
	    fs->action == FILTER_SWITCH &&
	    (fs->newvlan == VLAN_REMOVE ||
	     fs->newvlan == VLAN_REWRITE))
		return -EOPNOTSUPP;

	/* Tunnel match (encap_vld) is only available from T6 onwards. */
	if (fs->val.encap_vld &&
	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return -EOPNOTSUPP;
	return 0;
}

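/* Map the user's steering request onto an absolute ingress queue ID,
 * returning a negative errno if the request is inconsistent.
 */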
static int get_filter_steerq(struct net_device *dev,
			     struct ch_filter_specification *fs)
{
	struct adapter *adapter = netdev2adap(dev);
	int iq;

	/* If the user has requested steering matching Ingress Packets
	 * to a specific Queue Set, we need to make sure it's in range
	 * for the port and map that into the Absolute Queue ID of the
	 * Queue Set's Response Queue.
	 */
	if (!fs->dirsteer) {
		if (fs->iq)
			return -EINVAL;
		iq = 0;
	} else {
		struct port_info *pi = netdev_priv(dev);

		/* If the iq id is greater than the number of qsets,
		 * then assume it is an absolute qid.
		 */
		if (fs->iq < pi->nqsets)
			iq = adapter->sge.ethrxq[pi->first_qset +
						 fs->iq].rspq.abs_id;
		else
			iq = fs->iq;
	}

	return iq;
}

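/* Read a filter's packet count, and its byte count where the hardware
 * maintains one, straight out of the filter's TCB in adapter memory.
 */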
static int get_filter_count(struct adapter *adapter, unsigned int fidx,
			    u64 *pkts, u64 *bytes, bool hash)
{
	unsigned int tcb_base, tcbaddr;
	unsigned int word_offset;
	struct filter_entry *f;
	__be64 be64_byte_count;
	int ret;

	tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
	if (is_hashfilter(adapter) && hash) {
		if (tid_out_of_range(&adapter->tids, fidx))
			return -E2BIG;
		f = adapter->tids.tid_tab[fidx - adapter->tids.tid_base];
		if (!f)
			return -EINVAL;
	} else {
		if ((fidx != (adapter->tids.nftids + adapter->tids.nsftids +
			      adapter->tids.nhpftids - 1)) &&
		    fidx >= (adapter->tids.nftids + adapter->tids.nhpftids))
			return -E2BIG;

		if (fidx < adapter->tids.nhpftids)
			f = &adapter->tids.hpftid_tab[fidx];
		else
			f = &adapter->tids.ftid_tab[fidx -
						    adapter->tids.nhpftids];
		if (!f->valid)
			return -EINVAL;
	}
	tcbaddr = tcb_base + f->tid * TCB_SIZE;

	spin_lock(&adapter->win0_lock);
	if (is_t4(adapter->params.chip)) {
		__be64 be64_count;

		/* T4 doesn't maintain byte counts in hw */
		*bytes = 0;

		/* Get pkts */
		word_offset = 4;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be64_count),
				   (__be32 *)&be64_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*pkts = be64_to_cpu(be64_count);
	} else {
		__be32 be32_count;

		/* Get bytes */
		word_offset = 4;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be64_byte_count),
				   &be64_byte_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*bytes = be64_to_cpu(be64_byte_count);

		/* Get pkts */
		word_offset = 6;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be32_count),
				   &be32_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*pkts = (u64)be32_to_cpu(be32_count);
	}

out:
	spin_unlock(&adapter->win0_lock);
	return ret;
}

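/* Exported wrapper around get_filter_count() for querying filter hit and
 * byte counters by filter index.
 */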
int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
			      u64 *hitcnt, u64 *bytecnt, bool hash)
{
	struct adapter *adapter = netdev2adap(dev);

	return get_filter_count(adapter, fidx, hitcnt, bytecnt, hash);
}

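/* Decide whether a rule with priority @prio may be placed at slot @idx
 * without breaking the priority ordering of its neighbouring rules.
 */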
static bool cxgb4_filter_prio_in_range(struct tid_info *t, u32 idx, u8 nslots,
				       u32 prio)
{
	struct filter_entry *prev_tab, *next_tab, *prev_fe, *next_fe;
	u32 prev_ftid, next_ftid;

	/* Only insert the rule if both of the following conditions
	 * are met:
	 * 1. The immediately previous rule has priority <= @prio.
	 * 2. The immediately next rule has priority >= @prio.
	 *
	 * The High Priority (HPFILTER) region always has higher lookup
	 * priority than the normal FILTER region, so rules in the HPFILTER
	 * region must have prio values <= rules in the normal FILTER region.
	 */
	if (idx < t->nhpftids) {
		/* Don't insert if there's already a rule present at @idx
		 * in the HPFILTER region.
		 */
		if (test_bit(idx, t->hpftid_bmap))
			return false;

		next_tab = t->hpftid_tab;
		next_ftid = find_next_bit(t->hpftid_bmap, t->nhpftids, idx);
		if (next_ftid >= t->nhpftids) {
			/* No next entry found in the HPFILTER region.
			 * Find the next entry in the normal FILTER
			 * region instead.
			 */
			next_ftid = find_first_bit(t->ftid_bmap, t->nftids);
			if (next_ftid >= t->nftids)
				next_ftid = idx;
			else
				next_tab = t->ftid_tab;
		}

		/* Search for the closest previous entry in the HPFILTER
		 * region only. No entry in the normal FILTER region can
		 * have a prio value smaller than the last entry in the
		 * HPFILTER region.
		 */
		prev_ftid = find_last_bit(t->hpftid_bmap, idx);
		if (prev_ftid >= idx)
			prev_ftid = idx;

		prev_tab = t->hpftid_tab;
	} else {
		idx -= t->nhpftids;

		/* Don't insert if there's already a rule present at @idx
		 * in the normal FILTER region.
		 */
		if (test_bit(idx, t->ftid_bmap))
			return false;

		prev_tab = t->ftid_tab;
		prev_ftid = find_last_bit(t->ftid_bmap, idx);
		if (prev_ftid >= idx) {
			/* No previous entry found in the normal FILTER
			 * region. Search the HPFILTER region instead.
			 */
			prev_ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
			if (prev_ftid >= t->nhpftids)
				prev_ftid = idx;
			else
				prev_tab = t->hpftid_tab;
		}

		/* Search for the closest next entry in the normal FILTER
		 * region only. No entry in the HPFILTER region can have a
		 * prio value larger than the first entry in the normal
		 * FILTER region.
		 */
		next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
		if (next_ftid >= t->nftids)
			next_ftid = idx;

		next_tab = t->ftid_tab;
	}

	next_fe = &next_tab[next_ftid];

	/* The previous entry may belong to an IPv6 rule, which occupies
	 * 4 slots on T5 and 2 slots on T6. Step back to the base slot of
	 * such a rule to read its specification.
	 */
	prev_fe = &prev_tab[prev_ftid & ~(nslots - 1)];
	if (!prev_fe->fs.type)
		prev_fe = &prev_tab[prev_ftid];

	if ((prev_fe->valid && prev_fe->fs.tc_prio > prio) ||
	    (next_fe->valid && next_fe->fs.tc_prio < prio))
		return false;

	return true;
}

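/* Find a free filter slot where a rule with priority @tc_prio can live
 * without violating the relative ordering of the HPFILTER, HASH and
 * normal FILTER lookup regions. Returns the slot index or -ENOMEM.
 */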
int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
			u32 tc_prio)
{
	struct adapter *adap = netdev2adap(dev);
	struct tid_info *t = &adap->tids;
	u32 bmap_ftid, max_ftid;
	struct filter_entry *f;
	unsigned long *bmap;
	bool found = false;
	u8 i, cnt, n;
	int ftid = 0;

	/* IPv4 occupies 1 slot. IPv6 occupies 2 slots on T6 and 4 slots
	 * on earlier chips.
	 */
	n = 1;
	if (family == PF_INET6) {
		n++;
		if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
			n += 2;
	}

	/* There are 3 filter regions in hardware, scanned in the following
	 * order of lookup priority:
	 *
	 * 1. High Priority (HPFILTER) region (highest priority).
	 * 2. HASH region.
	 * 3. Normal FILTER region (lowest priority).
	 *
	 * Within the HPFILTER and normal FILTER regions, a lower slot
	 * index means higher lookup priority. So, a rule must be placed
	 * such that rules with smaller tc_prio values end up in regions
	 * and slots that are scanned earlier, and rules with larger
	 * tc_prio values in regions scanned later; otherwise the wrong
	 * rule could hit first. The search below walks the slots in scan
	 * order and checks the neighbouring rules' priorities before
	 * settling on a slot.
	 */
	spin_lock_bh(&t->ftid_lock);

	ftid = (tc_prio <= t->nhpftids) ? 0 : t->nhpftids;
	max_ftid = t->nftids + t->nhpftids;
	while (ftid < max_ftid) {
		if (ftid < t->nhpftids) {
			/* If the new rule wants the HPFILTER region, but
			 * its prio is greater than that of the highest
			 * priority rule in the HASH region, or there are
			 * not enough slots left in the HPFILTER region,
			 * then skip straight to the normal FILTER region.
			 */
			if ((t->tc_hash_tids_max_prio &&
			     tc_prio > t->tc_hash_tids_max_prio) ||
			    (ftid + n) > t->nhpftids) {
				ftid = t->nhpftids;
				continue;
			}

			bmap = t->hpftid_bmap;
			bmap_ftid = ftid;
		} else if (hash_en) {
			/* Ensure that the proposed prio is >= the prio of
			 * the last rule in the HPFILTER region.
			 */
			ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
			if (ftid < t->nhpftids) {
				f = &t->hpftid_tab[ftid];
				if (f->valid && tc_prio < f->fs.tc_prio)
					break;
			}

			/* Ensure that the proposed prio is <= the prio of
			 * the first rule in the normal FILTER region.
			 */
			ftid = find_first_bit(t->ftid_bmap, t->nftids);
			if (ftid < t->nftids) {
				f = &t->ftid_tab[ftid];
				if (f->valid && tc_prio > f->fs.tc_prio)
					break;
			}

			found = true;
			ftid = t->nhpftids;
			goto out_unlock;
		} else {
			/* If the new rule wants the normal FILTER region,
			 * but its prio is less than that of the highest
			 * priority rule in the HASH region, then reject
			 * the rule.
			 */
			if (t->tc_hash_tids_max_prio &&
			    tc_prio < t->tc_hash_tids_max_prio)
				break;

			if (ftid + n > max_ftid)
				break;

			bmap = t->ftid_bmap;
			bmap_ftid = ftid - t->nhpftids;
		}

		cnt = 0;
		for (i = 0; i < n; i++) {
			if (test_bit(bmap_ftid + i, bmap))
				break;
			cnt++;
		}

		if (cnt == n) {
			/* Ensure the new rule's prio doesn't conflict
			 * with the existing neighbouring rules.
			 */
			if (cxgb4_filter_prio_in_range(t, ftid, n,
						       tc_prio)) {
				ftid &= ~(n - 1);
				found = true;
				break;
			}
		}

		ftid += n;
	}

out_unlock:
	spin_unlock_bh(&t->ftid_lock);
	return found ? ftid : -ENOMEM;
}

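/* Reserve and release slots in the TCAM filter bitmaps. An IPv4 filter
 * takes a single slot; an IPv6 filter takes an aligned region of 4 slots
 * before T6 and 2 slots from T6 onwards.
 */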
static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
			  unsigned int chip_ver)
{
	spin_lock_bh(&t->ftid_lock);

	if (test_bit(fidx, t->ftid_bmap)) {
		spin_unlock_bh(&t->ftid_lock);
		return -EBUSY;
	}

	if (family == PF_INET) {
		__set_bit(fidx, t->ftid_bmap);
	} else {
		if (chip_ver < CHELSIO_T6)
			bitmap_allocate_region(t->ftid_bmap, fidx, 2);
		else
			bitmap_allocate_region(t->ftid_bmap, fidx, 1);
	}

	spin_unlock_bh(&t->ftid_lock);
	return 0;
}

static int cxgb4_set_hpftid(struct tid_info *t, int fidx, int family)
{
	spin_lock_bh(&t->ftid_lock);

	if (test_bit(fidx, t->hpftid_bmap)) {
		spin_unlock_bh(&t->ftid_lock);
		return -EBUSY;
	}

	if (family == PF_INET)
		__set_bit(fidx, t->hpftid_bmap);
	else
		bitmap_allocate_region(t->hpftid_bmap, fidx, 1);

	spin_unlock_bh(&t->ftid_lock);
	return 0;
}

static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
			     unsigned int chip_ver)
{
	spin_lock_bh(&t->ftid_lock);
	if (family == PF_INET) {
		__clear_bit(fidx, t->ftid_bmap);
	} else {
		if (chip_ver < CHELSIO_T6)
			bitmap_release_region(t->ftid_bmap, fidx, 2);
		else
			bitmap_release_region(t->ftid_bmap, fidx, 1);
	}
	spin_unlock_bh(&t->ftid_lock);
}

static void cxgb4_clear_hpftid(struct tid_info *t, int fidx, int family)
{
	spin_lock_bh(&t->ftid_lock);

	if (family == PF_INET)
		__clear_bit(fidx, t->hpftid_bmap);
	else
		bitmap_release_region(t->hpftid_bmap, fidx, 1);

	spin_unlock_bh(&t->ftid_lock);
}

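/* Delete the filter at a specified index. */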
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct fw_filter_wr *fwr;
	struct filter_entry *f;
	struct sk_buff *skb;
	unsigned int len;

	if (fidx < adapter->tids.nhpftids)
		f = &adapter->tids.hpftid_tab[fidx];
	else
		f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];

	len = sizeof(*fwr);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	fwr = __skb_put(skb, len);
	t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

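/* Send a Work Request to write the filter at the specified index. We
 * construct a Firmware Filter Work Request to have the work done and put
 * the filter into "pending" mode, which prevents any further actions
 * against it until we get a reply from the firmware on the completion
 * status of the request.
 */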
int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct fw_filter2_wr *fwr;
	struct filter_entry *f;
	struct sk_buff *skb;

	if (fidx < adapter->tids.nhpftids)
		f = &adapter->tids.hpftid_tab[fidx];
	else
		f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	/* If the new filter requires loopback Source MAC rewriting then
	 * we need to allocate a SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			if (f->l2t) {
				cxgb4_l2t_release(f->l2t);
				f->l2t = NULL;
			}
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	fwr = __skb_put_zero(skb, sizeof(*fwr));

	/* Construct the work request.
	 *
	 * Most of the work here is translating the ch_filter_specification
	 * into the Firmware Filter Work Request; the definition of that
	 * specification lives in the driver's private headers, so the
	 * translation stays in line here.
	 */
	if (adapter->params.filter2_wr_support)
		fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR));
	else
		fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(f->tid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	if (f->fs.newsmac)
		fwr->smac_sel = f->smt->idx;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);

	if (adapter->params.filter2_wr_support) {
		u8 *nat_lp = (u8 *)&f->fs.nat_lport;
		u8 *nat_fp = (u8 *)&f->fs.nat_fport;

		fwr->natmode_to_ulp_type =
			FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
						 ULP_MODE_TCPDDP :
						 ULP_MODE_NONE) |
			FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
		fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8);
		fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8);
	}

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}

/* Return an error number if the indicated filter isn't writable. */
int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid). This checks for all
 * the common problems with doing this, like the filter being locked or
 * already pending in another operation.
 */
int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids +
		    adapter->tids.nhpftids)
		return -EINVAL;

	if (fidx < adapter->tids.nhpftids)
		f = &adapter->tids.hpftid_tab[fidx];
	else
		f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}

/* Clear a filter and release any of its resources that we own. This also
 * clears the filter's "pending" status.
 */
void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	struct port_info *pi = netdev_priv(f->dev);

	/* If the filter has loopback rewriting rules then we'll need to free
	 * any existing L2T, SMT, CLIP entries of the filter rule.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	if (f->smt)
		cxgb4_smt_release(f->smt);

	if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
		t4_free_encap_mac_filt(adap, pi->viid,
				       f->fs.val.ovlan & 0x1ff, 0);

	if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
		cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

void clear_all_filters(struct adapter *adapter)
{
	struct net_device *dev = adapter->port[0];
	unsigned int i;

	if (adapter->tids.hpftid_tab) {
		struct filter_entry *f = &adapter->tids.hpftid_tab[0];

		for (i = 0; i < adapter->tids.nhpftids; i++, f++)
			if (f->valid || f->pending)
				cxgb4_del_filter(dev, i, &f->fs);
	}

	if (adapter->tids.ftid_tab) {
		struct filter_entry *f = &adapter->tids.ftid_tab[0];
		unsigned int max_ftid = adapter->tids.nftids +
					adapter->tids.nsftids +
					adapter->tids.nhpftids;

		/* Clear all TCAM filters */
		for (i = adapter->tids.nhpftids; i < max_ftid; i++, f++)
			if (f->valid || f->pending)
				cxgb4_del_filter(dev, i, &f->fs);
	}

	/* Clear all hash filters */
	if (is_hashfilter(adapter) && adapter->tids.tid_tab) {
		struct filter_entry *f;
		unsigned int sb;

		for (i = adapter->tids.hash_base;
		     i < adapter->tids.ntids; i++) {
			f = (struct filter_entry *)
				adapter->tids.tid_tab[i];

			if (f && (f->valid || f->pending))
				cxgb4_del_filter(dev, f->tid, &f->fs);
		}

		sb = adapter->tids.stid_base;
		for (i = 0; i < sb; i++) {
			f = (struct filter_entry *)adapter->tids.tid_tab[i];

			if (f && (f->valid || f->pending))
				cxgb4_del_filter(dev, f->tid, &f->fs);
		}
	}
}

/* Fill up default masks for set match fields. */
static void fill_default_mask(struct ch_filter_specification *fs)
{
	unsigned int lip = 0, lip_mask = 0;
	unsigned int fip = 0, fip_mask = 0;
	unsigned int i;

	if (fs->val.iport && !fs->mask.iport)
		fs->mask.iport |= ~0;
	if (fs->val.fcoe && !fs->mask.fcoe)
		fs->mask.fcoe |= ~0;
	if (fs->val.matchtype && !fs->mask.matchtype)
		fs->mask.matchtype |= ~0;
	if (fs->val.macidx && !fs->mask.macidx)
		fs->mask.macidx |= ~0;
	if (fs->val.ethtype && !fs->mask.ethtype)
		fs->mask.ethtype |= ~0;
	if (fs->val.ivlan && !fs->mask.ivlan)
		fs->mask.ivlan |= ~0;
	if (fs->val.ovlan && !fs->mask.ovlan)
		fs->mask.ovlan |= ~0;
	if (fs->val.frag && !fs->mask.frag)
		fs->mask.frag |= ~0;
	if (fs->val.tos && !fs->mask.tos)
		fs->mask.tos |= ~0;
	if (fs->val.proto && !fs->mask.proto)
		fs->mask.proto |= ~0;
	if (fs->val.pfvf_vld && !fs->mask.pfvf_vld)
		fs->mask.pfvf_vld |= ~0;
	if (fs->val.pf && !fs->mask.pf)
		fs->mask.pf |= ~0;
	if (fs->val.vf && !fs->mask.vf)
		fs->mask.vf |= ~0;

	for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
		lip |= fs->val.lip[i];
		lip_mask |= fs->mask.lip[i];
		fip |= fs->val.fip[i];
		fip_mask |= fs->mask.fip[i];
	}

	if (lip && !lip_mask)
		memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));

	if (fip && !fip_mask)
		memset(fs->mask.fip, ~0, sizeof(fs->mask.fip));

	if (fs->val.lport && !fs->mask.lport)
		fs->mask.lport = ~0;
	if (fs->val.fport && !fs->mask.fport)
		fs->mask.fport = ~0;
}

static bool is_addr_all_mask(u8 *ipmask, int family)
{
	if (family == AF_INET) {
		struct in_addr *addr;

		addr = (struct in_addr *)ipmask;
		if (addr->s_addr == htonl(0xffffffff))
			return true;
	} else if (family == AF_INET6) {
		struct in6_addr *addr6;

		addr6 = (struct in6_addr *)ipmask;
		if (addr6->s6_addr32[0] == htonl(0xffffffff) &&
		    addr6->s6_addr32[1] == htonl(0xffffffff) &&
		    addr6->s6_addr32[2] == htonl(0xffffffff) &&
		    addr6->s6_addr32[3] == htonl(0xffffffff))
			return true;
	}
	return false;
}

static bool is_inaddr_any(u8 *ip, int family)
{
	int addr_type;

	if (family == AF_INET) {
		struct in_addr *addr;

		addr = (struct in_addr *)ip;
		if (addr->s_addr == htonl(INADDR_ANY))
			return true;
	} else if (family == AF_INET6) {
		struct in6_addr *addr6;

		addr6 = (struct in6_addr *)ip;
		addr_type = ipv6_addr_type((const struct in6_addr *)addr6);
		if (addr_type == IPV6_ADDR_ANY)
			return true;
	}
	return false;
}

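/* Decide whether a filter spec can be installed as a hash (exact-match)
 * filter: every field present in the configured hash filter tuple mask
 * must be matched exactly, and no field outside that mask may be matched.
 */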
bool is_filter_exact_match(struct adapter *adap,
			   struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u64 ntuple_mask = 0;

	if (!is_hashfilter(adap))
		return false;

	if ((atomic_read(&adap->tids.hash_tids_in_use) +
	     atomic_read(&adap->tids.tids_in_use)) >=
	    (adap->tids.nhash + (adap->tids.stid_base - adap->tids.tid_base)))
		return false;

	/* Keep tunnel VNI match disabled for hash-filters for now */
	if (fs->mask.encap_vld)
		return false;

	if (fs->type) {
		if (is_inaddr_any(fs->val.fip, AF_INET6) ||
		    !is_addr_all_mask(fs->mask.fip, AF_INET6))
			return false;

		if (is_inaddr_any(fs->val.lip, AF_INET6) ||
		    !is_addr_all_mask(fs->mask.lip, AF_INET6))
			return false;
	} else {
		if (is_inaddr_any(fs->val.fip, AF_INET) ||
		    !is_addr_all_mask(fs->mask.fip, AF_INET))
			return false;

		if (is_inaddr_any(fs->val.lip, AF_INET) ||
		    !is_addr_all_mask(fs->mask.lip, AF_INET))
			return false;
	}

	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return false;

	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return false;

	/* calculate tuple mask and compare with mask configured in hw */
	if (tp->fcoe_shift >= 0)
		ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;

	if (tp->port_shift >= 0)
		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;

	if (tp->vnic_shift >= 0) {
		if ((adap->params.tp.ingress_config & VNIC_F))
			ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
		else
			ntuple_mask |= (u64)fs->mask.ovlan_vld <<
				tp->vnic_shift;
	}

	if (tp->vlan_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;

	if (tp->tos_shift >= 0)
		ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;

	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;

	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;

	if (tp->macmatch_shift >= 0)
		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;

	if (tp->matchtype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;

	if (tp->frag_shift >= 0)
		ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;

	if (ntuple_mask != hash_filter_mask)
		return false;

	return true;
}

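/* Build the Compressed Filter Tuple that selects this rule in the hash
 * filter region.
 */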
static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
			      struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;

	/* Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && fs->mask.ivlan)
		ntuple |= (u64)(FT_VLAN_VLD_F | fs->val.ivlan) <<
			  tp->vlan_shift;

	if (tp->port_shift >= 0 && fs->mask.iport)
		ntuple |= (u64)fs->val.iport << tp->port_shift;

	if (tp->protocol_shift >= 0) {
		if (!fs->val.proto)
			ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
		else
			ntuple |= (u64)fs->val.proto << tp->protocol_shift;
	}

	if (tp->tos_shift >= 0 && fs->mask.tos)
		ntuple |= (u64)(fs->val.tos) << tp->tos_shift;

	if (tp->vnic_shift >= 0) {
		if ((adap->params.tp.ingress_config & USE_ENC_IDX_F) &&
		    fs->mask.encap_vld)
			ntuple |= (u64)((fs->val.encap_vld << 16) |
					(fs->val.ovlan)) << tp->vnic_shift;
		else if ((adap->params.tp.ingress_config & VNIC_F) &&
			 fs->mask.pfvf_vld)
			ntuple |= (u64)((fs->val.pfvf_vld << 16) |
					(fs->val.pf << 13) |
					(fs->val.vf)) << tp->vnic_shift;
		else
			ntuple |= (u64)((fs->val.ovlan_vld << 16) |
					(fs->val.ovlan)) << tp->vnic_shift;
	}

	if (tp->macmatch_shift >= 0 && fs->mask.macidx)
		ntuple |= (u64)(fs->val.macidx) << tp->macmatch_shift;

	if (tp->ethertype_shift >= 0 && fs->mask.ethtype)
		ntuple |= (u64)(fs->val.ethtype) << tp->ethertype_shift;

	if (tp->matchtype_shift >= 0 && fs->mask.matchtype)
		ntuple |= (u64)(fs->val.matchtype) << tp->matchtype_shift;

	if (tp->frag_shift >= 0 && fs->mask.frag)
		ntuple |= (u64)(fs->val.frag) << tp->frag_shift;

	if (tp->fcoe_shift >= 0 && fs->mask.fcoe)
		ntuple |= (u64)(fs->val.fcoe) << tp->fcoe_shift;
	return ntuple;
}

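/* Build a T6 ACT_OPEN_REQ6 CPL message to install an IPv6 hash filter. */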
static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
			     unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req6 *t6req = NULL;
	struct cpl_act_open_req6 *req = NULL;

	t6req = (struct cpl_t6_act_open_req6 *)__skb_put(skb, sizeof(*t6req));
	INIT_TP_WR(t6req, 0);
	req = (struct cpl_act_open_req6 *)t6req;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip_hi = *(__be64 *)(&f->fs.val.lip);
	req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1);
	req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip);
	req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1);
	req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				DELACK_V(f->fs.hitcnts) |
				L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
				SMAC_SEL_V((cxgb4_port_viid(f->dev) &
					    0x7F) << 1) |
				TX_CHAN_V(f->fs.eport) |
				NO_CONG_V(f->fs.rpttid) |
				ULP_MODE_V(f->fs.nat_mode ?
					   ULP_MODE_TCPDDP : ULP_MODE_NONE) |
				TCAM_BYPASS_F | NON_OFFLOAD_F);
	t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
								      f->dev)));
	t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
			    RSS_QUEUE_V(f->fs.iq) |
			    TX_QUEUE_V(f->fs.nat_mode) |
			    T5_OPT_2_VALID_F |
			    RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
			    PACE_V((f->fs.maskhash) |
				   ((f->fs.dirsteerhash) << 1)));
}

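/* Build a T6 ACT_OPEN_REQ CPL message to install an IPv4 hash filter. */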
static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
			    unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req *req = NULL;

	t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req));
	INIT_TP_WR(t6req, 0);
	req = (struct cpl_act_open_req *)t6req;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	memcpy(&req->local_ip, f->fs.val.lip, 4);
	memcpy(&req->peer_ip, f->fs.val.fip, 4);
	req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				DELACK_V(f->fs.hitcnts) |
				L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
				SMAC_SEL_V((cxgb4_port_viid(f->dev) &
					    0x7F) << 1) |
				TX_CHAN_V(f->fs.eport) |
				NO_CONG_V(f->fs.rpttid) |
				ULP_MODE_V(f->fs.nat_mode ?
					   ULP_MODE_TCPDDP : ULP_MODE_NONE) |
				TCAM_BYPASS_F | NON_OFFLOAD_F);

	t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
								      f->dev)));
	t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
			    RSS_QUEUE_V(f->fs.iq) |
			    TX_QUEUE_V(f->fs.nat_mode) |
			    T5_OPT_2_VALID_F |
			    RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
			    PACE_V((f->fs.maskhash) |
				   ((f->fs.dirsteerhash) << 1)));
}

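/* Install a filter in the hash (exact-match) region. The request is sent
 * as an ACT_OPEN work request; completion is reported asynchronously via
 * hash_filter_rpl().
 */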
static int cxgb4_set_hash_filter(struct net_device *dev,
				 struct ch_filter_specification *fs,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	struct port_info *pi = netdev_priv(dev);
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct sk_buff *skb;
	int iq, atid, size;
	int ret = 0;
	u32 iconf;

	fill_default_mask(fs);
	ret = validate_filter(dev, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);
	if (iq < 0)
		return iq;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	f->fs = *fs;
	f->ctx = ctx;
	f->dev = dev;
	f->fs.iq = iq;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto out_err;
		}
	}

	/* If the new filter requires loopback Source MAC rewriting then
	 * we need to allocate a SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			if (f->l2t) {
				cxgb4_l2t_release(f->l2t);
				f->l2t = NULL;
			}
			ret = -ENOMEM;
			goto free_l2t;
		}
	}

	atid = cxgb4_alloc_atid(t, f);
	if (atid < 0) {
		ret = atid;
		goto free_smt;
	}

	iconf = adapter->params.tp.ingress_config;
	if (iconf & VNIC_F) {
		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	} else if (iconf & USE_ENC_IDX_F) {
		if (f->fs.val.encap_vld) {
			struct port_info *pi = netdev_priv(f->dev);
			u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };

			/* allocate MPS TCAM entry */
			ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
						      match_all_mac,
						      match_all_mac,
						      f->fs.val.vni,
						      f->fs.mask.vni,
						      0, 1, 1);
			if (ret < 0)
				goto free_atid;

			f->fs.val.ovlan = ret;
			f->fs.mask.ovlan = 0xffff;
			f->fs.val.ovlan_vld = 1;
			f->fs.mask.ovlan_vld = 1;
		}
	}

	size = sizeof(struct cpl_t6_act_open_req);
	if (f->fs.type) {
		size = sizeof(struct cpl_t6_act_open_req6);
		ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
		if (ret)
			goto free_mps;

		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto free_clip;
		}

		mk_act_open_req6(f, skb,
				 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
				 adapter);
	} else {
		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto free_mps;
		}

		mk_act_open_req(f, skb,
				((adapter->sge.fw_evtq.abs_id << 14) | atid),
				adapter);
	}

	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;

free_clip:
	cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);

free_mps:
	if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
		t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1);

free_atid:
	cxgb4_free_atid(t, atid);

free_smt:
	if (f->smt) {
		cxgb4_smt_release(f->smt);
		f->smt = NULL;
	}

free_l2t:
	if (f->l2t) {
		cxgb4_l2t_release(f->l2t);
		f->l2t = NULL;
	}

out_err:
	kfree(f);
	return ret;
}

/* Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware. Return 0 on success, an error number
 * otherwise. We attach any provided filter context to the internal filter
 * specification in order to facilitate signaling completion of the request.
 */
int __cxgb4_set_filter(struct net_device *dev, int ftid,
		       struct ch_filter_specification *fs,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int max_fidx, fidx, chip_ver;
	int iq, ret, filter_id = ftid;
	struct filter_entry *f, *tab;
	u32 iconf;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	if (fs->hash) {
		if (is_hashfilter(adapter))
			return cxgb4_set_hash_filter(dev, fs, ctx);
		netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
			   __func__);
		return -EINVAL;
	}

	max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	fill_default_mask(fs);

	ret = validate_filter(dev, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);
	if (iq < 0)
		return iq;

	if (fs->prio) {
		tab = &adapter->tids.hpftid_tab[0];
	} else {
		tab = &adapter->tids.ftid_tab[0];
		filter_id = ftid - adapter->tids.nhpftids;
	}

	/* IPv6 filters occupy four slots and must be aligned on four-slot
	 * boundaries on T5 and earlier. On T6 they occupy two slots and
	 * must be aligned on two-slot boundaries. IPv4 filters can occupy
	 * any slot.
	 */
	if (fs->type == 0) { /* IPv4 */
		/* Check that the requested slot doesn't fall inside the
		 * slot range of an existing IPv6 filter; the base slot of
		 * an IPv6 filter has fs.type set.
		 */
		if (chip_ver < CHELSIO_T6)
			fidx = filter_id & ~0x3;
		else
			fidx = filter_id & ~0x1;

		if (fidx != filter_id && tab[fidx].fs.type) {
			f = &tab[fidx];
			if (f->valid) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
					fidx, fidx + 3);
				return -EINVAL;
			}
		}
	} else { /* IPv6 */
		if (chip_ver < CHELSIO_T6) {
			/* Ensure that the IPv6 filter is aligned on a
			 * multiple of 4 boundary.
			 */
			if (filter_id & 0x3) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
				return -EINVAL;
			}

			/* Check all except the base overlapping IPv4 filter
			 * slots.
			 */
			for (fidx = filter_id + 1; fidx < filter_id + 4;
			     fidx++) {
				f = &tab[fidx];
				if (f->valid) {
					dev_err(adapter->pdev_dev,
						"Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
						fidx);
					return -EBUSY;
				}
			}
		} else {
			/* On T6, an IPv6 filter occupies 2 slots and must
			 * be aligned on a 2-slot boundary.
			 */
			if (filter_id & 0x1)
				return -EINVAL;

			fidx = filter_id + 1;
			f = &tab[fidx];
			if (f->valid) {
				pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
				       __func__, fidx);
				return -EBUSY;
			}
		}
	}

	/* Check to make sure that the provided filter index is not
	 * already in use by someone else.
	 */
	f = &tab[filter_id];
	if (f->valid)
		return -EBUSY;

	if (fs->prio) {
		fidx = filter_id + adapter->tids.hpftid_base;
		ret = cxgb4_set_hpftid(&adapter->tids, filter_id,
				       fs->type ? PF_INET6 : PF_INET);
	} else {
		fidx = filter_id + adapter->tids.ftid_base;
		ret = cxgb4_set_ftid(&adapter->tids, filter_id,
				     fs->type ? PF_INET6 : PF_INET,
				     chip_ver);
	}

	if (ret)
		return ret;

	/* Check to make sure the filter requested is writable. */
	ret = writable_filter(f);
	if (ret)
		goto free_tid;

	if (is_t6(adapter->params.chip) && fs->type &&
	    ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
	    IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
		if (ret)
			goto free_tid;
	}

	/* Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;

	iconf = adapter->params.tp.ingress_config;
	if (iconf & VNIC_F) {
		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	} else if (iconf & USE_ENC_IDX_F) {
		if (f->fs.val.encap_vld) {
			struct port_info *pi = netdev_priv(f->dev);
			u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };

			/* allocate MPS TCAM entry */
			ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
						      match_all_mac,
						      match_all_mac,
						      f->fs.val.vni,
						      f->fs.mask.vni,
						      0, 1, 1);
			if (ret < 0)
				goto free_tid;

			f->fs.val.ovlan = ret;
			f->fs.mask.ovlan = 0x1ff;
			f->fs.val.ovlan_vld = 1;
			f->fs.mask.ovlan_vld = 1;
		}
	}

	/* Attempt to set the filter. If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx;
	ret = set_filter_wr(adapter, ftid);
	if (ret)
		goto free_tid;

	return ret;

free_tid:
	if (f->fs.prio)
		cxgb4_clear_hpftid(&adapter->tids, filter_id,
				   fs->type ? PF_INET6 : PF_INET);
	else
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET,
				 chip_ver);

	clear_filter(adapter, f);
	return ret;
}

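/* Delete a hash filter by marking it pending and sending a combined
 * SET_TCB_FIELD + ABORT_REQ/ABORT_RPL work request to tear down its TCB.
 */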
static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct cpl_abort_req *abort_req;
	struct cpl_abort_rpl *abort_rpl;
	struct cpl_set_tcb_field *req;
	struct ulptx_idata *aligner;
	struct work_request_hdr *wr;
	struct filter_entry *f;
	struct sk_buff *skb;
	unsigned int wrlen;
	int ret;

	netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
		   __func__, filter_id, adapter->tids.nftids);

	if (tid_out_of_range(t, filter_id))
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		netdev_err(dev, "%s: no filter entry for filter_id = %d\n",
			   __func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (!f->valid)
		return -EINVAL;

	f->ctx = ctx;
	f->pending = 1;
	wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
			+ sizeof(*abort_req) + sizeof(*abort_rpl), 16);
	skb = alloc_skb(wrlen, GFP_KERNEL);
	if (!skb) {
		netdev_err(dev, "%s: could not allocate skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
	INIT_ULPTX_WR(req, wrlen, 0, 0);
	wr = (struct work_request_hdr *)req;
	wr++;
	req = (struct cpl_set_tcb_field *)wr;
	mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
		       TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
	aligner = (struct ulptx_idata *)(req + 1);
	abort_req = (struct cpl_abort_req *)(aligner + 1);
	mk_abort_req_ulp(abort_req, f->tid);
	abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
	mk_abort_rpl_ulp(abort_rpl, f->tid);
	t4_ofld_send(adapter, skb);
	return 0;
}

/* Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise. We attach any provided
 * filter context to the internal filter specification in order to
 * facilitate signaling completion of the request.
 */
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
		       struct ch_filter_specification *fs,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int max_fidx, chip_ver;
	struct filter_entry *f;
	int ret;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	if (fs && fs->hash) {
		if (is_hashfilter(adapter))
			return cxgb4_del_hash_filter(dev, filter_id, ctx);
		netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
			   __func__);
		return -EINVAL;
	}

	max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	if (filter_id < adapter->tids.nhpftids)
		f = &adapter->tids.hpftid_tab[filter_id];
	else
		f = &adapter->tids.ftid_tab[filter_id -
					    adapter->tids.nhpftids];

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		f->ctx = ctx;
		if (f->fs.prio)
			cxgb4_clear_hpftid(&adapter->tids,
					   f->tid - adapter->tids.hpftid_base,
					   f->fs.type ? PF_INET6 : PF_INET);
		else
			cxgb4_clear_ftid(&adapter->tids,
					 f->tid - adapter->tids.ftid_base,
					 f->fs.type ? PF_INET6 : PF_INET,
					 chip_ver);
		return del_filter_wr(adapter, filter_id);
	}

	/* If the caller has passed in a Completion Context then we need to
	 * mark it as a successful completion so they don't stall waiting
	 * for it.
	 */
	if (ctx) {
		ctx->result = 0;
		complete(&ctx->completion);
	}
	return ret;
}

int cxgb4_set_filter(struct net_device *dev, int filter_id,
		     struct ch_filter_specification *fs)
{
	struct filter_ctx ctx;
	int ret;

	init_completion(&ctx.completion);

	ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
	if (ret)
		goto out;

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	ret = ctx.result;
out:
	return ret;
}

int cxgb4_del_filter(struct net_device *dev, int filter_id,
		     struct ch_filter_specification *fs)
{
	struct filter_ctx ctx;
	int ret;

	if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
		return 0;

	init_completion(&ctx.completion);

	ret = __cxgb4_del_filter(dev, filter_id, fs, &ctx);
	if (ret)
		goto out;

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	ret = ctx.result;
out:
	return ret;
}

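/* Program the remaining TCB fields (hit counting, rewrite actions and NAT
 * parameters) for a hash filter once its TID is known.
 */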
static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
				struct filter_entry *f)
{
	if (f->fs.hitcnts) {
		set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
			      TCB_TIMESTAMP_V(TCB_TIMESTAMP_M),
			      TCB_TIMESTAMP_V(0ULL),
			      1);
		set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W,
			      TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
			      TCB_RTT_TS_RECENT_AGE_V(0ULL),
			      1);
	}

	if (f->fs.newdmac)
		set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
			      1);

	if (f->fs.newvlan == VLAN_INSERT ||
	    f->fs.newvlan == VLAN_REWRITE)
		set_tcb_tflag(adap, f, tid, TF_CCTRL_RFR_S, 1,
			      1);
	if (f->fs.newsmac)
		configure_filter_smac(adap, f);

	if (f->fs.nat_mode) {
		switch (f->fs.nat_mode) {
		case NAT_MODE_DIP:
			set_nat_params(adap, f, tid, true, false, false, false);
			break;

		case NAT_MODE_DIP_DP:
			set_nat_params(adap, f, tid, true, false, true, false);
			break;

		case NAT_MODE_DIP_DP_SIP:
			set_nat_params(adap, f, tid, true, true, true, false);
			break;

		case NAT_MODE_DIP_DP_SP:
			set_nat_params(adap, f, tid, true, false, true, true);
			break;

		case NAT_MODE_SIP_SP:
			set_nat_params(adap, f, tid, false, true, false, true);
			break;

		case NAT_MODE_DIP_SIP_SP:
			set_nat_params(adap, f, tid, true, true, false, true);
			break;

		case NAT_MODE_ALL:
			set_nat_params(adap, f, tid, true, true, true, true);
			break;

		default:
			pr_err("%s: Invalid NAT mode: %d\n",
			       __func__, f->fs.nat_mode);
			return -EINVAL;
		}
	}
	return 0;
}

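/* Handle an ABORT_RPL_RSS completion for a hash filter deletion. */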
void hash_del_filter_rpl(struct adapter *adap,
			 const struct cpl_abort_rpl_rss *rpl)
{
	unsigned int status = rpl->status;
	struct tid_info *t = &adap->tids;
	unsigned int tid = GET_TID(rpl);
	struct filter_ctx *ctx = NULL;
	struct filter_entry *f;

	dev_dbg(adap->pdev_dev, "%s: status = %u; tid = %u\n",
		__func__, status, tid);

	f = lookup_tid(t, tid);
	if (!f) {
		dev_err(adap->pdev_dev, "%s: could not find filter entry\n",
			__func__);
		return;
	}
	ctx = f->ctx;
	f->ctx = NULL;
	clear_filter(adap, f);
	cxgb4_remove_tid(t, 0, tid, 0);
	kfree(f);
	if (ctx) {
		ctx->result = 0;
		complete(&ctx->completion);
	}
}

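/* Handle an ACT_OPEN_RPL completion for a hash filter creation. */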
void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
{
	unsigned int ftid = TID_TID_G(AOPEN_ATID_G(ntohl(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
	struct tid_info *t = &adap->tids;
	unsigned int tid = GET_TID(rpl);
	struct filter_ctx *ctx = NULL;
	struct filter_entry *f;

	dev_dbg(adap->pdev_dev, "%s: tid = %u; atid = %u; status = %u\n",
		__func__, tid, ftid, status);

	f = lookup_atid(t, ftid);
	if (!f) {
		dev_err(adap->pdev_dev, "%s: could not find filter entry\n",
			__func__);
		return;
	}
	ctx = f->ctx;
	f->ctx = NULL;

	switch (status) {
	case CPL_ERR_NONE:
		f->tid = tid;
		f->pending = 0;
		f->valid = 1;
		cxgb4_insert_tid(t, f, f->tid, 0);
		cxgb4_free_atid(t, ftid);
		if (ctx) {
			ctx->tid = f->tid;
			ctx->result = 0;
		}
		if (configure_filter_tcb(adap, tid, f)) {
			clear_filter(adap, f);
			cxgb4_remove_tid(t, 0, tid, 0);
			kfree(f);
			if (ctx) {
				ctx->result = -EINVAL;
				complete(&ctx->completion);
			}
			return;
		}
		switch (f->fs.action) {
		case FILTER_PASS:
			if (f->fs.dirsteer)
				set_tcb_tflag(adap, f, tid,
					      TF_DIRECT_STEER_S, 1, 1);
			break;
		case FILTER_DROP:
			set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);
			break;
		case FILTER_SWITCH:
			set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1);
			break;
		}

		break;

	default:
		if (status != CPL_ERR_TCAM_FULL)
			dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
				__func__, status);

		if (ctx) {
			if (status == CPL_ERR_TCAM_FULL)
				ctx->result = -ENOSPC;
			else
				ctx->result = -EINVAL;
		}
		clear_filter(adap, f);
		cxgb4_free_atid(t, ftid);
		kfree(f);
	}
	if (ctx)
		complete(&ctx->completion);
}

/* Handle a filter write/deletion reply. */
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int tid = GET_TID(rpl);
	struct filter_entry *f = NULL;
	unsigned int max_fidx;
	int idx;

	max_fidx = adap->tids.nftids + adap->tids.nsftids;

	if (adap->tids.ftid_tab) {
		idx = tid - adap->tids.hpftid_base;
		if (idx < adap->tids.nhpftids) {
			f = &adap->tids.hpftid_tab[idx];
		} else {
			/* Get the index in the normal filter region */
			idx = tid - adap->tids.ftid_base;
			if (idx >= max_fidx)
				return;
			f = &adap->tids.ftid_tab[idx];
			idx += adap->tids.nhpftids;
		}

		if (f->tid != tid)
			return;
	}

	/* We found the filter entry for this tid */
	if (f) {
		unsigned int ret = TCB_COOKIE_G(rpl->cookie);
		struct filter_ctx *ctx;

		/* Pull off any filter operation context attached to the
		 * filter.
		 */
		ctx = f->ctx;
		f->ctx = NULL;

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
			if (ctx)
				ctx->result = 0;
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->pending = 0;
			f->valid = 1;
			if (ctx) {
				ctx->result = 0;
				ctx->tid = idx;
			}
		} else {
			/* Something went wrong. Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
			if (ctx)
				ctx->result = -EINVAL;
		}
		if (ctx)
			complete(&ctx->completion);
	}
}

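/* Enable hash filter support when the hardware configuration allows it. */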
void init_hash_filter(struct adapter *adap)
{
	u32 reg;

	/* On T6, verify the necessary register configs and warn the user in
	 * case of improper config.
	 */
	if (is_t6(adap->params.chip)) {
		if (is_offload(adap)) {
			if (!(t4_read_reg(adap, TP_GLOBAL_CONFIG_A)
			      & ACTIVEFILTERCOUNTS_F)) {
				dev_err(adap->pdev_dev, "Invalid hash filter + ofld config\n");
				return;
			}
		} else {
			reg = t4_read_reg(adap, LE_DB_RSP_CODE_0_A);
			if (TCAM_ACTV_HIT_G(reg) != 4) {
				dev_err(adap->pdev_dev, "Invalid hash filter config\n");
				return;
			}

			reg = t4_read_reg(adap, LE_DB_RSP_CODE_1_A);
			if (HASH_ACTV_HIT_G(reg) != 4) {
				dev_err(adap->pdev_dev, "Invalid hash filter config\n");
				return;
			}
		}
	} else {
		dev_err(adap->pdev_dev, "Hash filter supported only on T6\n");
		return;
	}

	adap->params.hash_filter = 1;
}