// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_flow.h"

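/* To support tunneling entries by PF, the package will append the PF number to
 * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
 */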
static const struct ice_tunnel_type_scan tnls[] = {
	{ TNL_VXLAN,	"TNL_VXLAN_PF" },
	{ TNL_GENEVE,	"TNL_GENEVE_PF" },
	{ TNL_LAST,	"" }
};

static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
	/* SWITCH */
	{
		ICE_SID_XLT0_SW,
		ICE_SID_XLT_KEY_BUILDER_SW,
		ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW,
		ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW,
		ICE_SID_FLD_VEC_SW,
		ICE_SID_CDID_KEY_BUILDER_SW,
		ICE_SID_CDID_REDIR_SW
	},

	/* ACL */
	{
		ICE_SID_XLT0_ACL,
		ICE_SID_XLT_KEY_BUILDER_ACL,
		ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL,
		ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL,
		ICE_SID_FLD_VEC_ACL,
		ICE_SID_CDID_KEY_BUILDER_ACL,
		ICE_SID_CDID_REDIR_ACL
	},

	/* FD */
	{
		ICE_SID_XLT0_FD,
		ICE_SID_XLT_KEY_BUILDER_FD,
		ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD,
		ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD,
		ICE_SID_FLD_VEC_FD,
		ICE_SID_CDID_KEY_BUILDER_FD,
		ICE_SID_CDID_REDIR_FD
	},

	/* RSS */
	{
		ICE_SID_XLT0_RSS,
		ICE_SID_XLT_KEY_BUILDER_RSS,
		ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS,
		ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS,
		ICE_SID_FLD_VEC_RSS,
		ICE_SID_CDID_KEY_BUILDER_RSS,
		ICE_SID_CDID_REDIR_RSS
	},

	/* PE */
	{
		ICE_SID_XLT0_PE,
		ICE_SID_XLT_KEY_BUILDER_PE,
		ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE,
		ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE,
		ICE_SID_FLD_VEC_PE,
		ICE_SID_CDID_KEY_BUILDER_PE,
		ICE_SID_CDID_REDIR_PE
	}
};

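/**
 * ice_sect_id - returns section ID
 * @blk: block type
 * @sect: section type
 *
 * This helper function returns the proper section ID given a block type and a
 * section type.
 */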
static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
{
	return ice_sect_lkup[blk][sect];
}

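/**
 * ice_pkg_val_buf
 * @buf: pointer to the ice buffer
 *
 * This helper function validates a buffer's header.
 */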
static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
	struct ice_buf_hdr *hdr;
	u16 section_count;
	u16 data_end;

	hdr = (struct ice_buf_hdr *)buf->buf;

	/* verify data */
	section_count = le16_to_cpu(hdr->section_count);
	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
		return NULL;

	data_end = le16_to_cpu(hdr->data_end);
	if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
		return NULL;

	return hdr;
}

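/**
 * ice_find_buf_table
 * @ice_seg: pointer to the ice segment
 *
 * Returns the address of the buffer table within the ice segment.
 */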
static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
{
	struct ice_nvm_table *nvms;

	nvms = (struct ice_nvm_table *)
		(ice_seg->device_table +
		 le32_to_cpu(ice_seg->device_table_count));

	return (__force struct ice_buf_table *)
		(nvms->vers + le32_to_cpu(nvms->table_count));
}

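/**
 * ice_pkg_enum_buf - enumerate buffers
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This function will enumerate all the buffers in the ice segment. The first
 * call is made with the ice_seg parameter non-NULL; on subsequent calls,
 * ice_seg is set to NULL which continues the enumeration. When the function
 * returns a NULL pointer, then the end of the buffers has been reached.
 */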
static struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (ice_seg) {
		state->buf_table = ice_find_buf_table(ice_seg);
		if (!state->buf_table)
			return NULL;

		state->buf_idx = 0;
		return ice_pkg_val_buf(state->buf_table->buf_array);
	}

	if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
		return ice_pkg_val_buf(state->buf_table->buf_array +
				       state->buf_idx);
	else
		return NULL;
}

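/**
 * ice_pkg_advance_sect - go to next section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 *
 * This helper function will advance the section within the ice segment,
 * also advancing the buffer if needed.
 */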
static bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
{
	if (!ice_seg && !state->buf)
		return false;

	if (!ice_seg && state->buf)
		if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
			return true;

	state->buf = ice_pkg_enum_buf(ice_seg, state);
	if (!state->buf)
		return false;

	/* start of new buffer, reset section index */
	state->sect_idx = 0;
	return true;
}

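/**
 * ice_pkg_enum_section - enumerate sections
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 *
 * This function will enumerate all the sections of a particular type in the
 * ice segment. The first call is made with the ice_seg parameter non-NULL;
 * on subsequent calls, ice_seg is set to NULL which continues the
 * enumeration. When the function returns a NULL pointer, then the end of the
 * matching sections has been reached.
 */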
static void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		     u32 sect_type)
{
	u16 offset, size;

	if (ice_seg)
		state->type = sect_type;

	if (!ice_pkg_advance_sect(ice_seg, state))
		return NULL;

	/* scan for next matching section */
	while (state->buf->section_entry[state->sect_idx].type !=
	       cpu_to_le32(state->type))
		if (!ice_pkg_advance_sect(NULL, state))
			return NULL;

	/* validate section */
	offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
	if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
		return NULL;

	size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
	if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
		return NULL;

	/* make sure the section fits in the buffer */
	if (offset + size > ICE_PKG_BUF_SIZE)
		return NULL;

	state->sect_type =
		le32_to_cpu(state->buf->section_entry[state->sect_idx].type);

	/* calc pointer to this section */
	state->sect = ((u8 *)state->buf) +
		le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);

	return state->sect;
}

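/**
 * ice_pkg_enum_entry - enumerate entries within a section
 * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
 * @state: pointer to the enum state
 * @sect_type: section type to enumerate
 * @offset: pointer to variable that receives the offset in the table (optional)
 * @handler: function that can extract entries from a section
 *
 * This function will enumerate all the entries in a particular section type
 * in the ice segment. The first call is made with the ice_seg parameter
 * non-NULL; on subsequent calls, ice_seg is set to NULL which continues the
 * enumeration. When the function returns a NULL pointer, then the end of the
 * entries has been reached.
 *
 * Since each section may have a different header and entry size, the handler
 * function is needed to determine the number and location of entries in each
 * section.
 *
 * The offset parameter is optional, but should be used for sections that
 * contain an offset for each section table. For such cases, the section
 * handler function must return the appropriate offset via this pointer.
 */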
static void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
		   u32 sect_type, u32 *offset,
		   void *(*handler)(u32 sect_type, void *section,
				    u32 index, u32 *offset))
{
	void *entry;

	if (ice_seg) {
		if (!handler)
			return NULL;

		if (!ice_pkg_enum_section(ice_seg, state, sect_type))
			return NULL;

		state->entry_idx = 0;
		state->handler = handler;
	} else {
		state->entry_idx++;
	}

	if (!state->handler)
		return NULL;

	/* get entry */
	entry = state->handler(state->sect_type, state->sect, state->entry_idx,
			       offset);
	if (!entry) {
		/* end of a section, look for another section of this type */
		if (!ice_pkg_enum_section(NULL, state, 0))
			return NULL;

		state->entry_idx = 0;
		entry = state->handler(state->sect_type, state->sect,
				       state->entry_idx, offset);
	}

	return entry;
}

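/**
 * ice_boost_tcam_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the boost TCAM entry to be returned
 * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual boost TCAM entries.
 */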
static void *
ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
	struct ice_boost_tcam_section *boost;

	if (!section)
		return NULL;

	if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
		return NULL;

	if (index > ICE_MAX_BST_TCAMS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	boost = section;
	if (index >= le16_to_cpu(boost->count))
		return NULL;

	return boost->tcam + index;
}

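/**
 * ice_find_boost_entry
 * @ice_seg: pointer to the ice segment (non-NULL)
 * @addr: Boost TCAM address of entry to search for
 * @entry: returns pointer to the entry
 *
 * Finds a particular Boost TCAM entry and returns a pointer to that entry
 * if it is found. The ice_seg parameter must not be NULL since the first call
 * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
 */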
static enum ice_status
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
		     struct ice_boost_tcam_entry **entry)
{
	struct ice_boost_tcam_entry *tcam;
	struct ice_pkg_enum state;

	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return ICE_ERR_PARAM;

	do {
		tcam = ice_pkg_enum_entry(ice_seg, &state,
					  ICE_SID_RXPARSER_BOOST_TCAM, NULL,
					  ice_boost_tcam_handler);
		if (tcam && le16_to_cpu(tcam->addr) == addr) {
			*entry = tcam;
			return 0;
		}

		ice_seg = NULL;
	} while (tcam);

	*entry = NULL;
	return ICE_ERR_CFG;
}

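/**
 * ice_label_enum_handler
 * @sect_type: section type
 * @section: pointer to section
 * @index: index of the label entry to be returned
 * @offset: pointer to receive absolute offset, always zero for label sections
 *
 * This is a callback function that can be passed to ice_pkg_enum_entry.
 * Handles enumeration of individual label entries.
 */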
static void *
ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
		       u32 *offset)
{
	struct ice_label_section *labels;

	if (!section)
		return NULL;

	if (index > ICE_MAX_LABELS_IN_BUF)
		return NULL;

	if (offset)
		*offset = 0;

	labels = section;
	if (index >= le16_to_cpu(labels->count))
		return NULL;

	return labels->label + index;
}

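/**
 * ice_enum_labels - enumerate labels of a particular type
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @type: the section type that will contain the label (0 on subsequent calls)
 * @state: ice_pkg_enum structure that will hold the state of the enumeration
 * @value: pointer to a value that will return the label's value if found
 *
 * Enumerates a list of labels in the package. The caller will call
 * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
 * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
 * pointer, then the end of the list has been reached.
 */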
static char *
ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
		u16 *value)
{
	struct ice_label *label;

	/* Check for valid label section on first call */
	if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
		return NULL;

	label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
				   ice_label_enum_handler);
	if (!label)
		return NULL;

	*value = le16_to_cpu(label->value);
	return label->name;
}

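/**
 * ice_init_pkg_hints - initialize package hints based on package contents
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the segment of the package scan (non-NULL)
 *
 * This function will scan the package and determine the tunnel entries
 * available for this PF. The ice_seg parameter must not be NULL since the
 * first call to ice_enum_labels requires a pointer to an actual ice_seg
 * structure.
 */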
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_pkg_enum state;
	char *label_name;
	u16 val;
	int i;

	memset(&hw->tnl, 0, sizeof(hw->tnl));
	memset(&state, 0, sizeof(state));

	if (!ice_seg)
		return;

	label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
				     &val);

	while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
		for (i = 0; tnls[i].type != TNL_LAST; i++) {
			size_t len = strlen(tnls[i].label_prefix);

			/* Look for matching label start, before continuing */
			if (strncmp(label_name, tnls[i].label_prefix, len))
				continue;

			/* Make sure this label matches our PF. Note that the
			 * PF character ('0' - '7') will be located where our
			 * prefix string ends.
			 */
			if ((label_name[len] - '0') == hw->pf_id) {
				hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
				hw->tnl.tbl[hw->tnl.count].valid = false;
				hw->tnl.tbl[hw->tnl.count].boost_addr = val;
				hw->tnl.tbl[hw->tnl.count].port = 0;
				hw->tnl.count++;
				break;
			}
		}

		label_name = ice_enum_labels(NULL, 0, &state, &val);
	}

	/* Cache the appropriate boost TCAM entry pointers */
	for (i = 0; i < hw->tnl.count; i++) {
		ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
				     &hw->tnl.tbl[i].boost_entry);
		if (hw->tnl.tbl[i].boost_entry) {
			hw->tnl.tbl[i].valid = true;
			if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
				hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
		}
	}
}

/* Key creation */

#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1

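/**
 * ice_gen_key_word - generate 16-bits of a key/mask word
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to an array of where the upper 8 bits of the key should be put
 * @key_inv: pointer to an array of where the lower 8 bits of the key inverse
 *	     should be put
 *
 * This function generates 16-bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16-bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
 *
 *     '0' =    b01, always match a 0 bit
 *     '1' =    b10, always match a 1 bit
 *     '?' =    b11, don't care bit (always matches)
 *     '~' =    b00, never match bit
 */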
static enum ice_status
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
		 u8 *key_inv)
{
	u8 in_key = *key, in_key_inv = *key_inv;
	u8 i;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
		return ICE_ERR_CFG;

	*key = 0;
	*key_inv = 0;

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {
		*key >>= 1;
		*key_inv >>= 1;

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
		}

		dont_care >>= 1;
		nvr_mtch >>= 1;
		valid >>= 1;
		val >>= 1;
		in_key >>= 1;
		in_key_inv >>= 1;
	}

	return 0;
}

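/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' number of bits set in
 * an array. Returns true if the number of bits set is less than or equal to
 * max.
 */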
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
	u16 count = 0;
	u16 i;

	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */
		if (!mask[i])
			continue;

		/* We know there is at least one set bit in this byte because
		 * of the above check; if we already have found 'max' number
		 * of bits set, then we can return failure now without the
		 * need to examine individual bits within this byte
		 */
		if (count == max)
			return false;

		/* count the bits in this byte, checking threshold */
		count += hweight8(mask[i]);
		if (count > max)
			return false;
	}

	return true;
}

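/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array of 8-bit masks that determine what key portion to update
 * @dc: array of 8-bit masks that make up the don't care mask
 * @nm: array of 8-bit masks that make up the never match mask
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */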
static enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len)
{
	u16 half_size;
	u16 i;

	/* size must be a multiple of 2 bytes. */
	if (size % 2)
		return ICE_ERR_CFG;

	half_size = size / 2;
	if (off + len > half_size)
		return ICE_ERR_CFG;

	/* Make sure at most one bit is set in the never match mask. Having
	 * more than one never match mask bit set will cause HW to consume
	 * excessive power otherwise; this is a power management efficiency
	 * check.
	 */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
		return ICE_ERR_CFG;

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
			return ICE_ERR_CFG;

	return 0;
}

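/**
 * ice_acquire_global_cfg_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the global config lock for reading
 * or writing of the package. When attempting to obtain write access, the
 * caller must check for the following two return values:
 *
 * 0                  - Means the caller has acquired the global config lock
 *                      and can perform writing of the package.
 * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
 *                      package or has found that no update was necessary; in
 *                      this case, the caller can just skip performing any
 *                      update of the package.
 */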
702static enum ice_status
703ice_acquire_global_cfg_lock(struct ice_hw *hw,
704 enum ice_aq_res_access_type access)
705{
706 enum ice_status status;
707
708 status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
709 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
710
711 if (!status)
712 mutex_lock(&ice_global_cfg_lock_sw);
713 else if (status == ICE_ERR_AQ_NO_WORK)
714 ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
715
716 return status;
717}
718
719
720
721
722
723
724
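/**
 * ice_release_global_cfg_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the global config lock.
 */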
static void ice_release_global_cfg_lock(struct ice_hw *hw)
{
	mutex_unlock(&ice_global_cfg_lock_sw);
	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
}

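/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */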
static enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}

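/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
 */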
static void ice_release_change_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}

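/**
 * ice_aq_download_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package buffer to transfer
 * @buf_size: the size of the package buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Download Package (0x0C40)
 */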
static enum ice_status
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
		    u16 buf_size, bool last_buf, u32 *error_offset,
		    u32 *error_info, struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

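/**
 * ice_aq_update_pkg
 * @hw: pointer to the hardware structure
 * @pkg_buf: the package cmd buffer
 * @buf_size: the size of the package cmd buffer
 * @last_buf: last buffer indicator
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cd: pointer to command details structure or NULL
 *
 * Update Package (0x0C42)
 */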
static enum ice_status
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
		  bool last_buf, u32 *error_offset, u32 *error_info,
		  struct ice_sq_cd *cd)
{
	struct ice_aqc_download_pkg *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (error_offset)
		*error_offset = 0;
	if (error_info)
		*error_info = 0;

	cmd = &desc.params.download_pkg;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (last_buf)
		cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;

	status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
	if (status == ICE_ERR_AQ_ERROR) {
		/* Read error from buffer only when the FW returned an error */
		struct ice_aqc_download_pkg_resp *resp;

		resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
		if (error_offset)
			*error_offset = le32_to_cpu(resp->error_offset);
		if (error_info)
			*error_info = le32_to_cpu(resp->error_info);
	}

	return status;
}

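/**
 * ice_find_seg_in_pkg
 * @hw: pointer to the hardware structure
 * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 */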
static struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
		    struct ice_pkg_hdr *pkg_hdr)
{
	u32 i;

	ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
		  pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
		  pkg_hdr->pkg_format_ver.update,
		  pkg_hdr->pkg_format_ver.draft);

	/* Search all package segments for the requested segment type */
	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
		struct ice_generic_seg_hdr *seg;

		seg = (struct ice_generic_seg_hdr *)
			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));

		if (le32_to_cpu(seg->seg_type) == seg_type)
			return seg;
	}

	return NULL;
}

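/**
 * ice_update_pkg
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains change lock and updates package.
 */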
895static enum ice_status
896ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
897{
898 enum ice_status status;
899 u32 offset, info, i;
900
901 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
902 if (status)
903 return status;
904
905 for (i = 0; i < count; i++) {
906 struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
907 bool last = ((i + 1) == count);
908
909 status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
910 last, &offset, &info, NULL);
911
912 if (status) {
913 ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
914 status, offset, info);
915 break;
916 }
917 }
918
919 ice_release_change_lock(hw);
920
921 return status;
922}
923
924
925
926
927
928
929
930
931
932
933
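/**
 * ice_dwnld_cfg_bufs
 * @hw: pointer to the hardware structure
 * @bufs: pointer to an array of buffers
 * @count: the number of buffers in the array
 *
 * Obtains global config lock and downloads the package configuration buffers
 * to the firmware. Metadata buffers are skipped, and the first metadata
 * buffer found indicates that the rest of the buffers are all metadata
 * buffers.
 */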
static enum ice_status
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
	enum ice_status status;
	struct ice_buf_hdr *bh;
	u32 offset, info, i;

	if (!bufs || !count)
		return ICE_ERR_PARAM;

	/* If the first buffer's first section has its metadata bit set
	 * then there are no buffers to be downloaded, and the operation is
	 * considered a success.
	 */
	bh = (struct ice_buf_hdr *)bufs;
	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
		return 0;

	/* reset pkg_dwnld_status in case this function is called in the
	 * reset/rebuild flow
	 */
	hw->pkg_dwnld_status = ICE_AQ_RC_OK;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status) {
		if (status == ICE_ERR_AQ_NO_WORK)
			hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
		else
			hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		return status;
	}

	for (i = 0; i < count; i++) {
		bool last = ((i + 1) == count);

		if (!last) {
			/* check next buffer for metadata flag */
			bh = (struct ice_buf_hdr *)(bufs + i + 1);

			/* A set metadata flag in the next buffer will signal
			 * that the current buffer will be the last buffer
			 * downloaded
			 */
			if (le16_to_cpu(bh->section_count))
				if (le32_to_cpu(bh->section_entry[0].type) &
				    ICE_METADATA_BUF)
					last = true;
		}

		bh = (struct ice_buf_hdr *)(bufs + i);

		status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
					     &offset, &info, NULL);

		/* Save AQ status from download package */
		hw->pkg_dwnld_status = hw->adminq.sq_last_status;
		if (status) {
			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
				  status, offset, info);
			break;
		}

		if (last)
			break;
	}

	ice_release_global_cfg_lock(hw);

	return status;
}

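/**
 * ice_aq_get_pkg_info_list
 * @hw: pointer to the hardware structure
 * @pkg_info: the buffer which will receive the information list
 * @buf_size: the size of the pkg_info information buffer
 * @cd: pointer to command details structure or NULL
 *
 * Get Package Info List (0x0C43)
 */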
static enum ice_status
ice_aq_get_pkg_info_list(struct ice_hw *hw,
			 struct ice_aqc_get_pkg_info_resp *pkg_info,
			 u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);

	return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}

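/**
 * ice_download_pkg
 * @hw: pointer to the hardware structure
 * @ice_seg: pointer to the segment of the package to be downloaded
 *
 * Handles the download of a complete package.
 */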
static enum ice_status
ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
	struct ice_buf_table *ice_buf_tbl;

	ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
		  ice_seg->hdr.seg_format_ver.major,
		  ice_seg->hdr.seg_format_ver.minor,
		  ice_seg->hdr.seg_format_ver.update,
		  ice_seg->hdr.seg_format_ver.draft);

	ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
		  le32_to_cpu(ice_seg->hdr.seg_type),
		  le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);

	ice_buf_tbl = ice_find_buf_table(ice_seg);

	ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
		  le32_to_cpu(ice_buf_tbl->buf_count));

	return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
				  le32_to_cpu(ice_buf_tbl->buf_count));
}

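/**
 * ice_init_pkg_info
 * @hw: pointer to the hardware structure
 * @pkg_hdr: pointer to the driver's package hdr
 *
 * Saves off the package details into the HW structure.
 */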
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
	struct ice_generic_seg_hdr *seg_hdr;

	if (!pkg_hdr)
		return ICE_ERR_PARAM;

	seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
	if (seg_hdr) {
		struct ice_meta_sect *meta;
		struct ice_pkg_enum state;

		memset(&state, 0, sizeof(state));

		/* Get package information from the Metadata Section */
		meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
					    ICE_SID_METADATA);
		if (!meta) {
			ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
			return ICE_ERR_CFG;
		}

		hw->pkg_ver = meta->ver;
		memcpy(hw->pkg_name, meta->name, sizeof(meta->name));

		ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
			  meta->ver.major, meta->ver.minor, meta->ver.update,
			  meta->ver.draft, meta->name);

		hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
		memcpy(hw->ice_seg_id, seg_hdr->seg_id,
		       sizeof(hw->ice_seg_id));

		ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
			  seg_hdr->seg_format_ver.major,
			  seg_hdr->seg_format_ver.minor,
			  seg_hdr->seg_format_ver.update,
			  seg_hdr->seg_format_ver.draft,
			  seg_hdr->seg_id);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
		return ICE_ERR_CFG;
	}

	return 0;
}

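/**
 * ice_get_pkg_info
 * @hw: pointer to the hardware structure
 *
 * Store details of the package currently loaded in HW into the HW structure.
 */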
static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
{
	struct ice_aqc_get_pkg_info_resp *pkg_info;
	enum ice_status status;
	u16 size;
	u32 i;

	size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
	pkg_info = kzalloc(size, GFP_KERNEL);
	if (!pkg_info)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
	if (status)
		goto init_pkg_free_alloc;

	for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT	4
		char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
		u8 place = 0;

		if (pkg_info->pkg_info[i].is_active) {
			flags[place++] = 'A';
			hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
			hw->active_track_id =
				le32_to_cpu(pkg_info->pkg_info[i].track_id);
			memcpy(hw->active_pkg_name,
			       pkg_info->pkg_info[i].name,
			       sizeof(pkg_info->pkg_info[i].name));
			hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
		}
		if (pkg_info->pkg_info[i].is_active_at_boot)
			flags[place++] = 'B';
		if (pkg_info->pkg_info[i].is_modified)
			flags[place++] = 'M';
		if (pkg_info->pkg_info[i].is_in_nvm)
			flags[place++] = 'N';

		ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
			  i, pkg_info->pkg_info[i].ver.major,
			  pkg_info->pkg_info[i].ver.minor,
			  pkg_info->pkg_info[i].ver.update,
			  pkg_info->pkg_info[i].ver.draft,
			  pkg_info->pkg_info[i].name, flags);
	}

init_pkg_free_alloc:
	kfree(pkg_info);

	return status;
}

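/**
 * ice_verify_pkg - verify package
 * @pkg: pointer to the package buffer
 * @len: size of the package buffer
 *
 * Verifies various attributes of the package file, including length, format
 * version, and the requirement of at least one segment.
 */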
static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
	u32 seg_count;
	u32 i;

	if (len < struct_size(pkg, seg_offset, 1))
		return ICE_ERR_BUF_TOO_SHORT;

	if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
	    pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
	    pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
	    pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
		return ICE_ERR_CFG;

	/* pkg must have at least one segment */
	seg_count = le32_to_cpu(pkg->seg_count);
	if (seg_count < 1)
		return ICE_ERR_CFG;

	/* make sure segment array fits in package length */
	if (len < struct_size(pkg, seg_offset, seg_count))
		return ICE_ERR_BUF_TOO_SHORT;

	/* all segments must fit within length */
	for (i = 0; i < seg_count; i++) {
		u32 off = le32_to_cpu(pkg->seg_offset[i]);
		struct ice_generic_seg_hdr *seg;

		/* segment header must fit */
		if (len < off + sizeof(*seg))
			return ICE_ERR_BUF_TOO_SHORT;

		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);

		/* segment body must fit */
		if (len < off + le32_to_cpu(seg->seg_size))
			return ICE_ERR_BUF_TOO_SHORT;
	}

	return 0;
}

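/**
 * ice_free_seg - free package segment pointer
 * @hw: pointer to the hardware structure
 *
 * Frees the package segment pointer in the proper manner, depending on if the
 * segment was allocated or just the passed in pointer was stored.
 */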
void ice_free_seg(struct ice_hw *hw)
{
	if (hw->pkg_copy) {
		devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
		hw->pkg_copy = NULL;
		hw->pkg_size = 0;
	}
	hw->seg = NULL;
}

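/**
 * ice_init_pkg_regs - initialize additional package registers
 * @hw: pointer to the hardware structure
 */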
static void ice_init_pkg_regs(struct ice_hw *hw)
{
#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
#define ICE_SW_BLK_IDX	0

	/* setup Switch block input mask, which is 48-bits in two parts */
	wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
	wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
}

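/**
 * ice_chk_pkg_version - check package version for compatibility with driver
 * @pkg_ver: pointer to a version structure to check
 *
 * Check to make sure that the package about to be downloaded is compatible
 * with the driver. To be compatible, the major and minor components of the
 * package version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
 * definitions.
 */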
static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
{
	if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
	    pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
		return ICE_ERR_NOT_SUPPORTED;

	return 0;
}

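/**
 * ice_chk_pkg_compat
 * @hw: pointer to the hardware structure
 * @ospkg: pointer to the package hdr
 * @seg: pointer to the package segment hdr
 *
 * This function checks the package version compatibility with driver and NVM
 */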
static enum ice_status
ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
		   struct ice_seg **seg)
{
	struct ice_aqc_get_pkg_info_resp *pkg;
	enum ice_status status;
	u16 size;
	u32 i;

	/* Check package version compatibility */
	status = ice_chk_pkg_version(&hw->pkg_ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
		return status;
	}

	/* find ICE segment in given package */
	*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
						     ospkg);
	if (!*seg) {
		ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
		return ICE_ERR_CFG;
	}

	/* Check if FW is compatible with the OS package */
	size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
	pkg = kzalloc(size, GFP_KERNEL);
	if (!pkg)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
	if (status)
		goto fw_ddp_compat_free_alloc;

	for (i = 0; i < le32_to_cpu(pkg->count); i++) {
		/* loop till we find the NVM package */
		if (!pkg->pkg_info[i].is_in_nvm)
			continue;
		if ((*seg)->hdr.seg_format_ver.major !=
			pkg->pkg_info[i].ver.major ||
		    (*seg)->hdr.seg_format_ver.minor >
			pkg->pkg_info[i].ver.minor) {
			status = ICE_ERR_FW_DDP_MISMATCH;
			ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
		}
		/* done processing NVM package so break */
		break;
	}
fw_ddp_compat_free_alloc:
	kfree(pkg);
	return status;
}

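/**
 * ice_init_pkg - initialize/download package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function initializes a package. The package contains HW tables
 * required to do packet processing. First, the function extracts package
 * information such as version. Then it finds the ice configuration segment
 * within the package; this function then saves a copy of the segment pointer
 * within the supplied package buffer. Next, the function will cache any hints
 * from the package, followed by downloading the package itself. Note, that if
 * a previous PF driver has already downloaded the package successfully, then
 * the current driver will not have to download the package again.
 *
 * The local package contents will be used to query default behavior and to
 * update specific sections of the HW's version of the package (e.g. to update
 * the parse graph to understand new protocols).
 *
 * This function stores a pointer to the package buffer memory, and it is
 * expected that the supplied buffer will not be freed immediately. If the
 * package buffer needs to be freed, such as when read from a file, use
 * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
 * case.
 */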
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
{
	struct ice_pkg_hdr *pkg;
	enum ice_status status;
	struct ice_seg *seg;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	pkg = (struct ice_pkg_hdr *)buf;
	status = ice_verify_pkg(pkg, len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
			  status);
		return status;
	}

	/* initialize package info */
	status = ice_init_pkg_info(hw, pkg);
	if (status)
		return status;

	/* before downloading the package, check package version for
	 * compatibility with driver
	 */
	status = ice_chk_pkg_compat(hw, pkg, &seg);
	if (status)
		return status;

	/* initialize package hints and then download package */
	ice_init_pkg_hints(hw, seg);
	status = ice_download_pkg(hw, seg);
	if (status == ICE_ERR_AQ_NO_WORK) {
		ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
		status = 0;
	}

	/* Get information on the package currently loaded in HW, then make
	 * sure the driver is compatible with this version.
	 */
	if (!status) {
		status = ice_get_pkg_info(hw);
		if (!status)
			status = ice_chk_pkg_version(&hw->active_pkg_ver);
	}

	if (!status) {
		hw->seg = seg;
		/* on successful package download update other required
		 * registers to support the package and fill HW tables
		 * with package content.
		 */
		ice_init_pkg_regs(hw);
		ice_fill_blk_tbls(hw);
	} else {
		ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
			  status);
	}

	return status;
}

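/**
 * ice_copy_and_init_pkg - initialize/download a copy of the package
 * @hw: pointer to the hardware structure
 * @buf: pointer to the package buffer
 * @len: size of the package buffer
 *
 * This function copies the package buffer, and then calls ice_init_pkg() to
 * initialize the copied package contents.
 *
 * The copying is necessary if the package buffer supplied is constant, or if
 * the memory may disappear shortly after calling this function.
 *
 * If the package buffer resides in the data segment and can be modified, the
 * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
 *
 * However, if the package buffer needs to be copied first, such as when being
 * read from a file, the caller should use ice_copy_and_init_pkg().
 *
 * This function will first copy the package buffer, before calling
 * ice_init_pkg(). The caller is free to immediately destroy the original
 * package buffer, as the new copy will be managed by this function and
 * related routines.
 */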
enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
{
	enum ice_status status;
	u8 *buf_copy;

	if (!buf || !len)
		return ICE_ERR_PARAM;

	buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);

	status = ice_init_pkg(hw, buf_copy, len);
	if (status) {
		/* Free the copy, since we failed to initialize the package */
		devm_kfree(ice_hw_to_dev(hw), buf_copy);
	} else {
		/* Track the copied pkg so we can free it later */
		hw->pkg_copy = buf_copy;
		hw->pkg_size = len;
	}

	return status;
}

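/**
 * ice_pkg_buf_alloc
 * @hw: pointer to the HW structure
 *
 * Allocates a package buffer and returns a pointer to the buffer header.
 * Note: all package contents must be in Little Endian form.
 */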
static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
{
	struct ice_buf_build *bld;
	struct ice_buf_hdr *buf;

	bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
	if (!bld)
		return NULL;

	buf = (struct ice_buf_hdr *)bld;
	buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
					     section_entry));
	return bld;
}

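/**
 * ice_pkg_buf_free
 * @hw: pointer to the HW structure
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Frees a package buffer.
 */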
static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
{
	devm_kfree(ice_hw_to_dev(hw), bld);
}

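/**
 * ice_pkg_buf_reserve_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @count: the number of sections to reserve
 *
 * Reserves one or more section table entries in a package buffer. This
 * routine can be called multiple times as long as they are made before
 * calling ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() is
 * called once, the number of sections that can be allocated will not be able
 * to be increased; not using all reserved sections is fine, but this will
 * result in some wasted space in the buffer.
 * Note: all package contents must be in Little Endian form.
 */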
static enum ice_status
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
	struct ice_buf_hdr *buf;
	u16 section_count;
	u16 data_end;

	if (!bld)
		return ICE_ERR_PARAM;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* already an active section, can't increase table size */
	section_count = le16_to_cpu(buf->section_count);
	if (section_count > 0)
		return ICE_ERR_CFG;

	if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
		return ICE_ERR_CFG;
	bld->reserved_section_table_entries += count;

	data_end = le16_to_cpu(buf->data_end) +
		flex_array_size(buf, section_entry, count);
	buf->data_end = cpu_to_le16(data_end);

	return 0;
}

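/**
 * ice_pkg_buf_alloc_section
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 * @type: the section type value
 * @size: the size of the section to reserve (in bytes)
 *
 * Reserves memory in the buffer for a section's content and updates the
 * buffers' status accordingly. This routine returns a pointer to the first
 * byte of the section start within the buffer, which is used to fill in the
 * section contents.
 * Note: all package contents must be in Little Endian form.
 */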
static void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
{
	struct ice_buf_hdr *buf;
	u16 sect_count;
	u16 data_end;

	if (!bld || !type || !size)
		return NULL;

	buf = (struct ice_buf_hdr *)&bld->buf;

	/* check for enough space left in buffer */
	data_end = le16_to_cpu(buf->data_end);

	/* section start must align on 4 byte boundary */
	data_end = ALIGN(data_end, 4);

	if ((data_end + size) > ICE_MAX_S_DATA_END)
		return NULL;

	/* check for more available section table entries */
	sect_count = le16_to_cpu(buf->section_count);
	if (sect_count < bld->reserved_section_table_entries) {
		void *section_ptr = ((u8 *)buf) + data_end;

		buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
		buf->section_entry[sect_count].size = cpu_to_le16(size);
		buf->section_entry[sect_count].type = cpu_to_le32(type);

		data_end += size;
		buf->data_end = cpu_to_le16(data_end);

		buf->section_count = cpu_to_le16(sect_count + 1);
		return section_ptr;
	}

	/* no free section table entries */
	return NULL;
}

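/**
 * ice_pkg_buf_get_active_sections
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Returns the number of active sections. Before using the package buffer
 * in an update package command, the caller should make sure that there is at
 * least one active section - otherwise, the buffer is not legal and should
 * not be used.
 * Note: all package contents must be in Little Endian form.
 */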
static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
{
	struct ice_buf_hdr *buf;

	if (!bld)
		return 0;

	buf = (struct ice_buf_hdr *)&bld->buf;
	return le16_to_cpu(buf->section_count);
}

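/**
 * ice_pkg_buf
 * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
 *
 * Return a pointer to the buffer's header
 */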
static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
{
	if (!bld)
		return NULL;

	return &bld->buf;
}

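/**
 * ice_get_open_tunnel_port - retrieve an open tunnel port
 * @hw: pointer to the HW structure
 * @port: returns open port
 */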
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
{
	bool res = false;
	u16 i;

	mutex_lock(&hw->tnl_lock);

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
			*port = hw->tnl.tbl[i].port;
			res = true;
			break;
		}

	mutex_unlock(&hw->tnl_lock);

	return res;
}

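/**
 * ice_tunnel_idx_to_entry - convert linear index to the sparse one
 * @hw: pointer to the HW structure
 * @type: type of tunnel
 * @idx: linear index
 *
 * Stack assumes we have 2 linear tables with indexes [0, count_valid),
 * but really the port table may be sparse, and types are mixed, so convert
 * the stack index into the device index.
 */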
static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
				   u16 idx)
{
	u16 i;

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid &&
		    hw->tnl.tbl[i].type == type &&
		    idx-- == 0)
			return i;

	WARN_ON_ONCE(1);
	return 0;
}

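/**
 * ice_create_tunnel
 * @hw: pointer to the HW structure
 * @index: device table entry
 * @type: type of tunnel
 * @port: port of tunnel to create
 *
 * Create a tunnel by updating the parse graph in the parser. We do that by
 * creating a package buffer with the tunnel info and issuing an update
 * package command.
 */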
static enum ice_status
ice_create_tunnel(struct ice_hw *hw, u16 index,
		  enum ice_tunnel_type type, u16 port)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	struct ice_buf_build *bld;

	mutex_lock(&hw->tnl_lock);

	bld = ice_pkg_buf_alloc(hw);
	if (!bld) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_create_tunnel_end;
	}

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_create_tunnel_err;

	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					    struct_size(sect_rx, tcam, 1));
	if (!sect_rx)
		goto ice_create_tunnel_err;
	sect_rx->count = cpu_to_le16(1);

	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					    struct_size(sect_tx, tcam, 1));
	if (!sect_tx)
		goto ice_create_tunnel_err;
	sect_tx->count = cpu_to_le16(1);

	/* copy original boost entry to update package buffer */
	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_rx->tcam));

	/* over-write the never-match dest port key bits with the encoded port
	 * bits
	 */
	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
		    (u8 *)&port, NULL, NULL, NULL,
		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));

	/* exact copy of entry to Tx section entry */
	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
	if (!status)
		hw->tnl.tbl[index].port = port;

ice_create_tunnel_err:
	ice_pkg_buf_free(hw, bld);

ice_create_tunnel_end:
	mutex_unlock(&hw->tnl_lock);

	return status;
}

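/**
 * ice_destroy_tunnel
 * @hw: pointer to the HW structure
 * @index: device table entry
 * @type: type of tunnel
 * @port: port of tunnel to destroy
 *
 * Destroys a tunnel by creating an update package buffer targeting the
 * specific updates requested and then performing an update package.
 */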
static enum ice_status
ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
		   u16 port)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	struct ice_buf_build *bld;

	mutex_lock(&hw->tnl_lock);

	if (WARN_ON(!hw->tnl.tbl[index].valid ||
		    hw->tnl.tbl[index].type != type ||
		    hw->tnl.tbl[index].port != port)) {
		status = ICE_ERR_OUT_OF_RANGE;
		goto ice_destroy_tunnel_end;
	}

	bld = ice_pkg_buf_alloc(hw);
	if (!bld) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_destroy_tunnel_end;
	}

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_destroy_tunnel_err;

	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					    struct_size(sect_rx, tcam, 1));
	if (!sect_rx)
		goto ice_destroy_tunnel_err;
	sect_rx->count = cpu_to_le16(1);

	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					    struct_size(sect_tx, tcam, 1));
	if (!sect_tx)
		goto ice_destroy_tunnel_err;
	sect_tx->count = cpu_to_le16(1);

	/* copy original boost entry to update package buffer, one copy to Rx
	 * section, another copy to the Tx section
	 */
	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_rx->tcam));
	memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_tx->tcam));

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
	if (!status)
		hw->tnl.tbl[index].port = 0;

ice_destroy_tunnel_err:
	ice_pkg_buf_free(hw, bld);

ice_destroy_tunnel_end:
	mutex_unlock(&hw->tnl_lock);

	return status;
}

int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
			    unsigned int idx, struct udp_tunnel_info *ti)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	enum ice_tunnel_type tnl_type;
	enum ice_status status;
	u16 index;

	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
	index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);

	status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
	if (status) {
		netdev_err(netdev, "Error adding UDP tunnel - %s\n",
			   ice_stat_str(status));
		return -EIO;
	}

	udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
	return 0;
}

int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
			      unsigned int idx, struct udp_tunnel_info *ti)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	enum ice_tunnel_type tnl_type;
	enum ice_status status;

	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;

	status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
				    ntohs(ti->port));
	if (status) {
		netdev_err(netdev, "Error removing UDP tunnel - %s\n",
			   ice_stat_str(status));
		return -EIO;
	}

	return 0;
}

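/* PTG Management */

/**
 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to search for
 * @ptg: pointer to variable that receives the PTG
 *
 * This function will search the PTGs for a particular ptype, returning the
 * PTG ID that contains it through the PTG parameter, with the value of
 * ICE_DEFAULT_PTG (0) meaning it is part the default PTG.
 */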
static enum ice_status
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
	if (ptype >= ICE_XLT1_CNT || !ptg)
		return ICE_ERR_PARAM;

	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
	return 0;
}

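/**
 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptg: the PTG to allocate
 *
 * This function allocates a given packet type group ID specified by the PTG
 * parameter.
 */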
static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
{
	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
}

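/**
 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to remove
 * @ptg: the PTG to remove the ptype from
 *
 * This function will remove the ptype from the specific PTG, and move it to
 * the default PTG (ICE_DEFAULT_PTG).
 */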
static enum ice_status
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	struct ice_ptg_ptype **ch;
	struct ice_ptg_ptype *p;

	if (ptype > ICE_XLT1_CNT - 1)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Should not happen if .in_use is set, bad config */
	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
		return ICE_ERR_CFG;

	/* find the ptype within this PTG, and bypass the link over it */
	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	while (p) {
		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
			*ch = p->next_ptype;
			break;
		}

		ch = &p->next_ptype;
		p = p->next_ptype;
	}

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;

	return 0;
}

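/**
 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to add or move
 * @ptg: the PTG to add or move the ptype to
 *
 * This function will either add or move a ptype to a particular PTG depending
 * on if the ptype is already part of another group. Note that using a
 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
 * default PTG.
 */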
static enum ice_status
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	enum ice_status status;
	u8 original_ptg;

	if (ptype > ICE_XLT1_CNT - 1)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
		return ICE_ERR_DOES_NOT_EXIST;

	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
	if (status)
		return status;

	/* Is ptype already in the correct PTG? */
	if (original_ptg == ptg)
		return 0;

	/* Remove from original PTG and move back to the default PTG */
	if (original_ptg != ICE_DEFAULT_PTG)
		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);

	/* Moving to default PTG? Then we're done with this request */
	if (ptg == ICE_DEFAULT_PTG)
		return 0;

	/* Add ptype to PTG at beginning of list */
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
		&hw->blk[blk].xlt1.ptypes[ptype];

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
	hw->blk[blk].xlt1.t[ptype] = ptg;

	return 0;
}

/* Block / table size info */
struct ice_blk_size_details {
	u16 xlt1;			/* # XLT1 entries */
	u16 xlt2;			/* # XLT2 entries */
	u16 prof_tcam;			/* # profile ID TCAM entries */
	u16 prof_id;			/* # profile IDs */
	u8 prof_cdid_bits;		/* # CDID one-hot bits used in key */
	u16 prof_redir;			/* # profile redirection entries */
	u16 es;				/* # extraction sequence entries */
	u16 fvw;			/* # field vector words */
	u8 overwrite;			/* overwrite existing entries allowed */
	u8 reverse;			/* reverse FV order */
};

static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,  FV, FVW */
	/*          Overwrite   , Reverse FV */
	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0, 256, 256,  48,
		    false, false },
	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0, 128, 128,  32,
		    false, false },
	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0, 128, 128,  24,
		    false, true  },
	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0, 128, 128,  24,
		    true,  true  },
	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,  32,  32,  24,
		    false, false },
};

enum ice_sid_all {
	ICE_SID_XLT1_OFF = 0,
	ICE_SID_XLT2_OFF,
	ICE_SID_PR_OFF,
	ICE_SID_PR_REDIR_OFF,
	ICE_SID_ES_OFF,
	ICE_SID_OFF_COUNT,
};

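/* Characteristic handling */

/**
 * ice_match_prop_lst - determine if properties of two lists match
 * @list1: first properties list
 * @list2: second properties list
 *
 * Count, cookies and the order must match in order to be considered
 * equivalent.
 */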
static bool
ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
{
	struct ice_vsig_prof *tmp1;
	struct ice_vsig_prof *tmp2;
	u16 chk_count = 0;
	u16 count = 0;

	/* compare counts */
	list_for_each_entry(tmp1, list1, list)
		count++;
	list_for_each_entry(tmp2, list2, list)
		chk_count++;

	if (!count || count != chk_count)
		return false;

	tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
	tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);

	/* profile cookies must compare, and in the exact same order to take
	 * into account priority
	 */
	while (count--) {
		if (tmp2->profile_cookie != tmp1->profile_cookie)
			return false;

		tmp1 = list_next_entry(tmp1, list);
		tmp2 = list_next_entry(tmp2, list);
	}

	return true;
}

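/* VSIG Management */

/**
 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI of interest
 * @vsig: pointer to receive the VSI group
 *
 * This function will lookup the VSI entry in the XLT2 list and return
 * the VSI group its associated with.
 */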
static enum ice_status
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{
	if (!vsig || vsi >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;

	/* As long as there's a default or valid VSIG associated with the input
	 * VSI, the functions returns a success. Any handling of VSIG will be
	 * done by the following add, update or remove functions.
	 */
	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;

	return 0;
}

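/**
 * ice_vsig_alloc_val - allocate a new VSIG by value
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: the VSIG to allocate
 *
 * This function will allocate a given VSIG specified by the VSIG parameter.
 */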
static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
	}

	return ICE_VSIG_VALUE(idx, hw->pf_id);
}

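/**
 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
 * @hw: pointer to the hardware structure
 * @blk: HW block
 *
 * This function will iterate through the VSIG list and mark the first
 * unused entry for the new VSIG entry as used and return that value.
 */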
static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
{
	u16 i;

	for (i = 1; i < ICE_MAX_VSIGS; i++)
		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
			return ice_vsig_alloc_val(hw, blk, i);

	return ICE_DEFAULT_VSIG;
}

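/**
 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @chs: characteristic list
 * @vsig: returns the VSIG with the matching profiles, if found
 *
 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
 * a group have the same characteristic set. To check if there exists a VSIG
 * which has the same characteristics as the input characteristics; this
 * function will iterate through the XLT2 list and return the VSIG that has a
 * matching configuration. In order to make sure that priorities are accounted
 * for, the list must match exactly, including the order in which the
 * characteristics are listed.
 */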
static enum ice_status
ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
			struct list_head *chs, u16 *vsig)
{
	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
	u16 i;

	for (i = 0; i < xlt2->count; i++)
		if (xlt2->vsig_tbl[i].in_use &&
		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
			return 0;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}

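/**
 * ice_vsig_free - free VSI group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: VSIG to remove
 *
 * The function will remove all VSIs associated with the input VSIG and move
 * them to the DEFAULT_VSIG and mark the VSIG available.
 */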
static enum ice_status
ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	struct ice_vsig_prof *dtmp, *del;
	struct ice_vsig_vsi *vsi_cur;
	u16 idx;

	idx = vsig & ICE_VSIG_IDX_M;
	if (idx >= ICE_MAX_VSIGS)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;

	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	/* If the VSIG has at least 1 VSI then iterate through the
	 * list and remove the VSIs before deleting the group.
	 */
	if (vsi_cur) {
		/* remove all vsis associated with this VSIG XLT2 entry */
		do {
			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;

			vsi_cur->vsig = ICE_DEFAULT_VSIG;
			vsi_cur->changed = 1;
			vsi_cur->next_vsi = NULL;
			vsi_cur = tmp;
		} while (vsi_cur);

		/* NULL terminate head of VSI list */
		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
	}

	/* free characteristic list */
	list_for_each_entry_safe(del, dtmp,
				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
				 list) {
		list_del(&del->list);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	/* if VSIG characteristic list was cleared for reset
	 * re-initialize the list head
	 */
	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);

	return 0;
}

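/**
 * ice_vsig_remove_vsi - remove VSI from VSIG
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI to remove
 * @vsig: VSI group to remove from
 *
 * The function will remove the input VSI from its VSI group and move it
 * to the DEFAULT_VSIG.
 */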
static enum ice_status
ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
	u16 idx;

	idx = vsig & ICE_VSIG_IDX_M;

	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
		return ICE_ERR_PARAM;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	/* entry already in default VSIG, don't have to remove */
	if (idx == ICE_DEFAULT_VSIG)
		return 0;

	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	if (!(*vsi_head))
		return ICE_ERR_CFG;

	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
	vsi_cur = (*vsi_head);

	/* iterate the VSI list, skip over the entry to be removed */
	while (vsi_cur) {
		if (vsi_tgt == vsi_cur) {
			(*vsi_head) = vsi_cur->next_vsi;
			break;
		}
		vsi_head = &vsi_cur->next_vsi;
		vsi_cur = vsi_cur->next_vsi;
	}

	/* verify if VSI was removed from group list */
	if (!vsi_cur)
		return ICE_ERR_DOES_NOT_EXIST;

	vsi_cur->vsig = ICE_DEFAULT_VSIG;
	vsi_cur->changed = 1;
	vsi_cur->next_vsi = NULL;

	return 0;
}

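/**
 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI to move
 * @vsig: destination VSI group
 *
 * This function will move or add the input VSI to the target VSIG.
 * The function will find the original VSIG the VSI belongs to and
 * move the entry to the DEFAULT_VSIG, update the original VSIG and
 * then move entry to the new VSIG.
 */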
static enum ice_status
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
	struct ice_vsig_vsi *tmp;
	enum ice_status status;
	u16 orig_vsig, idx;

	idx = vsig & ICE_VSIG_IDX_M;

	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
		return ICE_ERR_PARAM;

	/* if VSIG not in use and VSIG is not default type this VSIG
	 * doesn't exist.
	 */
	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
	    vsig != ICE_DEFAULT_VSIG)
		return ICE_ERR_DOES_NOT_EXIST;

	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
	if (status)
		return status;

	/* no update required if vsigs match */
	if (orig_vsig == vsig)
		return 0;

	if (orig_vsig != ICE_DEFAULT_VSIG) {
		/* remove entry from old VSIG */
		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
		if (status)
			return status;
	}

	if (idx == ICE_DEFAULT_VSIG)
		return 0;

	/* Create VSI entry and add VSIG and prop_mask values */
	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
	hw->blk[blk].xlt2.vsis[vsi].changed = 1;

	/* Add new entry to the head of the VSIG list */
	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
		&hw->blk[blk].xlt2.vsis[vsi];
	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
	hw->blk[blk].xlt2.t[vsi] = vsig;

	return 0;
}

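/**
 * ice_prof_has_mask_idx - determine if profile index masking is identical
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @prof: profile to check
 * @idx: profile index to check
 * @mask: mask to match
 */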
static bool
ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
		      u16 mask)
{
	bool expect_no_mask = false;
	bool found = false;
	bool match = false;
	u16 i;

	/* If mask is 0x0000 or 0xffff, then there is no masking */
	if (mask == 0 || mask == 0xffff)
		expect_no_mask = true;

	/* Scan the enabled masks on this profile, for the specified idx */
	for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
	     hw->blk[blk].masks.count; i++)
		if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
			if (hw->blk[blk].masks.masks[i].in_use &&
			    hw->blk[blk].masks.masks[i].idx == idx) {
				found = true;
				if (hw->blk[blk].masks.masks[i].mask == mask)
					match = true;
				break;
			}

	if (expect_no_mask) {
		if (found)
			return false;
	} else {
		if (!match)
			return false;
	}

	return true;
}

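/**
 * ice_prof_has_mask - determine if profile masking is identical
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @prof: profile to check
 * @masks: masks to match
 */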
2422ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
2423{
2424 u16 i;
2425
2426
2427 for (i = 0; i < hw->blk[blk].es.fvw; i++)
2428 if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
2429 return false;
2430
2431 return true;
2432}
2433
2434
2435
2436
2437
2438
2439
2440
2441
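/**
 * ice_find_prof_id_with_mask - find profile ID for a given field vector
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @fv: field vector to search for
 * @masks: masks for FV
 * @prof_id: receives the profile ID
 */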
static enum ice_status
ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
			   struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
{
	struct ice_es *es = &hw->blk[blk].es;
	u8 i;

	/* For FD, we don't want to re-use a existed profile with the same
	 * field vector and mask. This will cause rule interference.
	 */
	if (blk == ICE_BLK_FD)
		return ICE_ERR_DOES_NOT_EXIST;

	for (i = 0; i < (u8)es->count; i++) {
		u16 off = i * es->fvw;

		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
			continue;

		/* check if masks settings are the same for this profile */
		if (masks && !ice_prof_has_mask(hw, blk, i, masks))
			continue;

		*prof_id = i;
		return 0;
	}

	return ICE_ERR_DOES_NOT_EXIST;
}

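/**
 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
 * @blk: the block type
 * @rsrc_type: pointer to variable to receive the resource type
 */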
static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
	switch (blk) {
	case ICE_BLK_FD:
		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
		break;
	case ICE_BLK_RSS:
		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
		break;
	default:
		return false;
	}
	return true;
}

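/**
 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
 * @blk: the block type
 * @rsrc_type: pointer to variable to receive the resource type
 */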
static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
	switch (blk) {
	case ICE_BLK_FD:
		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
		break;
	case ICE_BLK_RSS:
		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
		break;
	default:
		return false;
	}
	return true;
}

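/**
 * ice_alloc_tcam_ent - allocate hardware TCAM entry
 * @hw: pointer to the HW struct
 * @blk: the block to allocate the TCAM for
 * @btm: true to allocate from bottom of table, false to allocate from top
 * @tcam_idx: pointer to variable to receive the TCAM entry
 *
 * This function allocates a new entry in a Profile ID TCAM for a specific
 * block.
 */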
static enum ice_status
ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
		   u16 *tcam_idx)
{
	u16 res_type;

	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
		return ICE_ERR_PARAM;

	return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
}

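/**
 * ice_free_tcam_ent - free hardware TCAM entry
 * @hw: pointer to the HW struct
 * @blk: the block from which to free the TCAM entry
 * @tcam_idx: the TCAM entry to free
 *
 * This function frees an entry in a Profile ID TCAM for a specific block.
 */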
static enum ice_status
ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
{
	u16 res_type;

	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
		return ICE_ERR_PARAM;

	return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
}

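/**
 * ice_alloc_prof_id - allocate profile ID
 * @hw: pointer to the HW struct
 * @blk: the block to allocate the profile ID for
 * @prof_id: pointer to variable to receive the profile ID
 *
 * This function allocates a new profile ID, which also corresponds to a Field
 * Vector (Extraction Sequence) entry.
 */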
static enum ice_status
ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
{
	enum ice_status status;
	u16 res_type;
	u16 get_prof;

	if (!ice_prof_id_rsrc_type(blk, &res_type))
		return ICE_ERR_PARAM;

	status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
	if (!status)
		*prof_id = (u8)get_prof;

	return status;
}

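/**
 * ice_free_prof_id - free profile ID
 * @hw: pointer to the HW struct
 * @blk: the block from which to free the profile ID
 * @prof_id: the profile ID to free
 *
 * This function frees a profile ID, which also corresponds to a Field Vector.
 */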
static enum ice_status
ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
	u16 tmp_prof_id = (u16)prof_id;
	u16 res_type;

	if (!ice_prof_id_rsrc_type(blk, &res_type))
		return ICE_ERR_PARAM;

	return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
}

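/**
 * ice_prof_inc_ref - increment reference count for profile
 * @hw: pointer to the HW struct
 * @blk: the block from which to free the profile ID
 * @prof_id: the profile ID for which to increment the reference count
 */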
static enum ice_status
ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
	if (prof_id > hw->blk[blk].es.count)
		return ICE_ERR_PARAM;

	hw->blk[blk].es.ref_count[prof_id]++;

	return 0;
}

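/**
 * ice_write_prof_mask_reg - write profile mask register
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @mask_idx: mask index
 * @idx: index of the FV which will use the mask
 * @mask: the 16-bit mask
 */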
static void
ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
			u16 idx, u16 mask)
{
	u32 offset;
	u32 val;

	switch (blk) {
	case ICE_BLK_RSS:
		offset = GLQF_HMASK(mask_idx);
		val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
		val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
		break;
	case ICE_BLK_FD:
		offset = GLQF_FDMASK(mask_idx);
		val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
		val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
		break;
	default:
		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
			  blk);
		return;
	}

	wr32(hw, offset, val);
	ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
		  blk, idx, offset, val);
}

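/**
 * ice_write_prof_mask_enable_res - write profile mask enable register
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @prof_id: profile ID
 * @enable_mask: enable mask
 */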
static void
ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
			       u16 prof_id, u32 enable_mask)
{
	u32 offset;

	switch (blk) {
	case ICE_BLK_RSS:
		offset = GLQF_HMASK_SEL(prof_id);
		break;
	case ICE_BLK_FD:
		offset = GLQF_FDMASK_SEL(prof_id);
		break;
	default:
		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
			  blk);
		return;
	}

	wr32(hw, offset, enable_mask);
	ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
		  blk, prof_id, offset, enable_mask);
}

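/**
 * ice_init_prof_masks - initial prof masks
 * @hw: pointer to the HW struct
 * @blk: hardware block
 */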
static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
{
	u16 per_pf;
	u16 i;

	mutex_init(&hw->blk[blk].masks.lock);

	per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;

	hw->blk[blk].masks.count = per_pf;
	hw->blk[blk].masks.first = hw->pf_id * per_pf;

	memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));

	for (i = hw->blk[blk].masks.first;
	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
		ice_write_prof_mask_reg(hw, blk, i, 0, 0);
}

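/**
 * ice_init_all_prof_masks - initialize all prof masks
 * @hw: pointer to the HW struct
 */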
static void ice_init_all_prof_masks(struct ice_hw *hw)
{
	ice_init_prof_masks(hw, ICE_BLK_RSS);
	ice_init_prof_masks(hw, ICE_BLK_FD);
}

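/**
 * ice_alloc_prof_mask - allocate profile mask
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @idx: index of FV which will use the mask
 * @mask: the 16-bit mask
 * @mask_idx: variable to receive the mask index
 */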
static enum ice_status
ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
		    u16 *mask_idx)
{
	bool found_unused = false, found_copy = false;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	u16 unused_idx = 0, copy_idx = 0;
	u16 i;

	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
		return ICE_ERR_PARAM;

	mutex_lock(&hw->blk[blk].masks.lock);

	for (i = hw->blk[blk].masks.first;
	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
		if (hw->blk[blk].masks.masks[i].in_use) {
			/* if mask is in use and it exactly duplicates the
			 * desired mask and index, then in can be reused
			 */
			if (hw->blk[blk].masks.masks[i].mask == mask &&
			    hw->blk[blk].masks.masks[i].idx == idx) {
				found_copy = true;
				copy_idx = i;
				break;
			}
		} else {
			/* save off unused index, but keep searching in case
			 * there is an exact match later on
			 */
			if (!found_unused) {
				found_unused = true;
				unused_idx = i;
			}
		}

	if (found_copy)
		i = copy_idx;
	else if (found_unused)
		i = unused_idx;
	else
		goto err_ice_alloc_prof_mask;

	/* update mask for a new entry */
	if (found_unused) {
		hw->blk[blk].masks.masks[i].in_use = true;
		hw->blk[blk].masks.masks[i].mask = mask;
		hw->blk[blk].masks.masks[i].idx = idx;
		hw->blk[blk].masks.masks[i].ref = 0;
		ice_write_prof_mask_reg(hw, blk, i, idx, mask);
	}

	hw->blk[blk].masks.masks[i].ref++;
	*mask_idx = i;
	status = 0;

err_ice_alloc_prof_mask:
	mutex_unlock(&hw->blk[blk].masks.lock);

	return status;
}

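/**
 * ice_free_prof_mask - free profile mask
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @mask_idx: index of mask
 */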
static enum ice_status
ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
{
	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
		return ICE_ERR_PARAM;

	if (!(mask_idx >= hw->blk[blk].masks.first &&
	      mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
		return ICE_ERR_DOES_NOT_EXIST;

	mutex_lock(&hw->blk[blk].masks.lock);

	if (!hw->blk[blk].masks.masks[mask_idx].in_use)
		goto exit_ice_free_prof_mask;

	if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
		hw->blk[blk].masks.masks[mask_idx].ref--;
		goto exit_ice_free_prof_mask;
	}

	/* remove mask */
	hw->blk[blk].masks.masks[mask_idx].in_use = false;
	hw->blk[blk].masks.masks[mask_idx].mask = 0;
	hw->blk[blk].masks.masks[mask_idx].idx = 0;

	/* update mask as unused entry */
	ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
		  mask_idx);
	ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);

exit_ice_free_prof_mask:
	mutex_unlock(&hw->blk[blk].masks.lock);

	return 0;
}

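/**
 * ice_free_prof_masks - free all profile masks for a profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @prof_id: profile ID
 */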
static enum ice_status
ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
{
	u32 mask_bm;
	u16 i;

	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
		return ICE_ERR_PARAM;

	mask_bm = hw->blk[blk].es.mask_ena[prof_id];
	for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
		if (mask_bm & BIT(i))
			ice_free_prof_mask(hw, blk, i);

	return 0;
}

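/**
 * ice_shutdown_prof_masks - releases lock for masking
 * @hw: pointer to the HW struct
 * @blk: hardware block
 *
 * This should be called before unloading the driver
 */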
static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
{
	u16 i;

	mutex_lock(&hw->blk[blk].masks.lock);

	for (i = hw->blk[blk].masks.first;
	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
		ice_write_prof_mask_reg(hw, blk, i, 0, 0);

		hw->blk[blk].masks.masks[i].in_use = false;
		hw->blk[blk].masks.masks[i].idx = 0;
		hw->blk[blk].masks.masks[i].mask = 0;
	}

	mutex_unlock(&hw->blk[blk].masks.lock);
	mutex_destroy(&hw->blk[blk].masks.lock);
}

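/**
 * ice_shutdown_all_prof_masks - releases all locks for masking
 * @hw: pointer to the HW struct
 */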
static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
{
	ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
	ice_shutdown_prof_masks(hw, ICE_BLK_FD);
}

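/**
 * ice_update_prof_masking - set registers according to masking
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @prof_id: profile ID
 * @masks: masks
 */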
static enum ice_status
ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
			u16 *masks)
{
	bool err = false;
	u32 ena_mask = 0;
	u16 idx;
	u16 i;

	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
		return 0;

	for (i = 0; i < hw->blk[blk].es.fvw; i++)
		if (masks[i] && masks[i] != 0xFFFF) {
			if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
				ena_mask |= BIT(idx);
			} else {
				err = true;
				break;
			}
		}

	if (err) {
		for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
			if (ena_mask & BIT(i))
				ice_free_prof_mask(hw, blk, i);

		return ICE_ERR_OUT_OF_RANGE;
	}

	ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);

	hw->blk[blk].es.mask_ena[prof_id] = ena_mask;

	return 0;
}

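/**
 * ice_write_es - write an extraction sequence to hardware
 * @hw: pointer to the HW struct
 * @blk: the block in which to write the extraction sequence
 * @prof_id: the profile ID to write
 * @fv: pointer to the extraction sequence to write - NULL to clear extraction
 */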
static void
ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
	     struct ice_fv_word *fv)
{
	u16 off;

	off = prof_id * hw->blk[blk].es.fvw;
	if (!fv) {
		memset(&hw->blk[blk].es.t[off], 0,
		       hw->blk[blk].es.fvw * sizeof(*fv));
		hw->blk[blk].es.written[prof_id] = false;
	} else {
		memcpy(&hw->blk[blk].es.t[off], fv,
		       hw->blk[blk].es.fvw * sizeof(*fv));
	}
}

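/**
 * ice_prof_dec_ref - decrement reference count for a profile
 * @hw: pointer to the HW struct
 * @blk: the block from which to free the profile ID
 * @prof_id: the profile ID for which to decrement the reference count
 *
 * When the last reference goes away, the extraction sequence is cleared,
 * the profile's masks are freed and the profile ID itself is released.
 */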
static enum ice_status
ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
	if (prof_id >= hw->blk[blk].es.count)
		return ICE_ERR_PARAM;

	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
		if (!--hw->blk[blk].es.ref_count[prof_id]) {
			ice_write_es(hw, blk, prof_id, NULL);
			ice_free_prof_masks(hw, blk, prof_id);
			return ice_free_prof_id(hw, blk, prof_id);
		}
	}

	return 0;
}

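/* Block / section ID lookup used to initialize the per-block section IDs;
 * rows follow enum ice_block order: SW, ACL, FD, RSS, PE.
 */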
static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
	{ ICE_SID_XLT1_SW,
	  ICE_SID_XLT2_SW,
	  ICE_SID_PROFID_TCAM_SW,
	  ICE_SID_PROFID_REDIR_SW,
	  ICE_SID_FLD_VEC_SW
	},

	{ ICE_SID_XLT1_ACL,
	  ICE_SID_XLT2_ACL,
	  ICE_SID_PROFID_TCAM_ACL,
	  ICE_SID_PROFID_REDIR_ACL,
	  ICE_SID_FLD_VEC_ACL
	},

	{ ICE_SID_XLT1_FD,
	  ICE_SID_XLT2_FD,
	  ICE_SID_PROFID_TCAM_FD,
	  ICE_SID_PROFID_REDIR_FD,
	  ICE_SID_FLD_VEC_FD
	},

	{ ICE_SID_XLT1_RSS,
	  ICE_SID_XLT2_RSS,
	  ICE_SID_PROFID_TCAM_RSS,
	  ICE_SID_PROFID_REDIR_RSS,
	  ICE_SID_FLD_VEC_RSS
	},

	{ ICE_SID_XLT1_PE,
	  ICE_SID_XLT2_PE,
	  ICE_SID_PROFID_TCAM_PE,
	  ICE_SID_PROFID_REDIR_PE,
	  ICE_SID_FLD_VEC_PE
	}
};

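/**
 * ice_init_sw_xlt1_db - initialize software XLT1 database from HW tables
 * @hw: pointer to the hardware structure
 * @blk: the HW block to initialize
 */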
static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
{
	u16 pt;

	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
		u8 ptg;

		ptg = hw->blk[blk].xlt1.t[pt];
		if (ptg != ICE_DEFAULT_PTG) {
			ice_ptg_alloc_val(hw, blk, ptg);
			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
		}
	}
}

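/**
 * ice_init_sw_xlt2_db - initialize software XLT2 database from HW tables
 * @hw: pointer to the hardware structure
 * @blk: the HW block to initialize
 */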
static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
{
	u16 vsi;

	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
		u16 vsig;

		vsig = hw->blk[blk].xlt2.t[vsi];
		if (vsig) {
			ice_vsig_alloc_val(hw, blk, vsig);
			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);

			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
		}
	}
}

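/**
 * ice_init_sw_db - initialize software databases for all blocks
 * @hw: pointer to the hardware structure
 */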
static void ice_init_sw_db(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < ICE_BLK_COUNT; i++) {
		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
	}
}

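/**
 * ice_fill_tbl - read a package section and fill a block's shadow table
 * @hw: pointer to the hardware structure
 * @block_id: the block to copy the section data to
 * @sid: the section ID to read
 *
 * Sections larger than the destination table are truncated; if the package
 * segment has not been loaded (hw->seg is NULL), nothing is filled.
 */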
static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
{
	u32 dst_len, sect_len, offset = 0;
	struct ice_prof_redir_section *pr;
	struct ice_prof_id_section *pid;
	struct ice_xlt1_section *xlt1;
	struct ice_xlt2_section *xlt2;
	struct ice_sw_fv_section *es;
	struct ice_pkg_enum state;
	u8 *src, *dst;
	void *sect;

	if (!hw->seg) {
		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
		return;
	}

	memset(&state, 0, sizeof(state));

	sect = ice_pkg_enum_section(hw->seg, &state, sid);

	while (sect) {
		switch (sid) {
		case ICE_SID_XLT1_SW:
		case ICE_SID_XLT1_FD:
		case ICE_SID_XLT1_RSS:
		case ICE_SID_XLT1_ACL:
		case ICE_SID_XLT1_PE:
			xlt1 = sect;
			src = xlt1->value;
			sect_len = le16_to_cpu(xlt1->count) *
				sizeof(*hw->blk[block_id].xlt1.t);
			dst = hw->blk[block_id].xlt1.t;
			dst_len = hw->blk[block_id].xlt1.count *
				sizeof(*hw->blk[block_id].xlt1.t);
			break;
		case ICE_SID_XLT2_SW:
		case ICE_SID_XLT2_FD:
		case ICE_SID_XLT2_RSS:
		case ICE_SID_XLT2_ACL:
		case ICE_SID_XLT2_PE:
			xlt2 = sect;
			src = (__force u8 *)xlt2->value;
			sect_len = le16_to_cpu(xlt2->count) *
				sizeof(*hw->blk[block_id].xlt2.t);
			dst = (u8 *)hw->blk[block_id].xlt2.t;
			dst_len = hw->blk[block_id].xlt2.count *
				sizeof(*hw->blk[block_id].xlt2.t);
			break;
		case ICE_SID_PROFID_TCAM_SW:
		case ICE_SID_PROFID_TCAM_FD:
		case ICE_SID_PROFID_TCAM_RSS:
		case ICE_SID_PROFID_TCAM_ACL:
		case ICE_SID_PROFID_TCAM_PE:
			pid = sect;
			src = (u8 *)pid->entry;
			sect_len = le16_to_cpu(pid->count) *
				sizeof(*hw->blk[block_id].prof.t);
			dst = (u8 *)hw->blk[block_id].prof.t;
			dst_len = hw->blk[block_id].prof.count *
				sizeof(*hw->blk[block_id].prof.t);
			break;
		case ICE_SID_PROFID_REDIR_SW:
		case ICE_SID_PROFID_REDIR_FD:
		case ICE_SID_PROFID_REDIR_RSS:
		case ICE_SID_PROFID_REDIR_ACL:
		case ICE_SID_PROFID_REDIR_PE:
			pr = sect;
			src = pr->redir_value;
			sect_len = le16_to_cpu(pr->count) *
				sizeof(*hw->blk[block_id].prof_redir.t);
			dst = hw->blk[block_id].prof_redir.t;
			dst_len = hw->blk[block_id].prof_redir.count *
				sizeof(*hw->blk[block_id].prof_redir.t);
			break;
		case ICE_SID_FLD_VEC_SW:
		case ICE_SID_FLD_VEC_FD:
		case ICE_SID_FLD_VEC_RSS:
		case ICE_SID_FLD_VEC_ACL:
		case ICE_SID_FLD_VEC_PE:
			es = sect;
			src = (u8 *)es->fv;
			sect_len = (u32)(le16_to_cpu(es->count) *
					 hw->blk[block_id].es.fvw) *
				sizeof(*hw->blk[block_id].es.t);
			dst = (u8 *)hw->blk[block_id].es.t;
			dst_len = (u32)(hw->blk[block_id].es.count *
					hw->blk[block_id].es.fvw) *
				sizeof(*hw->blk[block_id].es.t);
			break;
		default:
			return;
		}

		if (offset > dst_len)
			return;

		if ((offset + sect_len) > dst_len)
			sect_len = dst_len - offset;

		memcpy(dst + offset, src, sect_len);
		offset += sect_len;
		sect = ice_pkg_enum_section(NULL, &state, sid);
	}
}

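/**
 * ice_fill_blk_tbls - fill all blocks' shadow tables from the package
 * @hw: pointer to the hardware structure
 *
 * Reads the XLT1, XLT2, profile TCAM, profile redirection and extraction
 * sequence sections for every block, then initializes the software database.
 */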
void ice_fill_blk_tbls(struct ice_hw *hw)
{
	u8 i;

	for (i = 0; i < ICE_BLK_COUNT; i++) {
		enum ice_block blk_id = (enum ice_block)i;

		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
	}

	ice_init_sw_db(hw);
}

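/**
 * ice_free_prof_map - free the profile map list for a block
 * @hw: pointer to the hardware structure
 * @blk_idx: HW block index
 */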
static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
{
	struct ice_es *es = &hw->blk[blk_idx].es;
	struct ice_prof_map *del, *tmp;

	mutex_lock(&es->prof_map_lock);
	list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
		list_del(&del->list);
		devm_kfree(ice_hw_to_dev(hw), del);
	}
	INIT_LIST_HEAD(&es->prof_map);
	mutex_unlock(&es->prof_map_lock);
}

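/**
 * ice_free_flow_profs - free flow profile entries for a block
 * @hw: pointer to the hardware structure
 * @blk_idx: HW block index
 */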
static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
{
	struct ice_flow_prof *p, *tmp;

	mutex_lock(&hw->fl_profs_locks[blk_idx]);
	list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
		struct ice_flow_entry *e, *t;

		list_for_each_entry_safe(e, t, &p->entries, l_entry)
			ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
					   ICE_FLOW_ENTRY_HNDL(e));

		list_del(&p->l_entry);

		mutex_destroy(&p->entries_lock);
		devm_kfree(ice_hw_to_dev(hw), p);
	}
	mutex_unlock(&hw->fl_profs_locks[blk_idx]);

	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
}

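/**
 * ice_free_vsig_tbl - free all in-use entries in a block's VSIG table
 * @hw: pointer to the hardware structure
 * @blk: the HW block on which to free the VSIG table entries
 */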
static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
{
	u16 i;

	if (!hw->blk[blk].xlt2.vsig_tbl)
		return;

	for (i = 1; i < ICE_MAX_VSIGS; i++)
		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
			ice_vsig_free(hw, blk, i);
}

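/**
 * ice_free_hw_tbls - free hardware table memory
 * @hw: pointer to the hardware structure
 */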
void ice_free_hw_tbls(struct ice_hw *hw)
{
	struct ice_rss_cfg *r, *rt;
	u8 i;

	for (i = 0; i < ICE_BLK_COUNT; i++) {
		if (hw->blk[i].is_list_init) {
			struct ice_es *es = &hw->blk[i].es;

			ice_free_prof_map(hw, i);
			mutex_destroy(&es->prof_map_lock);

			ice_free_flow_profs(hw, i);
			mutex_destroy(&hw->fl_profs_locks[i]);

			hw->blk[i].is_list_init = false;
		}
		ice_free_vsig_tbl(hw, (enum ice_block)i);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
	}

	list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
		list_del(&r->l_entry);
		devm_kfree(ice_hw_to_dev(hw), r);
	}
	mutex_destroy(&hw->rss_locks);
	ice_shutdown_all_prof_masks(hw);
	memset(hw->blk, 0, sizeof(hw->blk));
}

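/**
 * ice_init_flow_profs - initialize flow profile lock and list heads
 * @hw: pointer to the hardware structure
 * @blk_idx: HW block index
 */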
static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
{
	mutex_init(&hw->fl_profs_locks[blk_idx]);
	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
}

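/**
 * ice_clear_hw_tbls - clear HW tables and flow profiles
 * @hw: pointer to the hardware structure
 */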
void ice_clear_hw_tbls(struct ice_hw *hw)
{
	u8 i;

	for (i = 0; i < ICE_BLK_COUNT; i++) {
		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
		struct ice_prof_tcam *prof = &hw->blk[i].prof;
		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
		struct ice_es *es = &hw->blk[i].es;

		if (hw->blk[i].is_list_init) {
			ice_free_prof_map(hw, i);
			ice_free_flow_profs(hw, i);
		}

		ice_free_vsig_tbl(hw, (enum ice_block)i);

		memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
		memset(xlt1->ptg_tbl, 0,
		       ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
		memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));

		memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
		memset(xlt2->vsig_tbl, 0,
		       xlt2->count * sizeof(*xlt2->vsig_tbl));
		memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));

		memset(prof->t, 0, prof->count * sizeof(*prof->t));
		memset(prof_redir->t, 0,
		       prof_redir->count * sizeof(*prof_redir->t));

		memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
		memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
		memset(es->written, 0, es->count * sizeof(*es->written));
		memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
	}
}

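/**
 * ice_init_hw_tbls - init hardware table memory
 * @hw: pointer to the hardware structure
 */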
enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
{
	u8 i;

	mutex_init(&hw->rss_locks);
	INIT_LIST_HEAD(&hw->rss_list_head);
	ice_init_all_prof_masks(hw);
	for (i = 0; i < ICE_BLK_COUNT; i++) {
		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
		struct ice_prof_tcam *prof = &hw->blk[i].prof;
		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
		struct ice_es *es = &hw->blk[i].es;
		u16 j;

		if (hw->blk[i].is_list_init)
			continue;

		ice_init_flow_profs(hw, i);
		mutex_init(&es->prof_map_lock);
		INIT_LIST_HEAD(&es->prof_map);
		hw->blk[i].is_list_init = true;

		hw->blk[i].overwrite = blk_sizes[i].overwrite;
		es->reverse = blk_sizes[i].reverse;

		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
		xlt1->count = blk_sizes[i].xlt1;

		xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
					    sizeof(*xlt1->ptypes), GFP_KERNEL);
		if (!xlt1->ptypes)
			goto err;

		xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
					     sizeof(*xlt1->ptg_tbl),
					     GFP_KERNEL);
		if (!xlt1->ptg_tbl)
			goto err;

		xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
				       sizeof(*xlt1->t), GFP_KERNEL);
		if (!xlt1->t)
			goto err;

		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
		xlt2->count = blk_sizes[i].xlt2;

		xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
					  sizeof(*xlt2->vsis), GFP_KERNEL);
		if (!xlt2->vsis)
			goto err;

		xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
					      sizeof(*xlt2->vsig_tbl),
					      GFP_KERNEL);
		if (!xlt2->vsig_tbl)
			goto err;

		for (j = 0; j < xlt2->count; j++)
			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);

		xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
				       sizeof(*xlt2->t), GFP_KERNEL);
		if (!xlt2->t)
			goto err;

		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
		prof->count = blk_sizes[i].prof_tcam;
		prof->max_prof_id = blk_sizes[i].prof_id;
		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
		prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
				       sizeof(*prof->t), GFP_KERNEL);
		if (!prof->t)
			goto err;

		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
		prof_redir->count = blk_sizes[i].prof_redir;
		prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
					     prof_redir->count,
					     sizeof(*prof_redir->t),
					     GFP_KERNEL);
		if (!prof_redir->t)
			goto err;

		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
		es->count = blk_sizes[i].es;
		es->fvw = blk_sizes[i].fvw;
		es->t = devm_kcalloc(ice_hw_to_dev(hw),
				     (u32)(es->count * es->fvw),
				     sizeof(*es->t), GFP_KERNEL);
		if (!es->t)
			goto err;

		es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
					     sizeof(*es->ref_count),
					     GFP_KERNEL);
		if (!es->ref_count)
			goto err;

		es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
					   sizeof(*es->written), GFP_KERNEL);
		if (!es->written)
			goto err;

		es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
					    sizeof(*es->mask_ena), GFP_KERNEL);
		if (!es->mask_ena)
			goto err;
	}
	return 0;

err:
	ice_free_hw_tbls(hw);
	return ICE_ERR_NO_MEMORY;
}

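/**
 * ice_prof_gen_key - generate a profile ID TCAM key
 * @hw: pointer to the HW struct
 * @blk: the block in which to write the profile ID
 * @ptg: packet type group (PTG) portion of key
 * @vsig: VSIG portion of key
 * @cdid: CDID portion of key
 * @flags: flag portion of key
 * @vl_msk: valid mask
 * @dc_msk: don't care mask
 * @nm_msk: never match mask
 * @key: output of profile ID key
 */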
static enum ice_status
ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
		 u8 key[ICE_TCAM_KEY_SZ])
{
	struct ice_prof_id_key inkey;

	inkey.xlt1 = ptg;
	inkey.xlt2_cdid = cpu_to_le16(vsig);
	inkey.flags = cpu_to_le16(flags);

	switch (hw->blk[blk].prof.cdid_bits) {
	case 0:
		break;
	case 2:
#define ICE_CD_2_M 0xC000U
#define ICE_CD_2_S 14
		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
		break;
	case 4:
#define ICE_CD_4_M 0xF000U
#define ICE_CD_4_S 12
		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
		break;
	case 8:
#define ICE_CD_8_M 0xFF00U
#define ICE_CD_8_S 8
		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
		break;
	default:
		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
		break;
	}

	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
}

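/**
 * ice_tcam_write_entry - write TCAM entry
 * @hw: pointer to the HW struct
 * @blk: the block in which to write the profile ID
 * @idx: the entry index to write to
 * @prof_id: profile ID
 * @ptg: packet type group (PTG) portion of key
 * @vsig: VSIG portion of key
 * @cdid: CDID portion of key
 * @flags: flag portion of key
 * @vl_msk: valid mask
 * @dc_msk: don't care mask
 * @nm_msk: never match mask
 */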
static enum ice_status
ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
{
	enum ice_status status;

	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
	if (!status) {
		hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
		hw->blk[blk].prof.t[idx].prof_id = prof_id;
	}

	return status;
}

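/**
 * ice_vsig_get_ref - returns number of VSIs belonging to a VSIG
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: VSIG to query
 * @refs: pointer to variable to receive the reference count
 */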
static enum ice_status
ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_vsi *ptr;

	*refs = 0;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
		return ICE_ERR_DOES_NOT_EXIST;

	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	while (ptr) {
		(*refs)++;
		ptr = ptr->next_vsi;
	}

	return 0;
}

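/**
 * ice_has_prof_vsig - check to see if VSIG has a specific profile
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: VSIG to check against
 * @hdl: profile handle
 */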
static bool
ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_prof *ent;

	list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    list)
		if (ent->profile_cookie == hdl)
			return true;

	ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
		  vsig);
	return false;
}

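/**
 * ice_prof_bld_es - build profile ID extraction sequence changes
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @bld: the update package buffer build to add to
 * @chgs: the list of changes to make in hardware
 */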
static enum ice_status
ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
		struct ice_buf_build *bld, struct list_head *chgs)
{
	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
	struct ice_chs_chg *tmp;

	list_for_each_entry(tmp, chgs, list_entry)
		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
			struct ice_pkg_es *p;
			u32 id;

			id = ice_sect_id(blk, ICE_VEC_TBL);
			p = ice_pkg_buf_alloc_section(bld, id,
						      struct_size(p, es, 1) +
						      vec_size -
						      sizeof(p->es[0]));
			if (!p)
				return ICE_ERR_MAX_LIMIT;

			p->count = cpu_to_le16(1);
			p->offset = cpu_to_le16(tmp->prof_id);

			memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
		}

	return 0;
}

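/**
 * ice_prof_bld_tcam - build profile ID TCAM changes
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @bld: the update package buffer build to add to
 * @chgs: the list of changes to make in hardware
 */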
static enum ice_status
ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
		  struct ice_buf_build *bld, struct list_head *chgs)
{
	struct ice_chs_chg *tmp;

	list_for_each_entry(tmp, chgs, list_entry)
		if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
			struct ice_prof_id_section *p;
			u32 id;

			id = ice_sect_id(blk, ICE_PROF_TCAM);
			p = ice_pkg_buf_alloc_section(bld, id,
						      struct_size(p, entry, 1));
			if (!p)
				return ICE_ERR_MAX_LIMIT;

			p->count = cpu_to_le16(1);
			p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
			p->entry[0].prof_id = tmp->prof_id;

			memcpy(p->entry[0].key,
			       &hw->blk[blk].prof.t[tmp->tcam_idx].key,
			       sizeof(hw->blk[blk].prof.t->key));
		}

	return 0;
}

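/**
 * ice_prof_bld_xlt1 - build XLT1 changes
 * @blk: hardware block
 * @bld: the update package buffer build to add to
 * @chgs: the list of changes to make in hardware
 */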
static enum ice_status
ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
		  struct list_head *chgs)
{
	struct ice_chs_chg *tmp;

	list_for_each_entry(tmp, chgs, list_entry)
		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
			struct ice_xlt1_section *p;
			u32 id;

			id = ice_sect_id(blk, ICE_XLT1);
			p = ice_pkg_buf_alloc_section(bld, id,
						      struct_size(p, value, 1));
			if (!p)
				return ICE_ERR_MAX_LIMIT;

			p->count = cpu_to_le16(1);
			p->offset = cpu_to_le16(tmp->ptype);
			p->value[0] = tmp->ptg;
		}

	return 0;
}

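/**
 * ice_prof_bld_xlt2 - build XLT2 changes
 * @blk: hardware block
 * @bld: the update package buffer build to add to
 * @chgs: the list of changes to make in hardware
 */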
static enum ice_status
ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
		  struct list_head *chgs)
{
	struct ice_chs_chg *tmp;

	list_for_each_entry(tmp, chgs, list_entry) {
		struct ice_xlt2_section *p;
		u32 id;

		switch (tmp->type) {
		case ICE_VSIG_ADD:
		case ICE_VSI_MOVE:
		case ICE_VSIG_REM:
			id = ice_sect_id(blk, ICE_XLT2);
			p = ice_pkg_buf_alloc_section(bld, id,
						      struct_size(p, value, 1));
			if (!p)
				return ICE_ERR_MAX_LIMIT;

			p->count = cpu_to_le16(1);
			p->offset = cpu_to_le16(tmp->vsi);
			p->value[0] = cpu_to_le16(tmp->vsig);
			break;
		default:
			break;
		}
	}

	return 0;
}

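/**
 * ice_upd_prof_hw - update hardware using the change list
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @chgs: the list of changes to make in hardware
 *
 * Counts the sections needed, builds an update package buffer and sends
 * it to the hardware in one shot.
 */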
static enum ice_status
ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
		struct list_head *chgs)
{
	struct ice_buf_build *b;
	struct ice_chs_chg *tmp;
	enum ice_status status;
	u16 pkg_sects;
	u16 xlt1 = 0;
	u16 xlt2 = 0;
	u16 tcam = 0;
	u16 es = 0;
	u16 sects;

	list_for_each_entry(tmp, chgs, list_entry) {
		switch (tmp->type) {
		case ICE_PTG_ES_ADD:
			if (tmp->add_ptg)
				xlt1++;
			if (tmp->add_prof)
				es++;
			break;
		case ICE_TCAM_ADD:
			tcam++;
			break;
		case ICE_VSIG_ADD:
		case ICE_VSI_MOVE:
		case ICE_VSIG_REM:
			xlt2++;
			break;
		default:
			break;
		}
	}
	sects = xlt1 + xlt2 + tcam + es;

	if (!sects)
		return 0;

	b = ice_pkg_buf_alloc(hw);
	if (!b)
		return ICE_ERR_NO_MEMORY;

	status = ice_pkg_buf_reserve_section(b, sects);
	if (status)
		goto error_tmp;

	if (es) {
		status = ice_prof_bld_es(hw, blk, b, chgs);
		if (status)
			goto error_tmp;
	}

	if (tcam) {
		status = ice_prof_bld_tcam(hw, blk, b, chgs);
		if (status)
			goto error_tmp;
	}

	if (xlt1) {
		status = ice_prof_bld_xlt1(blk, b, chgs);
		if (status)
			goto error_tmp;
	}

	if (xlt2) {
		status = ice_prof_bld_xlt2(blk, b, chgs);
		if (status)
			goto error_tmp;
	}

	pkg_sects = ice_pkg_buf_get_active_sections(b);
	if (!pkg_sects || pkg_sects != sects) {
		status = ICE_ERR_INVAL_SIZE;
		goto error_tmp;
	}

	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
	if (status == ICE_ERR_AQ_ERROR)
		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");

error_tmp:
	ice_pkg_buf_free(hw, b);
	return status;
}

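/**
 * ice_update_fd_mask - set Flow Director mask select registers
 * @hw: pointer to the HW struct
 * @prof_id: profile ID
 * @mask_sel: mask select bits
 */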
static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
{
	wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);

	ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
		  GLQF_FDMASK_SEL(prof_id), mask_sel);
}

struct ice_fd_src_dst_pair {
	u8 prot_id;
	u8 count;
	u16 off;
};

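/* Flow Director source/destination word pairs: protocol ID, number of
 * field vector words, and protocol header offset for each field.
 */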
static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
	{ ICE_PROT_IPV4_OF_OR_S, 2, 12 },
	{ ICE_PROT_IPV4_OF_OR_S, 2, 16 },

	{ ICE_PROT_IPV4_IL, 2, 12 },
	{ ICE_PROT_IPV4_IL, 2, 16 },

	{ ICE_PROT_IPV6_OF_OR_S, 8, 8 },
	{ ICE_PROT_IPV6_OF_OR_S, 8, 24 },

	{ ICE_PROT_IPV6_IL, 8, 8 },
	{ ICE_PROT_IPV6_IL, 8, 24 },

	{ ICE_PROT_TCP_IL, 1, 0 },
	{ ICE_PROT_TCP_IL, 1, 2 },

	{ ICE_PROT_UDP_OF, 1, 0 },
	{ ICE_PROT_UDP_OF, 1, 2 },

	{ ICE_PROT_UDP_IL_OR_S, 1, 0 },
	{ ICE_PROT_UDP_IL_OR_S, 1, 2 },

	{ ICE_PROT_SCTP_IL, 1, 0 },
	{ ICE_PROT_SCTP_IL, 1, 2 }
};

#define ICE_FD_SRC_DST_PAIR_COUNT	ARRAY_SIZE(ice_fd_pairs)

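/**
 * ice_update_fd_swap - set register appropriately for a FD FV extraction
 * @hw: pointer to the HW struct
 * @prof_id: profile ID
 * @es: extraction sequence (length of array is determined by the block)
 */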
static enum ice_status
ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
{
	DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
	u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
#define ICE_FD_FV_NOT_FOUND (-2)
	s8 first_free = ICE_FD_FV_NOT_FOUND;
	u8 used[ICE_MAX_FV_WORDS] = { 0 };
	s8 orig_free, si;
	u32 mask_sel = 0;
	u8 i, j, k;

	bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);

	for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
		if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
		    ICE_PROT_INVALID)
			first_free = i - 1;

		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
			if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
			    es[i].off == ice_fd_pairs[j].off) {
				set_bit(j, pair_list);
				pair_start[j] = i;
			}
	}

	orig_free = first_free;

	for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
		u8 bit1 = test_bit(i + 1, pair_list);
		u8 bit0 = test_bit(i, pair_list);

		if (bit0 ^ bit1) {
			u8 index;

			if (!bit0)
				index = i;
			else
				index = i + 1;

			if (first_free + 1 < (s8)ice_fd_pairs[index].count)
				return ICE_ERR_MAX_LIMIT;

			for (k = 0; k < ice_fd_pairs[index].count; k++) {
				es[first_free - k].prot_id =
					ice_fd_pairs[index].prot_id;
				es[first_free - k].off =
					ice_fd_pairs[index].off + (k * 2);

				if (k > first_free)
					return ICE_ERR_OUT_OF_RANGE;

				mask_sel |= BIT(first_free - k);
			}

			pair_start[index] = first_free;
			first_free -= ice_fd_pairs[index].count;
		}
	}

	si = hw->blk[ICE_BLK_FD].es.fvw - 1;
	while (si >= 0) {
		u8 indexes_used = 1;

#define ICE_SWAP_VALID	0x80
		used[si] = si | ICE_SWAP_VALID;

		if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
			si -= indexes_used;
			continue;
		}

		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
			if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
			    es[si].off == ice_fd_pairs[j].off) {
				u8 idx;

				idx = j + ((j % 2) ? -1 : 1);

				indexes_used = ice_fd_pairs[idx].count;
				for (k = 0; k < indexes_used; k++) {
					used[si - k] = (pair_start[idx] - k) |
						ICE_SWAP_VALID;
				}

				break;
			}

		si -= indexes_used;
	}

	for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
		u32 raw_swap = 0;
		u32 raw_in = 0;

		for (k = 0; k < 4; k++) {
			u8 idx;

			idx = (j * 4) + k;
			if (used[idx] && !(mask_sel & BIT(idx))) {
				raw_swap |= used[idx] << (k * BITS_PER_BYTE);
#define ICE_INSET_DFLT 0x9f
				raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
			}
		}

		wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);

		ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
			  prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);

		wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);

		ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
			  prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
	}

	ice_update_fd_mask(hw, prof_id, 0);

	return 0;
}

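/* attribute entries, indexed by enum ice_ptype_attrib_type */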
static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
	{ ICE_GTP_PDU_EH,	ICE_GTP_PDU_FLAG_MASK },
	{ ICE_GTP_SESSION,	ICE_GTP_FLAGS_MASK },
	{ ICE_GTP_DOWNLINK,	ICE_GTP_FLAGS_MASK },
	{ ICE_GTP_UPLINK,	ICE_GTP_FLAGS_MASK },
};

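/**
 * ice_get_ptype_attrib_info - get PTYPE attribute information
 * @type: attribute type
 * @info: pointer to variable to receive the attribute information
 */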
static void
ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
			  struct ice_ptype_attrib_info *info)
{
	*info = ice_ptype_attributes[type];
}

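/**
 * ice_add_prof_attrib - add any PTG with attributes to profile
 * @prof: pointer to the profile to which PTG entries will be added
 * @ptg: PTG to be added
 * @ptype: PTYPE that needs to be looked up
 * @attr: array of attributes that will be considered
 * @attr_cnt: number of elements in the attribute array
 */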
static enum ice_status
ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
		    const struct ice_ptype_attributes *attr, u16 attr_cnt)
{
	bool found = false;
	u16 i;

	for (i = 0; i < attr_cnt; i++)
		if (attr[i].ptype == ptype) {
			found = true;

			prof->ptg[prof->ptg_cnt] = ptg;
			ice_get_ptype_attrib_info(attr[i].attrib,
						  &prof->attr[prof->ptg_cnt]);

			if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
				return ICE_ERR_MAX_LIMIT;
		}

	if (!found)
		return ICE_ERR_DOES_NOT_EXIST;

	return 0;
}

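/**
 * ice_add_prof - add profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
 * @attr: array of attributes
 * @attr_cnt: number of elements in the attr array
 * @es: extraction sequence (length of array is determined by the block)
 * @masks: mask for extraction sequence
 *
 * This function registers a profile, which matches a set of PTYPES with a
 * particular extraction sequence.
 */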
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
	     const struct ice_ptype_attributes *attr, u16 attr_cnt,
	     struct ice_fv_word *es, u16 *masks)
{
	u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
	struct ice_prof_map *prof;
	enum ice_status status;
	u8 byte = 0;
	u8 prof_id;

	bitmap_zero(ptgs_used, ICE_XLT1_CNT);

	mutex_lock(&hw->blk[blk].es.prof_map_lock);

	status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
	if (status) {
		status = ice_alloc_prof_id(hw, blk, &prof_id);
		if (status)
			goto err_ice_add_prof;
		if (blk == ICE_BLK_FD) {
			status = ice_update_fd_swap(hw, prof_id, es);
			if (status)
				goto err_ice_add_prof;
		}
		status = ice_update_prof_masking(hw, blk, prof_id, masks);
		if (status)
			goto err_ice_add_prof;

		ice_write_es(hw, blk, prof_id, es);
	}

	ice_prof_inc_ref(hw, blk, prof_id);

	prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
	if (!prof) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_prof;
	}

	prof->profile_cookie = id;
	prof->prof_id = prof_id;
	prof->ptg_cnt = 0;
	prof->context = 0;

	while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
		u8 bit;

		if (!ptypes[byte]) {
			bytes--;
			byte++;
			continue;
		}

		for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
				 BITS_PER_BYTE) {
			u16 ptype;
			u8 ptg;

			ptype = byte * BITS_PER_BYTE + bit;

			if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
				continue;

			if (test_bit(ptg, ptgs_used))
				continue;

			set_bit(ptg, ptgs_used);

			status = ice_add_prof_attrib(prof, ptg, ptype,
						     attr, attr_cnt);
			if (status == ICE_ERR_MAX_LIMIT)
				break;
			if (status) {
				prof->ptg[prof->ptg_cnt] = ptg;
				prof->attr[prof->ptg_cnt].flags = 0;
				prof->attr[prof->ptg_cnt].mask = 0;

				if (++prof->ptg_cnt >=
				    ICE_MAX_PTG_PER_PROFILE)
					break;
			}
		}

		bytes--;
		byte++;
	}

	list_add(&prof->list, &hw->blk[blk].es.prof_map);
	status = 0;

err_ice_add_prof:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;
}

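/**
 * ice_search_prof_id - search for a profile tracking ID
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
 * The profile map lock should be held before calling this function.
 */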
static struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_prof_map *entry = NULL;
	struct ice_prof_map *map;

	list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
		if (map->profile_cookie == id) {
			entry = map;
			break;
		}

	return entry;
}

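/**
 * ice_vsig_prof_id_count - count profiles in a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG to check
 */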
static u16
ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
	struct ice_vsig_prof *p;

	list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    list)
		count++;

	return count;
}

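/**
 * ice_rel_tcam_idx - release a TCAM index
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @idx: the index to release
 */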
static enum ice_status
ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
{
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
	enum ice_status status;

	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
				      dc_msk, nm_msk);
	if (status)
		return status;

	status = ice_free_tcam_ent(hw, blk, idx);

	return status;
}

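/**
 * ice_rem_prof_id - remove one profile from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @prof: pointer to profile structure to remove
 */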
static enum ice_status
ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
		struct ice_vsig_prof *prof)
{
	enum ice_status status;
	u16 i;

	for (i = 0; i < prof->tcam_count; i++)
		if (prof->tcam[i].in_use) {
			prof->tcam[i].in_use = false;
			status = ice_rel_tcam_idx(hw, blk,
						  prof->tcam[i].tcam_idx);
			if (status)
				return ICE_ERR_HW_TABLE;
		}

	return 0;
}

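/**
 * ice_rem_vsig - remove VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to remove
 * @chg: the change list
 */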
static enum ice_status
ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
	     struct list_head *chg)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_vsi *vsi_cur;
	struct ice_vsig_prof *d, *t;
	enum ice_status status;

	list_for_each_entry_safe(d, t,
				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
				 list) {
		status = ice_rem_prof_id(hw, blk, d);
		if (status)
			return status;

		list_del(&d->list);
		devm_kfree(ice_hw_to_dev(hw), d);
	}

	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;

	if (vsi_cur)
		do {
			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
			struct ice_chs_chg *p;

			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
					 GFP_KERNEL);
			if (!p)
				return ICE_ERR_NO_MEMORY;

			p->type = ICE_VSIG_REM;
			p->orig_vsig = vsig;
			p->vsig = ICE_DEFAULT_VSIG;
			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;

			list_add(&p->list_entry, chg);

			vsi_cur = tmp;
		} while (vsi_cur);

	return ice_vsig_free(hw, blk, vsig);
}

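/**
 * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG to remove the profile from
 * @hdl: profile handle indicating which profile to remove
 * @chg: list to receive a record of changes
 */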
static enum ice_status
ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
		     struct list_head *chg)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;
	struct ice_vsig_prof *p, *t;
	enum ice_status status;

	list_for_each_entry_safe(p, t,
				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
				 list)
		if (p->profile_cookie == hdl) {
			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
				return ice_rem_vsig(hw, blk, vsig, chg);

			status = ice_rem_prof_id(hw, blk, p);
			if (!status) {
				list_del(&p->list);
				devm_kfree(ice_hw_to_dev(hw), p);
			}
			return status;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}

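/**
 * ice_rem_flow_all - remove all flows with a particular profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 */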
static enum ice_status
ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_chs_chg *del, *tmp;
	enum ice_status status;
	struct list_head chg;
	u16 i;

	INIT_LIST_HEAD(&chg);

	for (i = 1; i < ICE_MAX_VSIGS; i++)
		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
			if (ice_has_prof_vsig(hw, blk, i, id)) {
				status = ice_rem_prof_id_vsig(hw, blk, i, id,
							      &chg);
				if (status)
					goto err_ice_rem_flow_all;
			}
		}

	status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_flow_all:
	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	return status;
}

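/**
 * ice_rem_prof - remove profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
 * This will remove the profile specified by the ID parameter, which was
 * previously created through ice_add_prof. If any existing entries
 * are associated with this profile, they will be removed as well.
 */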
enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
	struct ice_prof_map *pmap;
	enum ice_status status;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);

	pmap = ice_search_prof_id(hw, blk, id);
	if (!pmap) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_rem_prof;
	}

	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
	if (status)
		goto err_ice_rem_prof;

	ice_prof_dec_ref(hw, blk, pmap->prof_id);

	list_del(&pmap->list);
	devm_kfree(ice_hw_to_dev(hw), pmap);

err_ice_rem_prof:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;
}

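/**
 * ice_get_prof - get profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: profile handle
 * @chg: change list
 */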
static enum ice_status
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
	     struct list_head *chg)
{
	enum ice_status status = 0;
	struct ice_prof_map *map;
	struct ice_chs_chg *p;
	u16 i;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);

	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_get_prof;
	}

	for (i = 0; i < map->ptg_cnt; i++)
		if (!hw->blk[blk].es.written[map->prof_id]) {
			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
					 GFP_KERNEL);
			if (!p) {
				status = ICE_ERR_NO_MEMORY;
				goto err_ice_get_prof;
			}

			p->type = ICE_PTG_ES_ADD;
			p->ptype = 0;
			p->ptg = map->ptg[i];
			p->add_ptg = 0;

			p->add_prof = 1;
			p->prof_id = map->prof_id;

			hw->blk[blk].es.written[map->prof_id] = true;

			list_add(&p->list_entry, chg);
		}

err_ice_get_prof:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);

	return status;
}

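/**
 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG from which to copy the list
 * @lst: output list
 *
 * This routine makes a copy of the list of profiles in the specified VSIG.
 */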
static enum ice_status
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
		   struct list_head *lst)
{
	struct ice_vsig_prof *ent1, *ent2;
	u16 idx = vsig & ICE_VSIG_IDX_M;

	list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    list) {
		struct ice_vsig_prof *p;

		p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
				 GFP_KERNEL);
		if (!p)
			goto err_ice_get_profs_vsig;

		list_add_tail(&p->list, lst);
	}

	return 0;

err_ice_get_profs_vsig:
	list_for_each_entry_safe(ent1, ent2, lst, list) {
		list_del(&ent1->list);
		devm_kfree(ice_hw_to_dev(hw), ent1);
	}

	return ICE_ERR_NO_MEMORY;
}

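/**
 * ice_add_prof_to_lst - add profile entry to a list
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @lst: the list to be added to
 * @hdl: profile handle of entry to add
 */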
static enum ice_status
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
		    struct list_head *lst, u64 hdl)
{
	enum ice_status status = 0;
	struct ice_prof_map *map;
	struct ice_vsig_prof *p;
	u16 i;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_add_prof_to_lst;
	}

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p) {
		status = ICE_ERR_NO_MEMORY;
		goto err_ice_add_prof_to_lst;
	}

	p->profile_cookie = map->profile_cookie;
	p->prof_id = map->prof_id;
	p->tcam_count = map->ptg_cnt;

	for (i = 0; i < map->ptg_cnt; i++) {
		p->tcam[i].prof_id = map->prof_id;
		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
		p->tcam[i].ptg = map->ptg[i];
	}

	list_add(&p->list, lst);

err_ice_add_prof_to_lst:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;
}

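/**
 * ice_move_vsi - move VSI to a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to move
 * @vsig: the VSIG to move the VSI to
 * @chg: the change list
 */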
static enum ice_status
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
	     struct list_head *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;
	u16 orig_vsig;

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p)
		return ICE_ERR_NO_MEMORY;

	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
	if (!status)
		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);

	if (status) {
		devm_kfree(ice_hw_to_dev(hw), p);
		return status;
	}

	p->type = ICE_VSI_MOVE;
	p->vsi = vsi;
	p->orig_vsig = orig_vsig;
	p->vsig = vsig;

	list_add(&p->list_entry, chg);

	return 0;
}

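/**
 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
 * @hw: pointer to the HW struct
 * @idx: the index of the TCAM entry to remove
 * @chg: the list of change structures to search
 */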
static void
ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
{
	struct ice_chs_chg *pos, *tmp;

	list_for_each_entry_safe(tmp, pos, chg, list_entry)
		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
			list_del(&tmp->list_entry);
			devm_kfree(ice_hw_to_dev(hw), tmp);
		}
}

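/**
 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @enable: true to enable, false to disable
 * @vsig: the VSIG of the TCAM entry
 * @tcam: pointer to the TCAM info structure of the TCAM to disable
 * @chg: the change list
 *
 * This function appends an enable or disable TCAM entry in the change log.
 */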
static enum ice_status
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
		      u16 vsig, struct ice_tcam_inf *tcam,
		      struct list_head *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;

	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };

	if (!enable) {
		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);

		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
		tcam->tcam_idx = 0;
		tcam->in_use = 0;
		return status;
	}

	status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
				    &tcam->tcam_idx);
	if (status)
		return status;

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p)
		return ICE_ERR_NO_MEMORY;

	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
				      tcam->ptg, vsig, 0, tcam->attr.flags,
				      vl_msk, dc_msk, nm_msk);
	if (status)
		goto err_ice_prof_tcam_ena_dis;

	tcam->in_use = 1;

	p->type = ICE_TCAM_ADD;
	p->add_tcam_idx = true;
	p->prof_id = tcam->prof_id;
	p->ptg = tcam->ptg;
	p->vsig = 0;
	p->tcam_idx = tcam->tcam_idx;

	list_add(&p->list_entry, chg);

	return 0;

err_ice_prof_tcam_ena_dis:
	devm_kfree(ice_hw_to_dev(hw), p);
	return status;
}

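/**
 * ice_adj_prof_priorities - adjust profile based on priorities
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG for which to adjust profile priorities
 * @chg: the change list
 *
 * Walks the VSIG's profile list in priority order, enabling the first TCAM
 * entry seen for each PTG and disabling any lower-priority duplicates.
 */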
static enum ice_status
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
			struct list_head *chg)
{
	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
	struct ice_vsig_prof *t;
	enum ice_status status;
	u16 idx;

	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
	idx = vsig & ICE_VSIG_IDX_M;

	list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    list) {
		u16 i;

		for (i = 0; i < t->tcam_count; i++) {
			if (test_bit(t->tcam[i].ptg, ptgs_used) &&
			    t->tcam[i].in_use) {
				status = ice_prof_tcam_ena_dis(hw, blk, false,
							       vsig,
							       &t->tcam[i],
							       chg);
				if (status)
					return status;
			} else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
				   !t->tcam[i].in_use) {
				status = ice_prof_tcam_ena_dis(hw, blk, true,
							       vsig,
							       &t->tcam[i],
							       chg);
				if (status)
					return status;
			}

			set_bit(t->tcam[i].ptg, ptgs_used);
		}
	}

	return 0;
}

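/**
 * ice_add_prof_id_vsig - add profile to VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to which this profile is to be added
 * @hdl: the profile handle indicating the profile to add
 * @rev: true to add entries to the end of the list
 * @chg: the change list
 */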
static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
		     bool rev, struct list_head *chg)
{
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
	enum ice_status status = 0;
	struct ice_prof_map *map;
	struct ice_vsig_prof *t;
	struct ice_chs_chg *p;
	u16 vsig_idx, i;

	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
		return ICE_ERR_ALREADY_EXISTS;

	t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
	if (!t)
		return ICE_ERR_NO_MEMORY;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);

	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_add_prof_id_vsig;
	}

	t->profile_cookie = map->profile_cookie;
	t->prof_id = map->prof_id;
	t->tcam_count = map->ptg_cnt;

	for (i = 0; i < map->ptg_cnt; i++) {
		u16 tcam_idx;

		p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
		if (!p) {
			status = ICE_ERR_NO_MEMORY;
			goto err_ice_add_prof_id_vsig;
		}

		status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
					    &tcam_idx);
		if (status) {
			devm_kfree(ice_hw_to_dev(hw), p);
			goto err_ice_add_prof_id_vsig;
		}

		t->tcam[i].ptg = map->ptg[i];
		t->tcam[i].prof_id = map->prof_id;
		t->tcam[i].tcam_idx = tcam_idx;
		t->tcam[i].attr = map->attr[i];
		t->tcam[i].in_use = true;

		p->type = ICE_TCAM_ADD;
		p->add_tcam_idx = true;
		p->prof_id = t->tcam[i].prof_id;
		p->ptg = t->tcam[i].ptg;
		p->vsig = vsig;
		p->tcam_idx = t->tcam[i].tcam_idx;

		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
					      t->tcam[i].prof_id,
					      t->tcam[i].ptg, vsig, 0, 0,
					      vl_msk, dc_msk, nm_msk);
		if (status) {
			devm_kfree(ice_hw_to_dev(hw), p);
			goto err_ice_add_prof_id_vsig;
		}

		list_add(&p->list_entry, chg);
	}

	vsig_idx = vsig & ICE_VSIG_IDX_M;
	if (rev)
		list_add_tail(&t->list,
			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
	else
		list_add(&t->list,
			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);

	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;

err_ice_add_prof_id_vsig:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	devm_kfree(ice_hw_to_dev(hw), t);
	return status;
}

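/**
 * ice_create_prof_id_vsig - add a new VSIG with a single profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in the VSIG
 * @hdl: the profile handle of the profile that will be added to the VSIG
 * @chg: the change list
 */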
static enum ice_status
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
			struct list_head *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;
	u16 new_vsig;

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p)
		return ICE_ERR_NO_MEMORY;

	new_vsig = ice_vsig_alloc(hw, blk);
	if (!new_vsig) {
		status = ICE_ERR_HW_TABLE;
		goto err_ice_create_prof_id_vsig;
	}

	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	p->type = ICE_VSIG_ADD;
	p->vsi = vsi;
	p->orig_vsig = ICE_DEFAULT_VSIG;
	p->vsig = new_vsig;

	list_add(&p->list_entry, chg);

	return 0;

err_ice_create_prof_id_vsig:
	devm_kfree(ice_hw_to_dev(hw), p);
	return status;
}

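/**
 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in the VSIG
 * @lst: the list of profiles that will be added to the VSIG
 * @new_vsig: return of new VSIG
 * @chg: the change list
 */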
static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
			 struct list_head *lst, u16 *new_vsig,
			 struct list_head *chg)
{
	struct ice_vsig_prof *t;
	enum ice_status status;
	u16 vsig;

	vsig = ice_vsig_alloc(hw, blk);
	if (!vsig)
		return ICE_ERR_HW_TABLE;

	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
	if (status)
		return status;

	list_for_each_entry(t, lst, list) {
		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
					      true, chg);
		if (status)
			return status;
	}

	*new_vsig = vsig;

	return 0;
}

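/**
 * ice_find_prof_vsig - find a VSIG with a specific profile handle
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: the profile handle of the profile to search for
 * @vsig: returns the VSIG with the matching profile
 */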
static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
	struct ice_vsig_prof *t;
	enum ice_status status;
	struct list_head lst;

	INIT_LIST_HEAD(&lst);

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return false;

	t->profile_cookie = hdl;
	list_add(&t->list, &lst);

	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);

	list_del(&t->list);
	kfree(t);

	return !status;
}

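/**
 * ice_add_prof_id_flow - add profile flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to enable with the profile specified by ID
 * @hdl: profile handle
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by the ID parameter for the VSI specified. Once
 * successfully called, the flow will be enabled.
 */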
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct list_head union_lst;
	enum ice_status status;
	struct list_head chg;
	u16 vsig;

	INIT_LIST_HEAD(&union_lst);
	INIT_LIST_HEAD(&chg);

	status = ice_get_prof(hw, blk, hdl, &chg);
	if (status)
		return status;

	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool only_vsi;
		u16 or_vsig;
		u16 ref;

		or_vsig = vsig;

		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto err_ice_add_prof_id_flow;
		}

		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_add_prof_id_flow;
		only_vsi = (ref == 1);

		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
		if (status)
			goto err_ice_add_prof_id_flow;

		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
		if (status)
			goto err_ice_add_prof_id_flow;

		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
		if (!status) {
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			if (only_vsi) {
				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
				if (status)
					goto err_ice_add_prof_id_flow;
			}
		} else if (only_vsi) {
			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
						      &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			status = ice_create_vsig_from_lst(hw, blk, vsi,
							  &union_lst, &vsig,
							  &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	} else {
		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
							 &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	}

	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_add_prof_id_flow:
	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
		list_del(&del1->list);
		devm_kfree(ice_hw_to_dev(hw), del1);
	}

	return status;
}

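/**
 * ice_rem_prof_from_list - remove a profile from a list
 * @hw: pointer to the HW struct
 * @lst: list to remove the profile from
 * @hdl: the profile handle indicating the profile to remove
 */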
static enum ice_status
ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
{
	struct ice_vsig_prof *ent, *tmp;

	list_for_each_entry_safe(ent, tmp, lst, list)
		if (ent->profile_cookie == hdl) {
			list_del(&ent->list);
			devm_kfree(ice_hw_to_dev(hw), ent);
			return 0;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}

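/**
 * ice_rem_prof_id_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI from which to remove the profile specified by ID
 * @hdl: profile tracking handle
 *
 * Calling this function will update the hardware tables to remove the
 * profile indicated by the ID parameter for the VSI specified. Once
 * successfully called, the flow will be disabled.
 */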
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct list_head chg, copy;
	enum ice_status status;
	u16 vsig;

	INIT_LIST_HEAD(&copy);
	INIT_LIST_HEAD(&chg);

	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool last_profile;
		bool only_vsi;
		u16 ref;

		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_rem_prof_id_flow;
		only_vsi = (ref == 1);

		if (only_vsi) {
			if (last_profile) {
				status = ice_rem_vsig(hw, blk, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				status = ice_rem_prof_id_vsig(hw, blk, vsig,
							      hdl, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}

		} else {
			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
			if (status)
				goto err_ice_rem_prof_id_flow;

			status = ice_rem_prof_from_list(hw, &copy, hdl);
			if (status)
				goto err_ice_rem_prof_id_flow;

			if (list_empty(&copy)) {
				status = ice_move_vsi(hw, blk, vsi,
						      ICE_DEFAULT_VSIG, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
							    &vsig)) {
				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				status = ice_create_vsig_from_lst(hw, blk, vsi,
								  &copy, &vsig,
								  &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}
		}
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_prof_id_flow:
	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	list_for_each_entry_safe(del1, tmp1, &copy, list) {
		list_del(&del1->list);
		devm_kfree(ice_hw_to_dev(hw), del1);
	}

	return status;
}