#include <linux/skbuff.h>
#include <net/psample.h>
#include "en/mapping.h"
#include "esw/sample.h"
#include "eswitch.h"
#include "en_tc.h"
#include "fs_core.h"

#define MLX5_ESW_VPORT_TBL_SIZE_SAMPLE (64 * 1024)

static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE_SAMPLE,
	.max_num_groups = 0,
	.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP,
};

struct mlx5_esw_psample {
	struct mlx5e_priv *priv;
	struct mlx5_flow_table *termtbl;
	struct mlx5_flow_handle *termtbl_rule;
	DECLARE_HASHTABLE(hashtbl, 8);
	struct mutex ht_lock; /* protects hashtbl */
	DECLARE_HASHTABLE(restore_hashtbl, 8);
	struct mutex restore_lock; /* protects restore_hashtbl */
};

struct mlx5_sampler {
	struct hlist_node hlist;
	u32 sampler_id;
	u32 sample_ratio;
	u32 sample_table_id;
	u32 default_table_id;
	int count;
};

struct mlx5_sample_flow {
	struct mlx5_sampler *sampler;
	struct mlx5_sample_restore *restore;
	struct mlx5_flow_attr *pre_attr;
	struct mlx5_flow_handle *pre_rule;
	struct mlx5_flow_handle *rule;
};

struct mlx5_sample_restore {
	struct hlist_node hlist;
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *rule;
	u32 obj_id;
	int count;
};
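
/* The sample table is a termination table with a single catch-all rule that
 * forwards sampled packets to the eswitch manager vport, so they are
 * delivered to software.
 */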
static int
sampler_termtbl_create(struct mlx5_esw_psample *esw_psample)
{
	struct mlx5_core_dev *dev = esw_psample->priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_act act = {};
	int err;

	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, termination_table)) {
		mlx5_core_warn(dev, "termination table is not supported\n");
		return -EOPNOTSUPP;
	}

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		mlx5_core_warn(dev, "failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.prio = FDB_SLOW_PATH;
	ft_attr.max_fte = 1;
	ft_attr.level = 1;
	esw_psample->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
	if (IS_ERR(esw_psample->termtbl)) {
		err = PTR_ERR(esw_psample->termtbl);
		mlx5_core_warn(dev, "failed to create termtbl, err: %d\n", err);
		return err;
	}

	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	esw_psample->termtbl_rule = mlx5_add_flow_rules(esw_psample->termtbl, NULL, &act, &dest, 1);
	if (IS_ERR(esw_psample->termtbl_rule)) {
		err = PTR_ERR(esw_psample->termtbl_rule);
		mlx5_core_warn(dev, "failed to create termtbl rule, err: %d\n", err);
		mlx5_destroy_flow_table(esw_psample->termtbl);
		return err;
	}

	return 0;
}

static void
sampler_termtbl_destroy(struct mlx5_esw_psample *esw_psample)
{
	mlx5_del_flow_rules(esw_psample->termtbl_rule);
	mlx5_destroy_flow_table(esw_psample->termtbl);
}

static int
sampler_obj_create(struct mlx5_core_dev *mdev, struct mlx5_sampler *sampler)
{
	u32 in[MLX5_ST_SZ_DW(create_sampler_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u64 general_obj_types;
	void *obj;
	int err;

	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
	if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER))
		return -EOPNOTSUPP;
	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
		return -EOPNOTSUPP;

	obj = MLX5_ADDR_OF(create_sampler_obj_in, in, sampler_object);
	MLX5_SET(sampler_obj, obj, table_type, FS_FT_FDB);
	MLX5_SET(sampler_obj, obj, ignore_flow_level, 1);
	MLX5_SET(sampler_obj, obj, level, 1);
	MLX5_SET(sampler_obj, obj, sample_ratio, sampler->sample_ratio);
	MLX5_SET(sampler_obj, obj, sample_table_id, sampler->sample_table_id);
	MLX5_SET(sampler_obj, obj, default_table_id, sampler->default_table_id);
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		sampler->sampler_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}

static void
sampler_obj_destroy(struct mlx5_core_dev *mdev, u32 sampler_id)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

static u32
sampler_hash(u32 sample_ratio, u32 default_table_id)
{
	return jhash_2words(sample_ratio, default_table_id, 0);
}

static int
sampler_cmp(u32 sample_ratio1, u32 default_table_id1, u32 sample_ratio2, u32 default_table_id2)
{
	return sample_ratio1 != sample_ratio2 || default_table_id1 != default_table_id2;
}
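
/* Sampler objects are shared: they are looked up in a hashtable keyed by
 * (sample_ratio, default_table_id) and reference counted under ht_lock.
 */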
static struct mlx5_sampler *
sampler_get(struct mlx5_esw_psample *esw_psample, u32 sample_ratio, u32 default_table_id)
{
	struct mlx5_sampler *sampler;
	u32 hash_key;
	int err;

	mutex_lock(&esw_psample->ht_lock);
	hash_key = sampler_hash(sample_ratio, default_table_id);
	hash_for_each_possible(esw_psample->hashtbl, sampler, hlist, hash_key)
		if (!sampler_cmp(sampler->sample_ratio, sampler->default_table_id,
				 sample_ratio, default_table_id))
			goto add_ref;

	sampler = kzalloc(sizeof(*sampler), GFP_KERNEL);
	if (!sampler) {
		err = -ENOMEM;
		goto err_alloc;
	}

	sampler->sample_table_id = esw_psample->termtbl->id;
	sampler->default_table_id = default_table_id;
	sampler->sample_ratio = sample_ratio;

	err = sampler_obj_create(esw_psample->priv->mdev, sampler);
	if (err)
		goto err_create;

	hash_add(esw_psample->hashtbl, &sampler->hlist, hash_key);

add_ref:
	sampler->count++;
	mutex_unlock(&esw_psample->ht_lock);
	return sampler;

err_create:
	kfree(sampler);
err_alloc:
	mutex_unlock(&esw_psample->ht_lock);
	return ERR_PTR(err);
}

static void
sampler_put(struct mlx5_esw_psample *esw_psample, struct mlx5_sampler *sampler)
{
	mutex_lock(&esw_psample->ht_lock);
	if (--sampler->count == 0) {
		hash_del(&sampler->hlist);
		sampler_obj_destroy(esw_psample->priv->mdev, sampler->sampler_id);
		kfree(sampler);
	}
	mutex_unlock(&esw_psample->ht_lock);
}

static struct mlx5_modify_hdr *
sample_metadata_rule_get(struct mlx5_core_dev *mdev, u32 obj_id)
{
	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
	struct mlx5_modify_hdr *modify_hdr;
	int err;

	err = mlx5e_tc_match_to_reg_set(mdev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
					CHAIN_TO_REG, obj_id);
	if (err)
		goto err_set_regc0;

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
					      mod_acts.num_actions,
					      mod_acts.actions);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		goto err_modify_hdr;
	}

	dealloc_mod_hdr_actions(&mod_acts);
	return modify_hdr;

err_modify_hdr:
	dealloc_mod_hdr_actions(&mod_acts);
err_set_regc0:
	return ERR_PTR(err);
}
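
/* Restore contexts are shared per restore object id. Each context holds a
 * modify header that writes the object id into reg_c0 (CHAIN_TO_REG) and a
 * restore rule, so sampled packets can be mapped back to their sample
 * parameters in software.
 */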
static struct mlx5_sample_restore *
sample_restore_get(struct mlx5_esw_psample *esw_psample, u32 obj_id)
{
	struct mlx5_core_dev *mdev = esw_psample->priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_sample_restore *restore;
	struct mlx5_modify_hdr *modify_hdr;
	int err;

	mutex_lock(&esw_psample->restore_lock);
	hash_for_each_possible(esw_psample->restore_hashtbl, restore, hlist, obj_id)
		if (restore->obj_id == obj_id)
			goto add_ref;

	restore = kzalloc(sizeof(*restore), GFP_KERNEL);
	if (!restore) {
		err = -ENOMEM;
		goto err_alloc;
	}
	restore->obj_id = obj_id;

	modify_hdr = sample_metadata_rule_get(mdev, obj_id);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		goto err_modify_hdr;
	}
	restore->modify_hdr = modify_hdr;

	restore->rule = esw_add_restore_rule(esw, obj_id);
	if (IS_ERR(restore->rule)) {
		err = PTR_ERR(restore->rule);
		goto err_restore;
	}

	hash_add(esw_psample->restore_hashtbl, &restore->hlist, obj_id);
add_ref:
	restore->count++;
	mutex_unlock(&esw_psample->restore_lock);
	return restore;

err_restore:
	mlx5_modify_header_dealloc(mdev, restore->modify_hdr);
err_modify_hdr:
	kfree(restore);
err_alloc:
	mutex_unlock(&esw_psample->restore_lock);
	return ERR_PTR(err);
}

static void
sample_restore_put(struct mlx5_esw_psample *esw_psample, struct mlx5_sample_restore *restore)
{
	mutex_lock(&esw_psample->restore_lock);
	if (--restore->count == 0)
		hash_del(&restore->hlist);
	mutex_unlock(&esw_psample->restore_lock);

	if (!restore->count) {
		mlx5_del_flow_rules(restore->rule);
		mlx5_modify_header_dealloc(esw_psample->priv->mdev, restore->modify_hdr);
		kfree(restore);
	}
}
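
/* Hand a sampled packet to the psample module. The group, rate and
 * truncation size were recovered from the packet's reg_c0 object mapping.
 */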
void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj)
{
	u32 trunc_size = mapped_obj->sample.trunc_size;
	struct psample_group psample_group = {};
	struct psample_metadata md = {};

	md.trunc_size = trunc_size ? min(trunc_size, skb->len) : skb->len;
	md.in_ifindex = skb->dev->ifindex;
	psample_group.group_num = mapped_obj->sample.group_id;
	psample_group.net = &init_net;
	skb_push(skb, skb->mac_len);

	psample_sample_packet(&psample_group, skb, mapped_obj->sample.rate, &md);
}
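
/* Sample offload model
 *
 * A TC flow with a sample action is split into two rules:
 *
 * 1. A "pre" rule, installed in the original <chain, prio> table. It carries
 *    the original match, sets the restore object id in reg_c0 via a modify
 *    header and forwards to a flow sampler object.
 *
 * 2. The original rule, installed in a default table allocated per
 *    <vport, chain, prio>. It carries the original match and the remaining
 *    actions, without the sample action.
 *
 * The sampler object samples packets at the configured ratio: sampled copies
 * are sent to the sample (termination) table, which forwards them to the
 * eswitch manager vport so they reach software and are handed to psample
 * (see mlx5_esw_sample_skb()), while traffic continues to the default table,
 * where the original match and actions are executed.
 */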
struct mlx5_flow_handle *
mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample,
			struct mlx5_flow_spec *spec,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_vport_tbl_attr per_vport_tbl_attr;
	struct mlx5_esw_flow_attr *pre_esw_attr;
	struct mlx5_mapped_obj restore_obj = {};
	struct mlx5_sample_flow *sample_flow;
	struct mlx5_sample_attr *sample_attr;
	struct mlx5_flow_table *default_tbl;
	struct mlx5_flow_attr *pre_attr;
	struct mlx5_eswitch *esw;
	u32 obj_id;
	int err;

	if (IS_ERR_OR_NULL(esw_psample))
		return ERR_PTR(-EOPNOTSUPP);
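
	/* If the slow path flag is set, the sample action is not offloaded to
	 * hardware; add a plain offloaded rule instead.
	 */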
	esw = esw_psample->priv->mdev->priv.eswitch;
	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL);
	if (!sample_flow)
		return ERR_PTR(-ENOMEM);
	esw_attr->sample->sample_flow = sample_flow;
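
	/* Allocate the default table per <vport, chain, prio>. A single
	 * shared default table would mix rules from different chains and
	 * prios that happen to use the same sampler object.
	 */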
	per_vport_tbl_attr.chain = attr->chain;
	per_vport_tbl_attr.prio = attr->prio;
	per_vport_tbl_attr.vport = esw_attr->in_rep->vport;
	per_vport_tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
	default_tbl = mlx5_esw_vporttbl_get(esw, &per_vport_tbl_attr);
	if (IS_ERR(default_tbl)) {
		err = PTR_ERR(default_tbl);
		goto err_default_tbl;
	}
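
	/* Install the original rule (original match and actions) in the
	 * per-vport default table.
	 */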
	esw_attr->sample->sample_default_tbl = default_tbl;
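
	/* Clear the source port metadata match before installing the rule;
	 * the default table is dedicated to this vport.
	 */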
	mlx5_eswitch_clear_rule_source_port(esw, spec);
	sample_flow->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(sample_flow->rule)) {
		err = PTR_ERR(sample_flow->rule);
		goto err_offload_rule;
	}
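
	/* Create or reuse the sampler object for this rate and default table. */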
	sample_flow->sampler = sampler_get(esw_psample, esw_attr->sample->rate, default_tbl->id);
	if (IS_ERR(sample_flow->sampler)) {
		err = PTR_ERR(sample_flow->sampler);
		goto err_sampler;
	}
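
	/* Allocate a reg_c0 object id that maps back to the sample parameters. */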
	restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE;
	restore_obj.sample.group_id = esw_attr->sample->group_num;
	restore_obj.sample.rate = esw_attr->sample->rate;
	restore_obj.sample.trunc_size = esw_attr->sample->trunc_size;
	err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
	if (err)
		goto err_obj_id;
	esw_attr->sample->restore_obj_id = obj_id;
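
	/* Create or reuse the sample restore context for this object id. */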
	sample_flow->restore = sample_restore_get(esw_psample, obj_id);
	if (IS_ERR(sample_flow->restore)) {
		err = PTR_ERR(sample_flow->restore);
		goto err_sample_restore;
	}
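
	/* Install the pre rule in the original <chain, prio> table: perform
	 * the original matches, set the restore object id via the modify
	 * header and forward to the sampler object.
	 */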
	pre_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!pre_attr) {
		err = -ENOMEM;
		goto err_alloc_flow_attr;
	}
	sample_attr = kzalloc(sizeof(*sample_attr), GFP_KERNEL);
	if (!sample_attr) {
		err = -ENOMEM;
		goto err_alloc_sample_attr;
	}
	pre_esw_attr = pre_attr->esw_attr;
	pre_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
	pre_attr->flags = MLX5_ESW_ATTR_FLAG_SAMPLE;
	pre_attr->chain = attr->chain;
	pre_attr->prio = attr->prio;
	pre_esw_attr->sample = sample_attr;
	pre_esw_attr->sample->sampler_id = sample_flow->sampler->sampler_id;
	pre_esw_attr->in_mdev = esw_attr->in_mdev;
	pre_esw_attr->in_rep = esw_attr->in_rep;
	sample_flow->pre_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, pre_attr);
	if (IS_ERR(sample_flow->pre_rule)) {
		err = PTR_ERR(sample_flow->pre_rule);
		goto err_pre_offload_rule;
	}
	sample_flow->pre_attr = pre_attr;

	return sample_flow->rule;

err_pre_offload_rule:
	kfree(sample_attr);
err_alloc_sample_attr:
	kfree(pre_attr);
err_alloc_flow_attr:
	sample_restore_put(esw_psample, sample_flow->restore);
err_sample_restore:
	mapping_remove(esw->offloads.reg_c0_obj_pool, obj_id);
err_obj_id:
	sampler_put(esw_psample, sample_flow->sampler);
err_sampler:
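	/* The rule was installed in the per-vport default table; clear chain
	 * and prio before deleting it.
	 */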
	attr->prio = 0;
	attr->chain = 0;
	mlx5_eswitch_del_offloaded_rule(esw, sample_flow->rule, attr);
err_offload_rule:
	mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
err_default_tbl:
	kfree(sample_flow);
	return ERR_PTR(err);
}

void
mlx5_esw_sample_unoffload(struct mlx5_esw_psample *esw_psample,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_sample_flow *sample_flow;
	struct mlx5_vport_tbl_attr tbl_attr;
	struct mlx5_flow_attr *pre_attr;
	struct mlx5_eswitch *esw;

	if (IS_ERR_OR_NULL(esw_psample))
		return;
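
	/* If the slow path flag is set, the sample action was never offloaded
	 * (see mlx5_esw_sample_offload()); only the plain rule needs deleting.
	 */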
	esw = esw_psample->priv->mdev->priv.eswitch;
	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
		return;
	}

	sample_flow = esw_attr->sample->sample_flow;
	pre_attr = sample_flow->pre_attr;
	memset(pre_attr, 0, sizeof(*pre_attr));
	mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, pre_attr);
	mlx5_eswitch_del_offloaded_rule(esw, sample_flow->rule, attr);

	sample_restore_put(esw_psample, sample_flow->restore);
	mapping_remove(esw->offloads.reg_c0_obj_pool, esw_attr->sample->restore_obj_id);
	sampler_put(esw_psample, sample_flow->sampler);
	tbl_attr.chain = attr->chain;
	tbl_attr.prio = attr->prio;
	tbl_attr.vport = esw_attr->in_rep->vport;
	tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
	mlx5_esw_vporttbl_put(esw, &tbl_attr);

	kfree(pre_attr->esw_attr->sample);
	kfree(pre_attr);
	kfree(sample_flow);
}

struct mlx5_esw_psample *
mlx5_esw_sample_init(struct mlx5e_priv *priv)
{
	struct mlx5_esw_psample *esw_psample;
	int err;

	esw_psample = kzalloc(sizeof(*esw_psample), GFP_KERNEL);
	if (!esw_psample)
		return ERR_PTR(-ENOMEM);
	esw_psample->priv = priv;
	err = sampler_termtbl_create(esw_psample);
	if (err)
		goto err_termtbl;

	mutex_init(&esw_psample->ht_lock);
	mutex_init(&esw_psample->restore_lock);

	return esw_psample;

err_termtbl:
	kfree(esw_psample);
	return ERR_PTR(err);
}

void
mlx5_esw_sample_cleanup(struct mlx5_esw_psample *esw_psample)
{
	if (IS_ERR_OR_NULL(esw_psample))
		return;

	mutex_destroy(&esw_psample->restore_lock);
	mutex_destroy(&esw_psample->ht_lock);
	sampler_termtbl_destroy(esw_psample);
	kfree(esw_psample);
}