// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_router.h"

#define MLXSW_SP_ROUTER_XM_M_VAL 16

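/* The XM splits an LPM lookup in two: the leading M-value bits of the
 * address select an M-index into the M-table (XRMT register), which in
 * turn bounds how many remaining prefix bits (the L-value) are matched
 * in the ML entries. With the IPv4 M-value of 16 used here, a /24 route
 * is programmed with an L-value of 24 - 16 = 8. IPv6 is not offloaded
 * through the XM, hence its M-value of 0.
 */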
static const u8 mlxsw_sp_router_xm_m_val[] = {
	[MLXSW_SP_L3_PROTO_IPV4] = MLXSW_SP_ROUTER_XM_M_VAL,
	[MLXSW_SP_L3_PROTO_IPV6] = 0,
};

#define MLXSW_SP_ROUTER_XM_L_VAL_MAX 16

struct mlxsw_sp_router_xm {
	bool ipv4_supported;
	bool ipv6_supported;
	unsigned int entries_size;
	struct rhashtable ltable_ht;
	struct rhashtable flush_ht; /* Stores items about to be flushed from cache */
	unsigned int flush_count;
	bool flush_all_mode;
};

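/* L-table node, one per M-index in use. lvalue_ref[] counts the FIB
 * entries referencing each L-value under this M-index; current_lvalue
 * caches the maximum referenced L-value, which is the value programmed
 * into the XRMT register for this M-index.
 */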
struct mlxsw_sp_router_xm_ltable_node {
	struct rhash_head ht_node; /* Member of router_xm->ltable_ht */
	u16 mindex;
	u8 current_lvalue;
	refcount_t refcnt;
	unsigned int lvalue_ref[MLXSW_SP_ROUTER_XM_L_VAL_MAX + 1];
};

static const struct rhashtable_params mlxsw_sp_router_xm_ltable_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_router_xm_ltable_node, mindex),
	.head_offset = offsetof(struct mlxsw_sp_router_xm_ltable_node, ht_node),
	.key_len = sizeof(u16),
	.automatic_shrinking = true,
};

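/* Flush key. The whole structure is used verbatim as the rhashtable key
 * (see key_len in the flush hashtable params below), so every byte of
 * 'addr', including the padding after short IPv4 addresses, has to be
 * initialized deterministically for lookups to match.
 */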
struct mlxsw_sp_router_xm_flush_info {
	bool all;
	enum mlxsw_sp_l3proto proto;
	u16 virtual_router;
	u8 prefix_len;
	unsigned char addr[sizeof(struct in6_addr)];
};

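/* Per-FIB-entry private data, sized via fib_entry_priv_size in the ops
 * structure below. It caches everything needed to maintain the L-table
 * and to flush the XLT cache once the entry is committed.
 */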
struct mlxsw_sp_router_xm_fib_entry {
	bool committed;
	struct mlxsw_sp_router_xm_ltable_node *ltable_node; /* Parent node */
	u16 mindex; /* Store for processing from commit op */
	u8 lvalue;
	struct mlxsw_sp_router_xm_flush_info flush_info;
};

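/* A single XMDR transaction can carry several route commands, which is
 * what enables bulking. The bound below uses the IPv4 command length,
 * the smaller of the two, so it is the upper limit on the number of
 * entries per transaction.
 */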
#define MLXSW_SP_ROUTE_LL_XM_ENTRIES_MAX \
	(MLXSW_REG_XMDR_TRANS_LEN / MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN)

struct mlxsw_sp_fib_entry_op_ctx_xm {
	bool initialized;
	char xmdr_pl[MLXSW_REG_XMDR_LEN];
	unsigned int trans_offset; /* Offset of the current command within
				    * the XMDR transaction.
				    */
	unsigned int trans_item_len; /* Length of the current command,
				      * used to advance 'trans_offset'
				      * when the next command is appended.
				      */
	unsigned int entries_count;
	struct mlxsw_sp_router_xm_fib_entry *entries[MLXSW_SP_ROUTE_LL_XM_ENTRIES_MAX];
};

static int mlxsw_sp_router_ll_xm_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
				      enum mlxsw_sp_l3proto proto)
{
	char rxlte_pl[MLXSW_REG_RXLTE_LEN];

	mlxsw_reg_rxlte_pack(rxlte_pl, vr_id,
			     (enum mlxsw_reg_rxlte_protocol) proto, true);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxlte), rxlte_pl);
}

static int mlxsw_sp_router_ll_xm_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralta), xralta_pl);
}

static int mlxsw_sp_router_ll_xm_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralst), xralst_pl);
}

static int mlxsw_sp_router_ll_xm_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xraltb), xraltb_pl);
}

static u16 mlxsw_sp_router_ll_xm_mindex_get4(const u32 addr)
{
	/* The M-index is built from the M-value (16) most significant
	 * bits of the IPv4 address, so shift the remaining bits away.
	 */
	return addr >> (32 - MLXSW_SP_ROUTER_XM_M_VAL);
}

static u16 mlxsw_sp_router_ll_xm_mindex_get6(const unsigned char *addr)
{
	/* IPv6 routes are not offloaded through the XM yet. */
	WARN_ON_ONCE(1);
	return 0;
}

static void mlxsw_sp_router_ll_xm_op_ctx_check_init(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						    struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
{
	if (op_ctx->initialized)
		return;
	op_ctx->initialized = true;

	/* Initialize the XMDR payload and transaction state once per op
	 * context; the commit op clears 'initialized' when it is done.
	 */
	mlxsw_reg_xmdr_pack(op_ctx_xm->xmdr_pl, true);
	op_ctx_xm->trans_offset = 0;
	op_ctx_xm->entries_count = 0;
}

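/* Pack one route command into the current XMDR transaction and record
 * the M-index, L-value and flush parameters in the entry's private data
 * for later use by the commit op and the cache flush machinery.
 */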
static void mlxsw_sp_router_ll_xm_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						 enum mlxsw_sp_l3proto proto,
						 enum mlxsw_sp_fib_entry_op op,
						 u16 virtual_router, u8 prefix_len,
						 unsigned char *addr,
						 struct mlxsw_sp_fib_entry_priv *priv)
{
	struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
	struct mlxsw_sp_router_xm_fib_entry *fib_entry = (void *) priv->priv;
	struct mlxsw_sp_router_xm_flush_info *flush_info;
	enum mlxsw_reg_xmdr_c_ltr_op xmdr_c_ltr_op;
	unsigned int len;

	mlxsw_sp_router_ll_xm_op_ctx_check_init(op_ctx, op_ctx_xm);

	switch (op) {
	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
		xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_WRITE;
		break;
	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
		xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_UPDATE;
		break;
	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
		xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_DELETE;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		len = mlxsw_reg_xmdr_c_ltr_pack4(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
						 op_ctx_xm->entries_count, xmdr_c_ltr_op,
						 virtual_router, prefix_len, (u32 *) addr);
		fib_entry->mindex = mlxsw_sp_router_ll_xm_mindex_get4(*((u32 *) addr));
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		len = mlxsw_reg_xmdr_c_ltr_pack6(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
						 op_ctx_xm->entries_count, xmdr_c_ltr_op,
						 virtual_router, prefix_len, addr);
		fib_entry->mindex = mlxsw_sp_router_ll_xm_mindex_get6(addr);
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}
	if (!op_ctx_xm->trans_offset)
		op_ctx_xm->trans_item_len = len;
	else
		WARN_ON_ONCE(op_ctx_xm->trans_item_len != len);

	op_ctx_xm->entries[op_ctx_xm->entries_count] = fib_entry;

	/* The L-value is the number of prefix bits beyond the M-value. */
	fib_entry->lvalue = prefix_len > mlxsw_sp_router_xm_m_val[proto] ?
			       prefix_len - mlxsw_sp_router_xm_m_val[proto] : 0;

	flush_info = &fib_entry->flush_info;
	flush_info->proto = proto;
	flush_info->virtual_router = virtual_router;
	flush_info->prefix_len = prefix_len;
	if (addr)
		memcpy(flush_info->addr, addr, sizeof(flush_info->addr));
	else
		memset(flush_info->addr, 0, sizeof(flush_info->addr));
}


static void
mlxsw_sp_router_ll_xm_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						enum mlxsw_reg_ralue_trap_action trap_action,
						u16 trap_id, u32 adjacency_index, u16 ecmp_size)
{
	struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;

	mlxsw_reg_xmdr_c_ltr_act_remote_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
					     trap_action, trap_id, adjacency_index, ecmp_size);
}

static void
mlxsw_sp_router_ll_xm_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					       enum mlxsw_reg_ralue_trap_action trap_action,
					       u16 trap_id, u16 local_erif)
{
	struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;

	mlxsw_reg_xmdr_c_ltr_act_local_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
					    trap_action, trap_id, local_erif);
}

static void
mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
{
	struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;

	mlxsw_reg_xmdr_c_ltr_act_ip2me_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset);
}

static void
mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						   u32 tunnel_ptr)
{
	struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;

	mlxsw_reg_xmdr_c_ltr_act_ip2me_tun_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
						tunnel_ptr);
}

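/* Look up the refcounted L-table node for an M-index, creating and
 * inserting a fresh node on first use. Callers pair this with
 * mlxsw_sp_router_xm_ltable_node_put() below.
 */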
static struct mlxsw_sp_router_xm_ltable_node *
mlxsw_sp_router_xm_ltable_node_get(struct mlxsw_sp_router_xm *router_xm, u16 mindex)
{
	struct mlxsw_sp_router_xm_ltable_node *ltable_node;
	int err;

	ltable_node = rhashtable_lookup_fast(&router_xm->ltable_ht, &mindex,
					     mlxsw_sp_router_xm_ltable_ht_params);
	if (ltable_node) {
		refcount_inc(&ltable_node->refcnt);
		return ltable_node;
	}
	ltable_node = kzalloc(sizeof(*ltable_node), GFP_KERNEL);
	if (!ltable_node)
		return ERR_PTR(-ENOMEM);
	ltable_node->mindex = mindex;
	refcount_set(&ltable_node->refcnt, 1);

	err = rhashtable_insert_fast(&router_xm->ltable_ht, &ltable_node->ht_node,
				     mlxsw_sp_router_xm_ltable_ht_params);
	if (err)
		goto err_insert;

	return ltable_node;

err_insert:
	kfree(ltable_node);
	return ERR_PTR(err);
}

static void mlxsw_sp_router_xm_ltable_node_put(struct mlxsw_sp_router_xm *router_xm,
					       struct mlxsw_sp_router_xm_ltable_node *ltable_node)
{
	if (!refcount_dec_and_test(&ltable_node->refcnt))
		return;
	rhashtable_remove_fast(&router_xm->ltable_ht, &ltable_node->ht_node,
			       mlxsw_sp_router_xm_ltable_ht_params);
	kfree(ltable_node);
}

static int mlxsw_sp_router_xm_ltable_lvalue_set(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_router_xm_ltable_node *ltable_node)
{
	char xrmt_pl[MLXSW_REG_XRMT_LEN];

	mlxsw_reg_xrmt_pack(xrmt_pl, ltable_node->mindex, ltable_node->current_lvalue);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xrmt), xrmt_pl);
}

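/* XLT cache flushing is deferred and consolidated: each request gets a
 * flush node keyed by its flush_info and a delayed work. An identical
 * request arriving within the wait budget re-arms the pending work
 * instead of issuing another flush. When too many distinct flushes are
 * in flight, the code falls back to flushing the whole cache.
 */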
struct mlxsw_sp_router_xm_flush_node {
	struct rhash_head ht_node; /* Member of router_xm->flush_ht */
	struct list_head list;
	struct mlxsw_sp_router_xm_flush_info flush_info;
	struct delayed_work dw;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long start_jiffies;
	unsigned int reuses; /* By how many flush calls this was reused. */
	refcount_t refcnt;
};

static const struct rhashtable_params mlxsw_sp_router_xm_flush_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_router_xm_flush_node, flush_info),
	.head_offset = offsetof(struct mlxsw_sp_router_xm_flush_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_router_xm_flush_info),
	.automatic_shrinking = true,
};

static struct mlxsw_sp_router_xm_flush_node *
mlxsw_sp_router_xm_cache_flush_node_create(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_router_xm_flush_info *flush_info)
{
	struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
	struct mlxsw_sp_router_xm_flush_node *flush_node;
	int err;

	flush_node = kzalloc(sizeof(*flush_node), GFP_KERNEL);
	if (!flush_node)
		return ERR_PTR(-ENOMEM);

	flush_node->flush_info = *flush_info;
	err = rhashtable_insert_fast(&router_xm->flush_ht, &flush_node->ht_node,
				     mlxsw_sp_router_xm_flush_ht_params);
	if (err) {
		kfree(flush_node);
		return ERR_PTR(err);
	}
	router_xm->flush_count++;
	flush_node->mlxsw_sp = mlxsw_sp;
	flush_node->start_jiffies = jiffies;
	refcount_set(&flush_node->refcnt, 1);
	return flush_node;
}

static void
mlxsw_sp_router_xm_cache_flush_node_hold(struct mlxsw_sp_router_xm_flush_node *flush_node)
{
	if (!flush_node)
		return;
	refcount_inc(&flush_node->refcnt);
}

static void
mlxsw_sp_router_xm_cache_flush_node_put(struct mlxsw_sp_router_xm_flush_node *flush_node)
{
	if (!flush_node || !refcount_dec_and_test(&flush_node->refcnt))
		return;
	kfree(flush_node);
}

static void
mlxsw_sp_router_xm_cache_flush_node_destroy(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_router_xm_flush_node *flush_node)
{
	struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;

	router_xm->flush_count--;
	rhashtable_remove_fast(&router_xm->flush_ht, &flush_node->ht_node,
			       mlxsw_sp_router_xm_flush_ht_params);
	mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
}

static u32 mlxsw_sp_router_xm_flush_mask4(u8 prefix_len)
{
	/* Guard the zero prefix length of a default route, for which
	 * GENMASK(31, 32) would be undefined.
	 */
	return prefix_len ? GENMASK(31, 32 - prefix_len) : 0;
}

static unsigned char *mlxsw_sp_router_xm_flush_mask6(u8 prefix_len)
{
	static unsigned char mask[sizeof(struct in6_addr)];

	memset(mask, 0, sizeof(mask));
	memset(mask, 0xff, prefix_len / 8);
	/* Only set the partial byte when the prefix length is not a
	 * multiple of 8, which also avoids writing one byte past the
	 * mask for a /128.
	 */
	if (prefix_len % 8)
		mask[prefix_len / 8] = GENMASK(7, 8 - prefix_len % 8);
	return mask;
}

#define MLXSW_SP_ROUTER_XM_CACHE_PARALLEL_FLUSHES_LIMIT 15
#define MLXSW_SP_ROUTER_XM_CACHE_FLUSH_ALL_MIN_REUSES 15
#define MLXSW_SP_ROUTER_XM_CACHE_DELAY 50 /* usecs */
#define MLXSW_SP_ROUTER_XM_CACHE_MAX_WAIT (MLXSW_SP_ROUTER_XM_CACHE_DELAY * 10)

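/* Delayed work that performs the actual flush: a flush-all request is
 * issued through the RLPMCE register, while a targeted flush of M/ML
 * cache entries for one {VR, prefix} goes through RLCMLD.
 */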
static void mlxsw_sp_router_xm_cache_flush_work(struct work_struct *work)
{
	struct mlxsw_sp_router_xm_flush_info *flush_info;
	struct mlxsw_sp_router_xm_flush_node *flush_node;
	char rlcmld_pl[MLXSW_REG_RLCMLD_LEN];
	enum mlxsw_reg_rlcmld_select select;
	struct mlxsw_sp *mlxsw_sp;
	u32 addr4;
	int err;

	flush_node = container_of(work, struct mlxsw_sp_router_xm_flush_node,
				  dw.work);
	mlxsw_sp = flush_node->mlxsw_sp;
	flush_info = &flush_node->flush_info;

	if (flush_info->all) {
		char rlpmce_pl[MLXSW_REG_RLPMCE_LEN];

		mlxsw_reg_rlpmce_pack(rlpmce_pl, true, false);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlpmce),
				      rlpmce_pl);
		if (err)
			dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");

		if (flush_node->reuses <
		    MLXSW_SP_ROUTER_XM_CACHE_FLUSH_ALL_MIN_REUSES)
			/* Leave flush-all mode. */
			mlxsw_sp->router->xm->flush_all_mode = false;
		goto out;
	}

	select = MLXSW_REG_RLCMLD_SELECT_M_AND_ML_ENTRIES;

	switch (flush_info->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = *((u32 *) flush_info->addr);
		addr4 &= mlxsw_sp_router_xm_flush_mask4(flush_info->prefix_len);

		/* In case the flush prefix length is bigger than the
		 * M-value, only ML entries can match it, so restrict
		 * the flush to them.
		 */
		if (flush_info->prefix_len > MLXSW_SP_ROUTER_XM_M_VAL)
			select = MLXSW_REG_RLCMLD_SELECT_ML_ENTRIES;

		mlxsw_reg_rlcmld_pack4(rlcmld_pl, select,
				       flush_info->virtual_router, addr4,
				       mlxsw_sp_router_xm_flush_mask4(flush_info->prefix_len));
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_rlcmld_pack6(rlcmld_pl, select,
				       flush_info->virtual_router, flush_info->addr,
				       mlxsw_sp_router_xm_flush_mask6(flush_info->prefix_len));
		break;
	default:
		WARN_ON(true);
		goto out;
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlcmld), rlcmld_pl);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");

out:
	mlxsw_sp_router_xm_cache_flush_node_destroy(mlxsw_sp, flush_node);
}

static bool
mlxsw_sp_router_xm_cache_flush_may_cancel(struct mlxsw_sp_router_xm_flush_node *flush_node)
{
	unsigned long max_wait = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_MAX_WAIT);
	unsigned long delay = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_DELAY);

	/* In case there is the same flushing work pending, check if it
	 * can be consolidated with this request. Consolidation is only
	 * allowed while re-arming the delayed work keeps the total wait
	 * below MAX_WAIT, and only if the pending work could actually
	 * still be canceled.
	 */
	if (time_is_after_jiffies(flush_node->start_jiffies + max_wait - delay) &&
	    cancel_delayed_work_sync(&flush_node->dw))
		return true;
	return false;
}

static int
mlxsw_sp_router_xm_cache_flush_schedule(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_router_xm_flush_info *flush_info)
{
	unsigned long delay = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_DELAY);
	struct mlxsw_sp_router_xm_flush_info flush_all_info = {.all = true};
	struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
	struct mlxsw_sp_router_xm_flush_node *flush_node;

	/* Check if the number of parallel flushes reached the critical
	 * amount. If so, enter flush-all mode and flush the whole cache
	 * instead of individual prefixes.
	 */
	if (router_xm->flush_count == MLXSW_SP_ROUTER_XM_CACHE_PARALLEL_FLUSHES_LIMIT)
		/* Entering flush-all mode. */
		router_xm->flush_all_mode = true;

	if (router_xm->flush_all_mode)
		flush_info = &flush_all_info;

	rcu_read_lock();
	flush_node = rhashtable_lookup_fast(&router_xm->flush_ht, flush_info,
					    mlxsw_sp_router_xm_flush_ht_params);
	/* Take a reference so the object is not freed before a possible
	 * delayed work cancel could be done.
	 */
	mlxsw_sp_router_xm_cache_flush_node_hold(flush_node);
	rcu_read_unlock();

	if (flush_node && mlxsw_sp_router_xm_cache_flush_may_cancel(flush_node)) {
		flush_node->reuses++;
		mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
		/* Original work was within wait period and was canceled.
		 * That means that the reference is still held and the
		 * flush_node pointer is correct - so it can be
		 * re-scheduled.
		 */
		goto schedule_work;
	} else {
		mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
	}

	flush_node = mlxsw_sp_router_xm_cache_flush_node_create(mlxsw_sp, flush_info);
	if (IS_ERR(flush_node))
		return PTR_ERR(flush_node);
	INIT_DELAYED_WORK(&flush_node->dw, mlxsw_sp_router_xm_cache_flush_work);

schedule_work:
	mlxsw_core_schedule_dw(&flush_node->dw, delay);
	return 0;
}

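/* Account one FIB entry in the L-table. If its L-value exceeds the
 * node's current maximum, the XRMT register is reprogrammed and the
 * entry's flush prefix length is widened to the M-value, so all cached
 * entries under the M-index get flushed.
 */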
static int
mlxsw_sp_router_xm_ml_entry_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_router_xm_fib_entry *fib_entry)
{
	struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
	struct mlxsw_sp_router_xm_ltable_node *ltable_node;
	u8 lvalue = fib_entry->lvalue;
	int err;

	ltable_node = mlxsw_sp_router_xm_ltable_node_get(router_xm,
							 fib_entry->mindex);
	if (IS_ERR(ltable_node))
		return PTR_ERR(ltable_node);
	if (lvalue > ltable_node->current_lvalue) {
		/* The L-value is bigger than the one currently set, update. */
		ltable_node->current_lvalue = lvalue;
		err = mlxsw_sp_router_xm_ltable_lvalue_set(mlxsw_sp,
							   ltable_node);
		if (err)
			goto err_lvalue_set;

		/* The L-value for prefix/M is increased.
		 * Therefore, all entries in M and ML caches matching
		 * {prefix/M, proto, VR} need to be flushed. Set the flush
		 * prefix length to M to achieve that.
		 */
		fib_entry->flush_info.prefix_len = MLXSW_SP_ROUTER_XM_M_VAL;
	}

	ltable_node->lvalue_ref[lvalue]++;
	fib_entry->ltable_node = ltable_node;

	return 0;

err_lvalue_set:
	mlxsw_sp_router_xm_ltable_node_put(router_xm, ltable_node);
	return err;
}

static void
mlxsw_sp_router_xm_ml_entry_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_router_xm_fib_entry *fib_entry)
{
	struct mlxsw_sp_router_xm_ltable_node *ltable_node =
				fib_entry->ltable_node;
	struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
	u8 lvalue = fib_entry->lvalue;

	ltable_node->lvalue_ref[lvalue]--;
	if (lvalue == ltable_node->current_lvalue && lvalue &&
	    !ltable_node->lvalue_ref[lvalue]) {
		u8 new_lvalue = lvalue - 1;

		/* Find the biggest L-value left out there. */
		while (new_lvalue > 0 && !ltable_node->lvalue_ref[new_lvalue])
			new_lvalue--;

		ltable_node->current_lvalue = new_lvalue;
		mlxsw_sp_router_xm_ltable_lvalue_set(mlxsw_sp, ltable_node);

		/* The L-value for prefix/M is decreased.
		 * Therefore, all entries in M and ML caches matching
		 * {prefix/M, proto, VR} need to be flushed. Set the flush
		 * prefix length to M to achieve that.
		 */
		fib_entry->flush_info.prefix_len = MLXSW_SP_ROUTER_XM_M_VAL;
	}
	mlxsw_sp_router_xm_ltable_node_put(router_xm, ltable_node);
}

static int
mlxsw_sp_router_xm_ml_entries_add(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
{
	struct mlxsw_sp_router_xm_fib_entry *fib_entry;
	int err;
	int i;

	for (i = 0; i < op_ctx_xm->entries_count; i++) {
		fib_entry = op_ctx_xm->entries[i];
		err = mlxsw_sp_router_xm_ml_entry_add(mlxsw_sp, fib_entry);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		fib_entry = op_ctx_xm->entries[i];
		mlxsw_sp_router_xm_ml_entry_del(mlxsw_sp, fib_entry);
	}
	return err;
}

static void
mlxsw_sp_router_xm_ml_entries_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
{
	struct mlxsw_sp_router_xm_fib_entry *fib_entry;
	int i;

	for (i = 0; i < op_ctx_xm->entries_count; i++) {
		fib_entry = op_ctx_xm->entries[i];
		mlxsw_sp_router_xm_ml_entry_del(mlxsw_sp, fib_entry);
	}
}

static void
mlxsw_sp_router_xm_ml_entries_cache_flush(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
{
	struct mlxsw_sp_router_xm_fib_entry *fib_entry;
	int err;
	int i;

	for (i = 0; i < op_ctx_xm->entries_count; i++) {
		fib_entry = op_ctx_xm->entries[i];
		err = mlxsw_sp_router_xm_cache_flush_schedule(mlxsw_sp,
							      &fib_entry->flush_info);
		if (err)
			dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
	}
}

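/* Commit the accumulated transaction to the device. When bulking is
 * allowed and there is room for another command, the write is postponed
 * instead. The L-table is updated before the XMDR write for replaces
 * and after it for deletes, and a cache flush is scheduled at the end.
 */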
static int mlxsw_sp_router_ll_xm_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
						  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						  bool *postponed_for_bulk)
{
	struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
	struct mlxsw_sp_router_xm_fib_entry *fib_entry;
	u8 num_rec;
	int err;
	int i;

	op_ctx_xm->trans_offset += op_ctx_xm->trans_item_len;
	op_ctx_xm->entries_count++;

	/* Check if bulking is possible and there is still room for another
	 * FIB entry record. The size of 'trans_item_len' is either the size
	 * of an IPv4 command or of an IPv6 command. It is not possible to
	 * mix the two in a single XMDR write.
	 */
	if (op_ctx->bulk_ok &&
	    op_ctx_xm->trans_offset + op_ctx_xm->trans_item_len <= MLXSW_REG_XMDR_TRANS_LEN) {
		if (postponed_for_bulk)
			*postponed_for_bulk = true;
		return 0;
	}

	if (op_ctx->event == FIB_EVENT_ENTRY_REPLACE) {
		/* The L-table is updated inside. It has to be done before
		 * the prefix is inserted.
		 */
		err = mlxsw_sp_router_xm_ml_entries_add(mlxsw_sp, op_ctx_xm);
		if (err)
			goto out;
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xmdr), op_ctx_xm->xmdr_pl);
	if (err)
		goto out;
	num_rec = mlxsw_reg_xmdr_num_rec_get(op_ctx_xm->xmdr_pl);
	if (num_rec > op_ctx_xm->entries_count) {
		dev_err(mlxsw_sp->bus_info->dev, "Invalid XMDR number of records\n");
		err = -EIO;
		goto out;
	}
	for (i = 0; i < num_rec; i++) {
		if (!mlxsw_reg_xmdr_reply_vect_get(op_ctx_xm->xmdr_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Command send over XMDR failed\n");
			err = -EIO;
			goto out;
		} else {
			fib_entry = op_ctx_xm->entries[i];
			fib_entry->committed = true;
		}
	}

	if (op_ctx->event == FIB_EVENT_ENTRY_DEL)
		/* The L-table is updated inside. It has to be done after
		 * the prefix was removed.
		 */
		mlxsw_sp_router_xm_ml_entries_del(mlxsw_sp, op_ctx_xm);

	/* At the very end, do the XLT cache flushing to evict stale
	 * M and ML cache entries after the routes were inserted/removed.
	 */
	mlxsw_sp_router_xm_ml_entries_cache_flush(mlxsw_sp, op_ctx_xm);

out:
	/* The next pack call is going to do reinitialization. */
	op_ctx->initialized = false;
	return err;
}


static bool mlxsw_sp_router_ll_xm_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
{
	struct mlxsw_sp_router_xm_fib_entry *fib_entry = (void *) priv->priv;

	return fib_entry->committed;
}

const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops = {
	.init = mlxsw_sp_router_ll_xm_init,
	.ralta_write = mlxsw_sp_router_ll_xm_ralta_write,
	.ralst_write = mlxsw_sp_router_ll_xm_ralst_write,
	.raltb_write = mlxsw_sp_router_ll_xm_raltb_write,
	.fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_xm),
	.fib_entry_priv_size = sizeof(struct mlxsw_sp_router_xm_fib_entry),
	.fib_entry_pack = mlxsw_sp_router_ll_xm_fib_entry_pack,
	.fib_entry_act_remote_pack = mlxsw_sp_router_ll_xm_fib_entry_act_remote_pack,
	.fib_entry_act_local_pack = mlxsw_sp_router_ll_xm_fib_entry_act_local_pack,
	.fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_pack,
	.fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_tun_pack,
	.fib_entry_commit = mlxsw_sp_router_ll_xm_fib_entry_commit,
	.fib_entry_is_committed = mlxsw_sp_router_ll_xm_fib_entry_is_committed,
};

#define MLXSW_SP_ROUTER_XM_MINDEX_SIZE (64 * 1024)

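/* Probe-time setup: query the XM capabilities via XLTQ, validate the
 * device ID and M-index space, program the per-protocol M-values via
 * RXLTM and initialize the L-table and flush hashtables.
 */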
int mlxsw_sp_router_xm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router_xm *router_xm;
	char rxltm_pl[MLXSW_REG_RXLTM_LEN];
	char xltq_pl[MLXSW_REG_XLTQ_LEN];
	u32 mindex_size;
	u16 device_id;
	int err;

	if (!mlxsw_sp->bus_info->xm_exists)
		return 0;

	router_xm = kzalloc(sizeof(*router_xm), GFP_KERNEL);
	if (!router_xm)
		return -ENOMEM;

	mlxsw_reg_xltq_pack(xltq_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(xltq), xltq_pl);
	if (err)
		goto err_xltq_query;
	mlxsw_reg_xltq_unpack(xltq_pl, &device_id, &router_xm->ipv4_supported,
			      &router_xm->ipv6_supported, &router_xm->entries_size, &mindex_size);

	if (device_id != MLXSW_REG_XLTQ_XM_DEVICE_ID_XLT) {
		dev_err(mlxsw_sp->bus_info->dev, "Invalid XM device id\n");
		err = -EINVAL;
		goto err_device_id_check;
	}

	if (mindex_size != MLXSW_SP_ROUTER_XM_MINDEX_SIZE) {
		dev_err(mlxsw_sp->bus_info->dev, "Unexpected M-index size\n");
		err = -EINVAL;
		goto err_mindex_size_check;
	}

	mlxsw_reg_rxltm_pack(rxltm_pl, mlxsw_sp_router_xm_m_val[MLXSW_SP_L3_PROTO_IPV4],
			     mlxsw_sp_router_xm_m_val[MLXSW_SP_L3_PROTO_IPV6]);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxltm), rxltm_pl);
	if (err)
		goto err_rxltm_write;

	err = rhashtable_init(&router_xm->ltable_ht, &mlxsw_sp_router_xm_ltable_ht_params);
	if (err)
		goto err_ltable_ht_init;

	err = rhashtable_init(&router_xm->flush_ht, &mlxsw_sp_router_xm_flush_ht_params);
	if (err)
		goto err_flush_ht_init;

	mlxsw_sp->router->xm = router_xm;
	return 0;

err_flush_ht_init:
	rhashtable_destroy(&router_xm->ltable_ht);
err_ltable_ht_init:
err_rxltm_write:
err_mindex_size_check:
err_device_id_check:
err_xltq_query:
	kfree(router_xm);
	return err;
}

void mlxsw_sp_router_xm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;

	if (!mlxsw_sp->bus_info->xm_exists)
		return;

	rhashtable_destroy(&router_xm->flush_ht);
	rhashtable_destroy(&router_xm->ltable_ht);
	kfree(router_xm);
}

bool mlxsw_sp_router_xm_ipv4_is_supported(const struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;

	return router_xm && router_xm->ipv4_supported;
}