linux/net/mac80211/mesh_pathtbl.c
/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER   2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN          2

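/* A path counts as expired when it is still marked active, its expiration
 * time has passed, and it has not been fixed (pinned) by userspace.
 */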
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
                                time_after(jiffies, mpath->exp_time) && \
                                !(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
        struct hlist_node list;
        struct rcu_head rcu;
        /* This indirection allows two different tables to point to the same
         * mesh_path structure, useful when resizing
         */
        struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
        rcu_assign_pointer(mpath->next_hop, sta);
}


/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
        struct mesh_table *tbl;
        struct mpath_node *node;

        tbl = rcu_dereference(mesh_paths);

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                                memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                if (MPATH_EXPIRED(mpath))
                                        mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}

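/**
 * mpp_path_lookup - look up a proxy path in the mesh proxy path table
 * @dst: hardware address (ETH_ALEN length) of the proxied destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */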
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
        struct mesh_table *tbl;
        struct mpath_node *node;

        tbl = rcu_dereference(mpp_paths);

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                if (MPATH_EXPIRED(mpath))
                                        mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
        struct mpath_node *node;
        struct hlist_node *p;
        int i;
        int j = 0;

        for_each_mesh_entry(mesh_paths, p, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx) {
                        if (MPATH_EXPIRED(node->mpath)) {
                                spin_lock_bh(&node->mpath->state_lock);
                                if (MPATH_EXPIRED(node->mpath))
                                        node->mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&node->mpath->state_lock);
                        }
                        return node->mpath;
                }
        }

        return NULL;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
                return -ENOSPC;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
        if (!new_node)
                goto err_node_alloc;

        read_lock(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->timer.data = (unsigned long) new_mpath;
        new_mpath->timer.function = mesh_path_timer;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
        init_timer(&new_mpath->timer);

        hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
        bucket = &mesh_paths->hash_buckets[hash_idx];

        spin_lock(&mesh_paths->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&mesh_paths->entries) >=
                mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
                grow = 1;

        spin_unlock(&mesh_paths->hashwlock[hash_idx]);
        read_unlock(&pathtbl_resize_lock);
        if (grow) {
                struct mesh_table *oldtbl, *newtbl;

                write_lock(&pathtbl_resize_lock);
                oldtbl = mesh_paths;
                newtbl = mesh_table_grow(mesh_paths);
                if (!newtbl) {
                        write_unlock(&pathtbl_resize_lock);
                        return 0;
                }
                rcu_assign_pointer(mesh_paths, newtbl);
                write_unlock(&pathtbl_resize_lock);

                synchronize_rcu();
                mesh_table_free(oldtbl, false);
        }
        return 0;

err_exists:
        spin_unlock(&mesh_paths->hashwlock[hash_idx]);
        read_unlock(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        atomic_dec(&sdata->u.mesh.mpaths);
        return err;
}

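/**
 * mpp_path_add - allocate and add a new proxy path to the mesh proxy path table
 * @dst: hardware address (ETH_ALEN length) of the proxied destination
 * @mpp: hardware address of the mesh proxy (MPP or MAP) through which @dst
 *       is reached
 * @sdata: local subif
 *
 * Returns: 0 on success
 */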
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
        if (!new_node)
                goto err_node_alloc;

        read_lock(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);

        hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
        bucket = &mpp_paths->hash_buckets[hash_idx];

        spin_lock(&mpp_paths->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&mpp_paths->entries) >=
                mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
                grow = 1;

        spin_unlock(&mpp_paths->hashwlock[hash_idx]);
        read_unlock(&pathtbl_resize_lock);
        if (grow) {
                struct mesh_table *oldtbl, *newtbl;

                write_lock(&pathtbl_resize_lock);
                oldtbl = mpp_paths;
                newtbl = mesh_table_grow(mpp_paths);
                if (!newtbl) {
                        write_unlock(&pathtbl_resize_lock);
                        return 0;
                }
                rcu_assign_pointer(mpp_paths, newtbl);
                write_unlock(&pathtbl_resize_lock);

                synchronize_rcu();
                mesh_table_free(oldtbl, false);
        }
        return 0;

err_exists:
        spin_unlock(&mpp_paths->hashwlock[hash_idx]);
        read_unlock(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;

        rcu_read_lock();
        for_each_mesh_entry(mesh_paths, p, node, i) {
                mpath = node->mpath;
                spin_lock_bh(&mpath->state_lock);
                if (mpath->next_hop == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
                        mpath->flags &= ~MESH_PATH_ACTIVE;
                        ++mpath->dsn;
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_error_tx(mpath->dst,
                                        cpu_to_le32(mpath->dsn),
                                        sdata->dev->broadcast, sdata);
                } else
                        spin_unlock_bh(&mpath->state_lock);
        }
        rcu_read_unlock();
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        for_each_mesh_entry(mesh_paths, p, node, i) {
                mpath = node->mpath;
                if (mpath->next_hop == sta)
                        mesh_path_del(mpath->dst, mpath->sdata);
        }
}

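/**
 * mesh_path_flush - delete all mesh paths that belong to a given interface
 *
 * @sdata: the interface whose mesh paths should be removed
 */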
void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        for_each_mesh_entry(mesh_paths, p, node, i) {
                mpath = node->mpath;
                if (mpath->sdata == sdata)
                        mesh_path_del(mpath->dst, mpath->sdata);
        }
}

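/* RCU callback that reclaims a path node once all readers are done with it.
 * Stops the path timer, drops the per-interface path count and frees both
 * the mesh_path and the node that pointed to it.
 */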
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

        del_timer_sync(&node->mpath->timer);
        atomic_dec(&sdata->u.mesh.mpaths);
        kfree(node->mpath);
        kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int hash_idx;
        int err = 0;

        read_lock(&pathtbl_resize_lock);
        hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
        bucket = &mesh_paths->hash_buckets[hash_idx];

        spin_lock(&mesh_paths->hashwlock[hash_idx]);
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                                memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
                        spin_lock_bh(&mpath->state_lock);
                        mpath->flags |= MESH_PATH_RESOLVING;
                        hlist_del_rcu(&node->list);
                        call_rcu(&node->rcu, mesh_path_node_reclaim);
                        atomic_dec(&mesh_paths->entries);
                        spin_unlock_bh(&mpath->state_lock);
                        goto enddel;
                }
        }

        err = -ENXIO;
enddel:
        spin_unlock(&mesh_paths->hashwlock[hash_idx]);
        read_unlock(&pathtbl_resize_lock);
        return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&mpath->frame_queue)) &&
                        (mpath->flags & MESH_PATH_ACTIVE))
                dev_queue_xmit(skb);
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
                             struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct mesh_path *mpath;
        u32 dsn = 0;

        if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
                u8 *ra, *da;

                da = hdr->addr3;
                ra = hdr->addr2;
                mpath = mesh_path_lookup(da, sdata);
                if (mpath)
                        dsn = ++mpath->dsn;
                mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata);
        }

        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&mpath->frame_queue)) &&
                        (mpath->flags & MESH_PATH_ACTIVE))
                mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
        spin_lock_bh(&mpath->state_lock);
        mesh_path_assign_nexthop(mpath, next_hop);
        mpath->dsn = 0xffff;
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
        mpath->flags |= MESH_PATH_FIXED;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
}

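/* Remove a path node from its table and free it. The underlying mesh_path
 * is only freed when free_leafs is set, since two tables may share the same
 * mesh_path while a resize is in progress.
 */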
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
        struct mesh_path *mpath;
        struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

        mpath = node->mpath;
        hlist_del_rcu(p);
        if (free_leafs)
                kfree(mpath);
        kfree(node);
}

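/* Copy a path node into newtbl, rehashing its destination for the new table
 * size. The new node shares the existing mesh_path, which is what lets the
 * old and new tables coexist until the old one is freed.
 */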
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
        struct mesh_path *mpath;
        struct mpath_node *node, *new_node;
        u32 hash_idx;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (new_node == NULL)
                return -ENOMEM;

        node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        new_node->mpath = mpath;
        hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
        hlist_add_head(&new_node->list,
                        &newtbl->hash_buckets[hash_idx]);
        return 0;
}

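/* Allocate the mesh path and mesh proxy path tables and install their
 * free_node/copy_node callbacks.
 */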
int mesh_pathtbl_init(void)
{
        mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!mesh_paths)
                return -ENOMEM;
        mesh_paths->free_node = &mesh_path_node_free;
        mesh_paths->copy_node = &mesh_path_node_copy;
        mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;

        mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!mpp_paths) {
                mesh_table_free(mesh_paths, true);
                return -ENOMEM;
        }
        mpp_paths->free_node = &mesh_path_node_free;
        mpp_paths->copy_node = &mesh_path_node_copy;
        mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;

        return 0;
}

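/**
 * mesh_path_expire - delete expired mesh paths of a given interface
 *
 * @sdata: local subif whose paths are checked
 *
 * Paths that are neither fixed nor currently being resolved, and whose
 * expiration time has passed, are deleted from the table.
 */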
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        read_lock(&pathtbl_resize_lock);
        for_each_mesh_entry(mesh_paths, p, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
                spin_lock_bh(&mpath->state_lock);
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_del(mpath->dst, mpath->sdata);
                } else
                        spin_unlock_bh(&mpath->state_lock);
        }
        read_unlock(&pathtbl_resize_lock);
}

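/* Free both path tables, including the mesh_path leaves they point to. */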
void mesh_pathtbl_unregister(void)
{
        mesh_table_free(mesh_paths, true);
        mesh_table_free(mpp_paths, true);
}