linux/net/ipv6/esp6.c
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Authors
 *
 *      Mitsuru KANDA @USAGI       : IPv6 Support
 *      Kazunori MIYAZAWA @USAGI   :
 *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *      This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>

struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present, followed by the IV, the request and
 * finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
        unsigned int len;

        len = seqihlen;

        len += crypto_aead_ivsize(aead);

        if (len) {
                len += crypto_aead_alignmask(aead) &
                       ~(crypto_tfm_ctx_alignment() - 1);
                len = ALIGN(len, crypto_tfm_ctx_alignment());
        }

        len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
        len = ALIGN(len, __alignof__(struct scatterlist));

        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}

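/*
 * A rough sketch of how the helpers below carve up the esp_alloc_tmp()
 * buffer (actual sizes and alignment depend on the aead transform in use):
 *
 *   tmp
 *   +-------------+----------+----------------------+------------------+
 *   | seqhi       | IV       | aead request + ctx   | scatterlist[]    |
 *   | (ESN only)  | (ivsize) | (reqsize)            | (sglists+nfrags) |
 *   +-------------+----------+----------------------+------------------+
 *
 * esp_tmp_seqhi(), esp_tmp_iv(), esp_tmp_req()/esp_tmp_givreq() and
 * esp_req_sg()/esp_givreq_sg() each return a pointer into this buffer,
 * aligned as the crypto layer requires.
 */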
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
        return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
        return crypto_aead_ivsize(aead) ?
               PTR_ALIGN((u8 *)tmp + seqhilen,
                         crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_givcrypt_request *esp_tmp_givreq(
        struct crypto_aead *aead, u8 *iv)
{
        struct aead_givcrypt_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_givcrypt_set_tfm(req, aead);
        return req;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
        struct aead_request *req;

        req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
                                crypto_tfm_ctx_alignment());
        aead_request_set_tfm(req, aead);
        return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                                             struct aead_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static inline struct scatterlist *esp_givreq_sg(
        struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_aead_reqsize(aead),
                             __alignof__(struct scatterlist));
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        kfree(ESP_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb, err);
}

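/*
 * Outbound ESP processing.  The skb handed in is the plain payload; this
 * function appends the ESP trailer, prepends the ESP header and hands the
 * packet to the aead transform for encryption and ICV generation.  On the
 * wire the tail of the packet ends up looking roughly like this (RFC 4303):
 *
 *   payload | TFC padding | pad (1, 2, 3, ...) | pad len | next hdr | ICV
 *
 * The self-describing padding bytes (1, 2, 3, ...) are the values a
 * receiver could verify; see the note in esp_input_done2().
 */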
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_givcrypt_request *req;
        struct scatterlist *sg;
        struct scatterlist *asg;
        struct sk_buff *trailer;
        void *tmp;
        int blksize;
        int clen;
        int alen;
        int plen;
        int tfclen;
        int nfrags;
        int assoclen;
        int sglists;
        int seqhilen;
        u8 *iv;
        u8 *tail;
        __be32 *seqhi;
        struct esp_data *esp = x->data;

        /* skb is pure payload to encrypt */
        aead = esp->aead;
        alen = crypto_aead_authsize(aead);

        tfclen = 0;
        if (x->tfcpad) {
                struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                u32 padto;

                padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
                if (skb->len < padto)
                        tfclen = padto - skb->len;
        }
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        clen = ALIGN(skb->len + 2 + tfclen, blksize);
        if (esp->padlen)
                clen = ALIGN(clen, esp->padlen);
        plen = clen - skb->len - tfclen;

        err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
        if (err < 0)
                goto error;
        nfrags = err;

        assoclen = sizeof(*esph);
        sglists = 1;
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists += 2;
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp) {
                err = -ENOMEM;
                goto error;
        }

        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_givreq(aead, iv);
        asg = esp_givreq_sg(aead, req);
        sg = asg + sglists;

        /* Fill padding... */
        tail = skb_tail_pointer(trailer);
        if (tfclen) {
                memset(tail, 0, tfclen);
                tail += tfclen;
        }
        do {
                int i;
                for (i = 0; i < plen - 2; i++)
                        tail[i] = i + 1;
        } while (0);
        tail[plen - 2] = plen - 2;
        tail[plen - 1] = *skb_mac_header(skb);
        pskb_put(skb, trailer, clen - skb->len + alen);

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg,
                     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
                     clen + alen);

        if ((x->props.flags & XFRM_STATE_ESN)) {
                sg_init_table(asg, 3);
                sg_set_buf(asg, &esph->spi, sizeof(__be32));
                *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
                sg_set_buf(asg + 1, seqhi, seqhilen);
                sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
        } else
                sg_init_one(asg, esph, sizeof(*esph));

        aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
        aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
                              XFRM_SKB_CB(skb)->seq.output.low);

        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
        if (err == -EINPROGRESS)
                goto error;

        if (err == -EBUSY)
                err = NET_XMIT_DROP;

        kfree(tmp);

error:
        return err;
}

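/*
 * Post-decryption processing, shared by the synchronous path and the
 * async esp_input_done() callback: strip the ESP header, IV, padding and
 * ICV, and return the inner protocol number taken from the next-header
 * byte of the trailer (or a negative errno on failure).
 */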
static int esp_input_done2(struct sk_buff *skb, int err)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        struct esp_data *esp = x->data;
        struct crypto_aead *aead = esp->aead;
        int alen = crypto_aead_authsize(aead);
        int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
        int elen = skb->len - hlen;
        int hdr_len = skb_network_header_len(skb);
        int padlen;
        u8 nexthdr[2];

        kfree(ESP_SKB_CB(skb)->tmp);

        if (unlikely(err))
                goto out;

        if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
                BUG();

        err = -EINVAL;
        padlen = nexthdr[0];
        if (padlen + 2 + alen >= elen) {
                LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
                               "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
                goto out;
        }

        /* ... check padding bits here. Silly. :-) */

        pskb_trim(skb, skb->len - alen - padlen - 2);
        __skb_pull(skb, hlen);
        skb_set_transport_header(skb, -hdr_len);

        err = nexthdr[1];

        /* RFC4303: Drop dummy packets without any error */
        if (err == IPPROTO_NONE)
                err = -EINVAL;

out:
        return err;
}

static void esp_input_done(struct crypto_async_request *base, int err)
{
        struct sk_buff *skb = base->data;

        xfrm_input_resume(skb, esp_input_done2(skb, err));
}

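/*
 * Inbound ESP processing.  Expects skb->data to point at the ESP header;
 * sets up scatterlists over the encrypted payload (with the SPI and
 * sequence number as associated data) and runs the aead decrypt,
 * finishing either synchronously here or asynchronously via
 * esp_input_done().
 */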
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct esp_data *esp = x->data;
        struct crypto_aead *aead = esp->aead;
        struct aead_request *req;
        struct sk_buff *trailer;
        int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
        int nfrags;
        int assoclen;
        int sglists;
        int seqhilen;
        int ret = 0;
        void *tmp;
        __be32 *seqhi;
        u8 *iv;
        struct scatterlist *sg;
        struct scatterlist *asg;

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
                ret = -EINVAL;
                goto out;
        }

        if (elen <= 0) {
                ret = -EINVAL;
                goto out;
        }

        if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
                ret = -EINVAL;
                goto out;
        }

        ret = -ENOMEM;

        assoclen = sizeof(*esph);
        sglists = 1;
        seqhilen = 0;

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists += 2;
                seqhilen += sizeof(__be32);
                assoclen += seqhilen;
        }

        tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
        if (!tmp)
                goto out;

        ESP_SKB_CB(skb)->tmp = tmp;
        seqhi = esp_tmp_seqhi(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        asg = esp_req_sg(aead, req);
        sg = asg + sglists;

        skb->ip_summed = CHECKSUM_NONE;

        esph = (struct ip_esp_hdr *)skb->data;

        /* Get ivec. This can be wrong; check against other implementations. */
        iv = esph->enc_data;

        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

        if ((x->props.flags & XFRM_STATE_ESN)) {
                sg_init_table(asg, 3);
                sg_set_buf(asg, &esph->spi, sizeof(__be32));
                *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
                sg_set_buf(asg + 1, seqhi, seqhilen);
                sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
        } else
                sg_init_one(asg, esph, sizeof(*esph));

        aead_request_set_callback(req, 0, esp_input_done, skb);
        aead_request_set_crypt(req, sg, sg, elen, iv);
        aead_request_set_assoc(req, asg, assoclen);

        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS)
                goto out;

        ret = esp_input_done2(skb, ret);

out:
        return ret;
}

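/*
 * Report the largest payload that still fits in @mtu once ESP overhead
 * is added: header (+IV), ICV and the block-aligned trailer.  A rough
 * worked example, assuming transport mode with cbc(aes) (16-byte blocks
 * and IV), hmac(sha1) truncated to a 12-byte ICV, padlen 0 and
 * mtu = 1500:
 *
 *   header_len = 8 + 16 = 24            (ESP header + IV)
 *   net_adj    = 40                     (IPv6 header, non-tunnel mode)
 *   (1500 - 24 - 12 - 40) & ~15  = 1424
 *   1424 + (40 - 2)              = 1462
 *
 * The "- 2" accounts for the pad-length and next-header bytes of the
 * trailer.
 */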
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
        struct esp_data *esp = x->data;
        u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
        u32 align = max_t(u32, blksize, esp->padlen);
        unsigned int net_adj;

        if (x->props.mode != XFRM_MODE_TUNNEL)
                net_adj = sizeof(struct ipv6hdr);
        else
                net_adj = 0;

        return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
                 net_adj) & ~(align - 1)) + (net_adj - 2);
}

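/*
 * ICMPv6 error handler: on a destination-unreachable, packet-too-big or
 * redirect message that matches a known SA (looked up by destination
 * address and SPI), update the cached route so subsequent output uses
 * the new path MTU or next hop.
 */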
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                     u8 type, u8 code, int offset, __be32 info)
{
        struct net *net = dev_net(skb->dev);
        const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *x;

        if (type != ICMPV6_DEST_UNREACH &&
            type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return;

        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET6);
        if (!x)
                return;

        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, 0, 0);
        else
                ip6_update_pmtu(skb, net, info, 0, 0);
        xfrm_state_put(x);
}

static void esp6_destroy(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;

        if (!esp)
                return;

        crypto_free_aead(esp->aead);
        kfree(esp);
}

static int esp_init_aead(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;
        struct crypto_aead *aead;
        int err;

        aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        esp->aead = aead;

        err = crypto_aead_setkey(aead, x->aead->alg_key,
                                 (x->aead->alg_key_len + 7) / 8);
        if (err)
                goto error;

        err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
        if (err)
                goto error;

error:
        return err;
}

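/*
 * The authenc template takes its two keys as a single blob: an rtattr
 * carrying the encryption key length, followed by the raw authentication
 * key and then the raw encryption key.  The buffer built below looks
 * roughly like this:
 *
 *   +--------------------+--------------------+------------------+
 *   | rtattr + key_param | auth key (if aalg) | enc key          |
 *   | (enckeylen, be32)  |                    |                  |
 *   +--------------------+--------------------+------------------+
 */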
static int esp_init_authenc(struct xfrm_state *x)
{
        struct esp_data *esp = x->data;
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -EINVAL;
        if (x->ealg == NULL)
                goto error;

        err = -ENAMETOOLONG;

        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "authencesn(%s,%s)",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "authenc(%s,%s)",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        }

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        esp->aead = aead;

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
                    crypto_aead_authsize(aead)) {
                        NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
                                 x->aalg->alg_name,
                                 crypto_aead_authsize(aead),
                                 aalg_desc->uinfo.auth.icv_fullbits/8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}

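/*
 * State initialisation: pick an aead transform (either a true aead
 * algorithm or an authenc()/authencesn() combination of cipher and
 * digest) and work out the header and trailer space the mode needs,
 * e.g. an extra IPv6 header in tunnel mode.  trailer_len reserves
 * worst-case padding plus the pad-length/next-header pair (align + 1
 * bytes in total) and the ICV.
 */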
static int esp6_init_state(struct xfrm_state *x)
{
        struct esp_data *esp;
        struct crypto_aead *aead;
        u32 align;
        int err;

        if (x->encap)
                return -EINVAL;

        esp = kzalloc(sizeof(*esp), GFP_KERNEL);
        if (esp == NULL)
                return -ENOMEM;

        x->data = esp;

        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);

        if (err)
                goto error;

        aead = esp->aead;

        esp->padlen = 0;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        switch (x->props.mode) {
        case XFRM_MODE_BEET:
                if (x->sel.family != AF_INET6)
                        x->props.header_len += IPV4_BEET_PHMAXLEN +
                                               (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
                break;
        case XFRM_MODE_TRANSPORT:
                break;
        case XFRM_MODE_TUNNEL:
                x->props.header_len += sizeof(struct ipv6hdr);
                break;
        default:
                goto error;
        }

        align = ALIGN(crypto_aead_blocksize(aead), 4);
        if (esp->padlen)
                align = max_t(u32, align, esp->padlen);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
        return err;
}

static const struct xfrm_type esp6_type =
{
        .description    = "ESP6",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp6_init_state,
        .destructor     = esp6_destroy,
        .get_mtu        = esp6_get_mtu,
        .input          = esp6_input,
        .output         = esp6_output,
        .hdr_offset     = xfrm6_find_1stfragopt,
};

static const struct inet6_protocol esp6_protocol = {
        .handler        =       xfrm6_rcv,
        .err_handler    =       esp6_err,
        .flags          =       INET6_PROTO_NOPOLICY,
};

static int __init esp6_init(void)
{
        if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
        if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp6_type, AF_INET6);
                return -EAGAIN;
        }

        return 0;
}

static void __exit esp6_fini(void)
{
        if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);