1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39#include <linux/init.h>
40#include <linux/types.h>
41#include <linux/rcupdate.h>
42#include <linux/list.h>
43#include <linux/spinlock.h>
44#include <linux/string.h>
45#include <linux/jhash.h>
46#include <linux/audit.h>
47#include <linux/slab.h>
48#include <net/ip.h>
49#include <net/icmp.h>
50#include <net/tcp.h>
51#include <net/netlabel.h>
52#include <net/cipso_ipv4.h>
53#include <linux/atomic.h>
54#include <asm/bug.h>
55#include <asm/unaligned.h>
56
57
58
59
60
61
/* List of available DOI definitions, protected for writers by
 * cipso_v4_doi_list_lock; readers traverse it under RCU. */
static DEFINE_SPINLOCK(cipso_v4_doi_list_lock);
static LIST_HEAD(cipso_v4_doi_list);

/* Label mapping cache controls (exported as sysctl knobs elsewhere). */
int cipso_v4_cache_enabled = 1;
int cipso_v4_cache_bucketsize = 10;
#define CIPSO_V4_CACHE_BUCKETBITS 7
#define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS)
#define CIPSO_V4_CACHE_REORDERLIMIT 10
/* One hash bucket of the label mapping cache; each bucket carries its own
 * spinlock so lookups in different buckets do not contend. */
struct cipso_v4_map_cache_bkt {
	spinlock_t lock;
	u32 size;
	struct list_head list;
};
/* A single cached CIPSO-option -> LSM secattr mapping. */
struct cipso_v4_map_cache_entry {
	u32 hash;		/* jhash of @key, used to pick the bucket */
	unsigned char *key;	/* raw CIPSO option bytes (the lookup key) */
	size_t key_len;

	struct netlbl_lsm_cache *lsm_data;	/* refcounted LSM blob */

	u32 activity;		/* hit counter driving in-bucket reordering */
	struct list_head list;
};
static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL;

/* Restricted bitmap tag tunables (exported as sysctl knobs elsewhere). */
int cipso_v4_rbm_optfmt = 0;
int cipso_v4_rbm_strictvalid = 1;

/* Maximum size of the CIPSO IP option, limited by the IPv4 option length
 * field. */
#define CIPSO_V4_OPT_LEN_MAX 40

/* Length of the base CIPSO option header: type, length and 32 bit DOI. */
#define CIPSO_V4_HDR_LEN 6

/* Base (category-less) length of the restricted bitmap tag, tag type #1. */
#define CIPSO_V4_TAG_RBM_BLEN 4

/* Base length of the enumerated tag, tag type #2. */
#define CIPSO_V4_TAG_ENUM_BLEN 4

/* Base length of the ranged tag, tag type #5. */
#define CIPSO_V4_TAG_RNG_BLEN 4

/* Maximum number of category ranges permitted in the ranged tag. */
#define CIPSO_V4_TAG_RNG_CAT_MAX 8

/* Length of the local (non-standard) tag carrying a 32 bit secid. */
#define CIPSO_V4_TAG_LOC_BLEN 6
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148static int cipso_v4_bitmap_walk(const unsigned char *bitmap,
149 u32 bitmap_len,
150 u32 offset,
151 u8 state)
152{
153 u32 bit_spot;
154 u32 byte_offset;
155 unsigned char bitmask;
156 unsigned char byte;
157
158
159 byte_offset = offset / 8;
160 byte = bitmap[byte_offset];
161 bit_spot = offset;
162 bitmask = 0x80 >> (offset % 8);
163
164 while (bit_spot < bitmap_len) {
165 if ((state && (byte & bitmask) == bitmask) ||
166 (state == 0 && (byte & bitmask) == 0))
167 return bit_spot;
168
169 bit_spot++;
170 bitmask >>= 1;
171 if (bitmask == 0) {
172 byte = bitmap[++byte_offset];
173 bitmask = 0x80;
174 }
175 }
176
177 return -1;
178}
179
180
181
182
183
184
185
186
187
188
189
190static void cipso_v4_bitmap_setbit(unsigned char *bitmap,
191 u32 bit,
192 u8 state)
193{
194 u32 byte_spot;
195 u8 bitmask;
196
197
198 byte_spot = bit / 8;
199 bitmask = 0x80 >> (bit % 8);
200 if (state)
201 bitmap[byte_spot] |= bitmask;
202 else
203 bitmap[byte_spot] &= ~bitmask;
204}
205
206
207
208
209
210
211
212
213
214
/**
 * cipso_v4_cache_entry_free - Frees a cache entry
 * @entry: the entry to free
 *
 * Description:
 * Frees the memory associated with a cache entry, dropping our reference on
 * the attached LSM cache data (netlbl_secattr_cache_free() releases the LSM
 * blob once the reference count reaches zero).
 *
 */
static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry)
{
	if (entry->lsm_data)
		netlbl_secattr_cache_free(entry->lsm_data);
	kfree(entry->key);
	kfree(entry);
}
222
223
224
225
226
227
228
229
230
231
/**
 * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache
 * @key: the hash key (raw CIPSO option bytes)
 * @key_len: the length of the key in bytes
 *
 * Description:
 * The CIPSO mapping cache hashing function.  Returns a 32 bit hash value.
 *
 */
static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250static int cipso_v4_cache_init(void)
251{
252 u32 iter;
253
254 cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
255 sizeof(struct cipso_v4_map_cache_bkt),
256 GFP_KERNEL);
257 if (cipso_v4_cache == NULL)
258 return -ENOMEM;
259
260 for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
261 spin_lock_init(&cipso_v4_cache[iter].lock);
262 cipso_v4_cache[iter].size = 0;
263 INIT_LIST_HEAD(&cipso_v4_cache[iter].list);
264 }
265
266 return 0;
267}
268
269
270
271
272
273
274
275
276
/**
 * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache
 *
 * Description:
 * Invalidates and frees every entry in the CIPSO label mapping cache.  Each
 * bucket is emptied under its own BH-safe spinlock so this is safe against
 * concurrent lookups from softirq context.
 *
 */
void cipso_v4_cache_invalidate(void)
{
	struct cipso_v4_map_cache_entry *entry, *tmp_entry;
	u32 iter;

	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
		spin_lock_bh(&cipso_v4_cache[iter].lock);
		list_for_each_entry_safe(entry,
					 tmp_entry,
					 &cipso_v4_cache[iter].list, list) {
			list_del(&entry->list);
			cipso_v4_cache_entry_free(entry);
		}
		cipso_v4_cache[iter].size = 0;
		spin_unlock_bh(&cipso_v4_cache[iter].lock);
	}
}
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
/**
 * cipso_v4_cache_check - Check the CIPSO cache for a label mapping
 * @key: the buffer to check (raw CIPSO option bytes)
 * @key_len: buffer length in bytes
 * @secattr: the security attribute struct to populate
 *
 * Description:
 * Checks the cache to see if a label mapping already exists for the given
 * key.  If there is a match then @secattr is populated with the cached LSM
 * security attributes and the cache bucket is adjusted as follows, unless the
 * entry is already first in the bucket:
 *
 *  1. The matching entry's activity counter is incremented
 *  2. The previous (higher ranked) entry's activity counter is decremented
 *  3. If the difference between the two counters exceeds
 *     CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped
 *
 * Returns zero on a cache hit, -ENOENT on a miss or if the cache is
 * disabled.
 *
 */
static int cipso_v4_cache_check(const unsigned char *key,
				u32 key_len,
				struct netlbl_lsm_secattr *secattr)
{
	u32 bkt;
	struct cipso_v4_map_cache_entry *entry;
	struct cipso_v4_map_cache_entry *prev_entry = NULL;
	u32 hash;

	if (!cipso_v4_cache_enabled)
		return -ENOENT;

	hash = cipso_v4_map_cache_hash(key, key_len);
	bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);
	spin_lock_bh(&cipso_v4_cache[bkt].lock);
	list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
		/* cheap hash/length compare first, memcmp only on a match */
		if (entry->hash == hash &&
		    entry->key_len == key_len &&
		    memcmp(entry->key, key, key_len) == 0) {
			entry->activity += 1;
			/* hand the caller a reference on the LSM data */
			atomic_inc(&entry->lsm_data->refcount);
			secattr->cache = entry->lsm_data;
			secattr->flags |= NETLBL_SECATTR_CACHE;
			secattr->type = NETLBL_NLTYPE_CIPSOV4;
			/* already at the head of the bucket, nothing to
			 * reorder */
			if (prev_entry == NULL) {
				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
				return 0;
			}

			if (prev_entry->activity > 0)
				prev_entry->activity -= 1;
			if (entry->activity > prev_entry->activity &&
			    entry->activity - prev_entry->activity >
			    CIPSO_V4_CACHE_REORDERLIMIT) {
				/* promote the hot entry one slot: unlink it
				 * and re-insert it in front of prev_entry */
				__list_del(entry->list.prev, entry->list.next);
				__list_add(&entry->list,
					   prev_entry->list.prev,
					   &prev_entry->list);
			}

			spin_unlock_bh(&cipso_v4_cache[bkt].lock);
			return 0;
		}
		prev_entry = entry;
	}
	spin_unlock_bh(&cipso_v4_cache[bkt].lock);

	return -ENOENT;
}
366
367
368
369
370
371
372
373
374
375
376
377
378
379
/**
 * cipso_v4_cache_add - Add an entry to the CIPSO cache
 * @skb: the packet carrying the CIPSO option
 * @secattr: the packet's security attributes (must carry a valid ->cache)
 *
 * Description:
 * Adds a new entry to the CIPSO label mapping cache, keyed on the raw CIPSO
 * option bytes taken from @skb.  The new entry is inserted at the head of
 * its bucket; if the bucket is full the oldest (tail) entry is evicted
 * first.  Note that no duplicate-key checking is performed.  Returns zero on
 * success (including when caching is disabled), negative values on failure.
 *
 */
int cipso_v4_cache_add(const struct sk_buff *skb,
		       const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	u32 bkt;
	struct cipso_v4_map_cache_entry *entry = NULL;
	struct cipso_v4_map_cache_entry *old_entry = NULL;
	unsigned char *cipso_ptr;
	u32 cipso_ptr_len;

	if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0)
		return 0;

	/* the option's length byte gives the size of the cache key */
	cipso_ptr = CIPSO_V4_OPTPTR(skb);
	cipso_ptr_len = cipso_ptr[1];

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (entry == NULL)
		return -ENOMEM;
	entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
	if (entry->key == NULL) {
		ret_val = -ENOMEM;
		goto cache_add_failure;
	}
	entry->key_len = cipso_ptr_len;
	entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len);
	/* the cache entry keeps its own reference on the LSM data */
	atomic_inc(&secattr->cache->refcount);
	entry->lsm_data = secattr->cache;

	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
	spin_lock_bh(&cipso_v4_cache[bkt].lock);
	if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
		list_add(&entry->list, &cipso_v4_cache[bkt].list);
		cipso_v4_cache[bkt].size += 1;
	} else {
		/* bucket full: evict the least recently promoted entry at
		 * the tail before inserting the new one at the head */
		old_entry = list_entry(cipso_v4_cache[bkt].list.prev,
				       struct cipso_v4_map_cache_entry, list);
		list_del(&old_entry->list);
		list_add(&entry->list, &cipso_v4_cache[bkt].list);
		cipso_v4_cache_entry_free(old_entry);
	}
	spin_unlock_bh(&cipso_v4_cache[bkt].lock);

	return 0;

cache_add_failure:
	if (entry)
		cipso_v4_cache_entry_free(entry);
	return ret_val;
}
430
431
432
433
434
435
436
437
438
439
440
441
442
443
/**
 * cipso_v4_doi_search - Searches for a DOI definition
 * @doi: the DOI to search for
 *
 * Description:
 * Search the DOI definition list for a DOI definition with a DOI value that
 * matches @doi.  Entries whose reference count has dropped to zero are being
 * removed and are skipped.  The caller must be holding either
 * cipso_v4_doi_list_lock or the RCU read lock.  Returns a pointer to the DOI
 * definition on success and NULL on failure.
 */
static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi)
{
	struct cipso_v4_doi *iter;

	list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list)
		if (iter->doi == doi && atomic_read(&iter->refcount))
			return iter;
	return NULL;
}
453
454
455
456
457
458
459
460
461
462
463
464
465
466
/**
 * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine
 * @doi_def: the DOI structure
 * @audit_info: NetLabel audit information
 *
 * Description:
 * The caller defines a new DOI for use by the CIPSO engine and calls this
 * function to add it to the list of acceptable domains.  The caller must
 * ensure that the mapping table specified in @doi_def->map meets all of the
 * requirements of the mapping type.  An audit record is always emitted with
 * the outcome.  Returns zero on success and non-zero on failure.
 *
 */
int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
		     struct netlbl_audit *audit_info)
{
	int ret_val = -EINVAL;
	u32 iter;
	u32 doi;
	u32 doi_type;
	struct audit_buffer *audit_buf;

	doi = doi_def->doi;
	doi_type = doi_def->type;

	if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
		goto doi_add_return;
	/* sanity check the tag list: each tag type must be compatible with
	 * the DOI's mapping type and at least one tag must be present */
	for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
		switch (doi_def->tags[iter]) {
		case CIPSO_V4_TAG_RBITMAP:
			break;
		case CIPSO_V4_TAG_RANGE:
		case CIPSO_V4_TAG_ENUM:
			if (doi_def->type != CIPSO_V4_MAP_PASS)
				goto doi_add_return;
			break;
		case CIPSO_V4_TAG_LOCAL:
			if (doi_def->type != CIPSO_V4_MAP_LOCAL)
				goto doi_add_return;
			break;
		case CIPSO_V4_TAG_INVALID:
			if (iter == 0)
				goto doi_add_return;
			break;
		default:
			goto doi_add_return;
		}
	}

	/* the initial reference represents membership on the DOI list */
	atomic_set(&doi_def->refcount, 1);

	spin_lock(&cipso_v4_doi_list_lock);
	if (cipso_v4_doi_search(doi_def->doi) != NULL) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -EEXIST;
		goto doi_add_return;
	}
	list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
	spin_unlock(&cipso_v4_doi_list_lock);
	ret_val = 0;

doi_add_return:
	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
	if (audit_buf != NULL) {
		const char *type_str;
		switch (doi_type) {
		case CIPSO_V4_MAP_TRANS:
			type_str = "trans";
			break;
		case CIPSO_V4_MAP_PASS:
			type_str = "pass";
			break;
		case CIPSO_V4_MAP_LOCAL:
			type_str = "local";
			break;
		default:
			type_str = "(unknown)";
		}
		audit_log_format(audit_buf,
				 " cipso_doi=%u cipso_type=%s res=%u",
				 doi, type_str, ret_val == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}

	return ret_val;
}
540
541
542
543
544
545
546
547
548
/**
 * cipso_v4_doi_free - Frees a DOI definition
 * @doi_def: the DOI definition
 *
 * Description:
 * This function frees all of the memory associated with a DOI definition.
 * Only the CIPSO_V4_MAP_TRANS type owns a standard mapping table; the other
 * mapping types have nothing extra to release.
 *
 */
void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
{
	if (doi_def == NULL)
		return;

	switch (doi_def->type) {
	case CIPSO_V4_MAP_TRANS:
		kfree(doi_def->map.std->lvl.cipso);
		kfree(doi_def->map.std->lvl.local);
		kfree(doi_def->map.std->cat.cipso);
		kfree(doi_def->map.std->cat.local);
		break;
	}
	kfree(doi_def);
}
564
565
566
567
568
569
570
571
572
573
574
/**
 * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer
 * @entry: the entry's RCU field
 *
 * Description:
 * This function is designed to be used as a callback to the call_rcu()
 * function so that the memory allocated to the DOI definition can be
 * released safely once all RCU readers have finished with it.
 *
 */
static void cipso_v4_doi_free_rcu(struct rcu_head *entry)
{
	struct cipso_v4_doi *doi_def;

	doi_def = container_of(entry, struct cipso_v4_doi, rcu);
	cipso_v4_doi_free(doi_def);
}
582
583
584
585
586
587
588
589
590
591
592
593
/**
 * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine
 * @doi: the DOI value
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Removes a DOI definition from the CIPSO engine.  The definition is only
 * removed when dropping the list's reference brings the reference count to
 * zero; otherwise -EBUSY is returned because other users still hold it.  On
 * success the mapping cache is invalidated and the definition is freed after
 * an RCU grace period.  An audit record is always emitted with the outcome.
 * Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
{
	int ret_val;
	struct cipso_v4_doi *doi_def;
	struct audit_buffer *audit_buf;

	spin_lock(&cipso_v4_doi_list_lock);
	doi_def = cipso_v4_doi_search(doi);
	if (doi_def == NULL) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -ENOENT;
		goto doi_remove_return;
	}
	if (!atomic_dec_and_test(&doi_def->refcount)) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -EBUSY;
		goto doi_remove_return;
	}
	list_del_rcu(&doi_def->list);
	spin_unlock(&cipso_v4_doi_list_lock);

	/* flush any cached mappings that reference the removed DOI */
	cipso_v4_cache_invalidate();
	call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
	ret_val = 0;

doi_remove_return:
	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
	if (audit_buf != NULL) {
		audit_log_format(audit_buf,
				 " cipso_doi=%u res=%u",
				 doi, ret_val == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}

	return ret_val;
}
630
631
632
633
634
635
636
637
638
639
640
641
/**
 * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition
 * @doi: the DOI value
 *
 * Description:
 * Searches for a valid DOI definition and if one is found it is returned to
 * the caller with its reference count incremented; the caller must later
 * release it with cipso_v4_doi_putdef().  The atomic_inc_not_zero() guards
 * against racing with a concurrent removal whose count already hit zero.
 * Returns NULL if no matching, live definition exists.
 *
 */
struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)
{
	struct cipso_v4_doi *doi_def;

	rcu_read_lock();
	doi_def = cipso_v4_doi_search(doi);
	if (doi_def == NULL)
		goto doi_getdef_return;
	if (!atomic_inc_not_zero(&doi_def->refcount))
		doi_def = NULL;

doi_getdef_return:
	rcu_read_unlock();
	return doi_def;
}
657
658
659
660
661
662
663
664
665
/**
 * cipso_v4_doi_putdef - Releases a reference for the given DOI definition
 * @doi_def: the DOI definition
 *
 * Description:
 * Releases a DOI definition reference obtained from cipso_v4_doi_getdef().
 * If this was the last reference the definition is unlinked from the DOI
 * list, the mapping cache is invalidated and the memory is freed after an
 * RCU grace period.
 *
 */
void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
{
	if (doi_def == NULL)
		return;

	if (!atomic_dec_and_test(&doi_def->refcount))
		return;
	spin_lock(&cipso_v4_doi_list_lock);
	list_del_rcu(&doi_def->list);
	spin_unlock(&cipso_v4_doi_list_lock);

	cipso_v4_cache_invalidate();
	call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
}
680
681
682
683
684
685
686
687
688
689
690
691
692
693
/**
 * cipso_v4_doi_walk - Iterate through the DOI definitions
 * @skip_cnt: skip past this number of DOI definitions, updated
 * @callback: callback for each DOI definition
 * @cb_arg: argument for the callback function
 *
 * Description:
 * Iterate over the DOI definition list, skipping the first @skip_cnt live
 * entries.  For each entry call @callback; if @callback returns a negative
 * value stop 'walking' through the list and return.  Updates @skip_cnt with
 * the number of entries consumed so a subsequent call can resume.  Returns
 * zero on success, negative values on failure (-ENOENT if no entry was
 * visited).
 *
 */
int cipso_v4_doi_walk(u32 *skip_cnt,
		     int (*callback) (struct cipso_v4_doi *doi_def, void *arg),
		     void *cb_arg)
{
	int ret_val = -ENOENT;
	u32 doi_cnt = 0;
	struct cipso_v4_doi *iter_doi;

	rcu_read_lock();
	list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list)
		if (atomic_read(&iter_doi->refcount) > 0) {
			if (doi_cnt++ < *skip_cnt)
				continue;
			ret_val = callback(iter_doi, cb_arg);
			if (ret_val < 0) {
				doi_cnt--;
				goto doi_walk_return;
			}
		}

doi_walk_return:
	rcu_read_unlock();
	*skip_cnt = doi_cnt;
	return ret_val;
}
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
/**
 * cipso_v4_map_lvl_valid - Checks to see if the given level is understood
 * @doi_def: the DOI definition
 * @level: the level to check
 *
 * Description:
 * Checks the given level against the given DOI definition and returns a
 * negative value if the level does not have a valid mapping and a zero value
 * if the level is defined by the DOI definition.
 *
 * NOTE(review): for CIPSO_V4_MAP_TRANS the lvl.cipso[] table is indexed by
 * @level without a visible bounds check against lvl.cipso_size here — this
 * presumably relies on the table covering the full u8 level range; verify
 * against the code that builds the mapping table.
 *
 */
static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
{
	switch (doi_def->type) {
	case CIPSO_V4_MAP_PASS:
		return 0;
	case CIPSO_V4_MAP_TRANS:
		if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
			return 0;
		break;
	}

	return -EFAULT;
}
748
749
750
751
752
753
754
755
756
757
758
759
760
761static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def,
762 u32 host_lvl,
763 u32 *net_lvl)
764{
765 switch (doi_def->type) {
766 case CIPSO_V4_MAP_PASS:
767 *net_lvl = host_lvl;
768 return 0;
769 case CIPSO_V4_MAP_TRANS:
770 if (host_lvl < doi_def->map.std->lvl.local_size &&
771 doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) {
772 *net_lvl = doi_def->map.std->lvl.local[host_lvl];
773 return 0;
774 }
775 return -EPERM;
776 }
777
778 return -EINVAL;
779}
780
781
782
783
784
785
786
787
788
789
790
791
792
793static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def,
794 u32 net_lvl,
795 u32 *host_lvl)
796{
797 struct cipso_v4_std_map_tbl *map_tbl;
798
799 switch (doi_def->type) {
800 case CIPSO_V4_MAP_PASS:
801 *host_lvl = net_lvl;
802 return 0;
803 case CIPSO_V4_MAP_TRANS:
804 map_tbl = doi_def->map.std;
805 if (net_lvl < map_tbl->lvl.cipso_size &&
806 map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) {
807 *host_lvl = doi_def->map.std->lvl.cipso[net_lvl];
808 return 0;
809 }
810 return -EPERM;
811 }
812
813 return -EINVAL;
814}
815
816
817
818
819
820
821
822
823
824
825
826
827
/**
 * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid
 * @doi_def: the DOI definition
 * @bitmap: category bitmap
 * @bitmap_len: bitmap length in bytes
 *
 * Description:
 * Checks the given category bitmap against the given DOI definition and
 * returns a negative value if any of the set categories do not have a valid
 * mapping and a zero value if all of the categories are valid.
 *
 */
static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def,
				      const unsigned char *bitmap,
				      u32 bitmap_len)
{
	int cat = -1;
	u32 bitmap_len_bits = bitmap_len * 8;
	u32 cipso_cat_size;
	u32 *cipso_array;

	switch (doi_def->type) {
	case CIPSO_V4_MAP_PASS:
		return 0;
	case CIPSO_V4_MAP_TRANS:
		cipso_cat_size = doi_def->map.std->cat.cipso_size;
		cipso_array = doi_def->map.std->cat.cipso;
		/* walk every set bit and make sure it has a mapping */
		for (;;) {
			cat = cipso_v4_bitmap_walk(bitmap,
						   bitmap_len_bits,
						   cat + 1,
						   1);
			if (cat < 0)
				break;
			if (cat >= cipso_cat_size ||
			    cipso_array[cat] >= CIPSO_V4_INV_CAT)
				return -EFAULT;
		}

		if (cat == -1)
			return 0;
		break;
	}

	return -EFAULT;
}
862
863
864
865
866
867
868
869
870
871
872
873
874
875
/**
 * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @net_cat: the zero'd out category bitmap in network/CIPSO format
 * @net_cat_len: the length of the CIPSO bitmap in bytes
 *
 * Description:
 * Perform a label mapping to translate a local MLS category bitmap to the
 * correct CIPSO bitmap using the given DOI definition.  Returns the minimum
 * size in bytes of the network bitmap on success, negative values otherwise.
 *
 */
static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def,
				     const struct netlbl_lsm_secattr *secattr,
				     unsigned char *net_cat,
				     u32 net_cat_len)
{
	int host_spot = -1;
	u32 net_spot = CIPSO_V4_INV_CAT;
	u32 net_spot_max = 0;
	u32 net_clen_bits = net_cat_len * 8;
	u32 host_cat_size = 0;
	u32 *host_cat_array = NULL;

	if (doi_def->type == CIPSO_V4_MAP_TRANS) {
		host_cat_size = doi_def->map.std->cat.local_size;
		host_cat_array = doi_def->map.std->cat.local;
	}

	for (;;) {
		/* iterate over every category set in the secattr catmap */
		host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
						       host_spot + 1);
		if (host_spot < 0)
			break;

		switch (doi_def->type) {
		case CIPSO_V4_MAP_PASS:
			net_spot = host_spot;
			break;
		case CIPSO_V4_MAP_TRANS:
			if (host_spot >= host_cat_size)
				return -EPERM;
			net_spot = host_cat_array[host_spot];
			if (net_spot >= CIPSO_V4_INV_CAT)
				return -EPERM;
			break;
		}
		if (net_spot >= net_clen_bits)
			return -ENOSPC;
		cipso_v4_bitmap_setbit(net_cat, net_spot, 1);

		/* track the highest bit so the bitmap can be truncated */
		if (net_spot > net_spot_max)
			net_spot_max = net_spot;
	}

	/* round the highest set bit up to a whole number of bytes */
	if (++net_spot_max % 8)
		return net_spot_max / 8 + 1;
	return net_spot_max / 8;
}
923
924
925
926
927
928
929
930
931
932
933
934
935
936
/**
 * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host
 * @doi_def: the DOI definition
 * @net_cat: the category bitmap in network/CIPSO format
 * @net_cat_len: the length of the CIPSO bitmap in bytes
 * @secattr: the security attributes
 *
 * Description:
 * Perform a label mapping to translate a CIPSO bitmap to the correct local
 * MLS category bitmap using the given DOI definition.  Returns zero on
 * success, negative values on failure.
 *
 */
static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
				     const unsigned char *net_cat,
				     u32 net_cat_len,
				     struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	int net_spot = -1;
	u32 host_spot = CIPSO_V4_INV_CAT;
	u32 net_clen_bits = net_cat_len * 8;
	u32 net_cat_size = 0;
	u32 *net_cat_array = NULL;

	if (doi_def->type == CIPSO_V4_MAP_TRANS) {
		net_cat_size = doi_def->map.std->cat.cipso_size;
		net_cat_array = doi_def->map.std->cat.cipso;
	}

	for (;;) {
		net_spot = cipso_v4_bitmap_walk(net_cat,
						net_clen_bits,
						net_spot + 1,
						1);
		if (net_spot < 0) {
			/* NOTE(review): cipso_v4_bitmap_walk() above only
			 * ever returns -1 on exhaustion, so the -2 error
			 * branch looks unreachable here — confirm against
			 * the walk helper before relying on it */
			if (net_spot == -2)
				return -EFAULT;
			return 0;
		}

		switch (doi_def->type) {
		case CIPSO_V4_MAP_PASS:
			host_spot = net_spot;
			break;
		case CIPSO_V4_MAP_TRANS:
			if (net_spot >= net_cat_size)
				return -EPERM;
			host_spot = net_cat_array[net_spot];
			if (host_spot >= CIPSO_V4_INV_CAT)
				return -EPERM;
			break;
		}
		ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat,
						       host_spot,
						       GFP_ATOMIC);
		if (ret_val != 0)
			return ret_val;
	}

	return -EINVAL;
}
986
987
988
989
990
991
992
993
994
995
996
997
998
999static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def,
1000 const unsigned char *enumcat,
1001 u32 enumcat_len)
1002{
1003 u16 cat;
1004 int cat_prev = -1;
1005 u32 iter;
1006
1007 if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01)
1008 return -EFAULT;
1009
1010 for (iter = 0; iter < enumcat_len; iter += 2) {
1011 cat = get_unaligned_be16(&enumcat[iter]);
1012 if (cat <= cat_prev)
1013 return -EFAULT;
1014 cat_prev = cat;
1015 }
1016
1017 return 0;
1018}
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def,
1035 const struct netlbl_lsm_secattr *secattr,
1036 unsigned char *net_cat,
1037 u32 net_cat_len)
1038{
1039 int cat = -1;
1040 u32 cat_iter = 0;
1041
1042 for (;;) {
1043 cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
1044 cat + 1);
1045 if (cat < 0)
1046 break;
1047 if ((cat_iter + 2) > net_cat_len)
1048 return -ENOSPC;
1049
1050 *((__be16 *)&net_cat[cat_iter]) = htons(cat);
1051 cat_iter += 2;
1052 }
1053
1054 return cat_iter;
1055}
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def,
1071 const unsigned char *net_cat,
1072 u32 net_cat_len,
1073 struct netlbl_lsm_secattr *secattr)
1074{
1075 int ret_val;
1076 u32 iter;
1077
1078 for (iter = 0; iter < net_cat_len; iter += 2) {
1079 ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat,
1080 get_unaligned_be16(&net_cat[iter]),
1081 GFP_ATOMIC);
1082 if (ret_val != 0)
1083 return ret_val;
1084 }
1085
1086 return 0;
1087}
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
/**
 * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid
 * @doi_def: the DOI definition
 * @rngcat: category list
 * @rngcat_len: length of the category list in bytes
 *
 * Description:
 * Checks the given categories against the given DOI definition and returns a
 * negative value if any of the ranges are invalid and a zero value if all of
 * the ranges are valid.  Ranged tags are only valid with pass-through DOIs;
 * each range is a (top, bottom) pair of 16 bit values, the bottom of the
 * last range may be omitted (defaulting to zero), and ranges must be
 * non-overlapping in decreasing order.
 *
 */
static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def,
				      const unsigned char *rngcat,
				      u32 rngcat_len)
{
	u16 cat_high;
	u16 cat_low;
	u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1;
	u32 iter;

	if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01)
		return -EFAULT;

	for (iter = 0; iter < rngcat_len; iter += 4) {
		cat_high = get_unaligned_be16(&rngcat[iter]);
		if ((iter + 4) <= rngcat_len)
			cat_low = get_unaligned_be16(&rngcat[iter + 2]);
		else
			cat_low = 0;

		/* each range's top must sit below the previous range */
		if (cat_high > cat_prev)
			return -EFAULT;

		cat_prev = cat_low;
	}

	return 0;
}
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
/**
 * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @net_cat: the zero'd out category list in network/CIPSO format
 * @net_cat_len: the length of the CIPSO category list in bytes
 *
 * Description:
 * Translates the host category ranges in @secattr into the CIPSO ranged tag
 * category format.  Ranges are gathered lowest-first into @array and then
 * emitted in reverse so the wire format carries them highest-first, with
 * each range written as (top, bottom) and a bottom of zero omitted.  Returns
 * the size in bytes of the network category data on success, negative values
 * otherwise.
 *
 */
static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def,
				     const struct netlbl_lsm_secattr *secattr,
				     unsigned char *net_cat,
				     u32 net_cat_len)
{
	int iter = -1;
	u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2];
	u32 array_cnt = 0;
	u32 cat_size = 0;

	/* make sure we don't overflow the fixed-size range array above */
	if (net_cat_len >
	    (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN))
		return -ENOSPC;

	for (;;) {
		/* find the start (bottom) of the next category range ... */
		iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
						  iter + 1);
		if (iter < 0)
			break;
		/* a bottom of zero is implicit on the wire, so costs 0 */
		cat_size += (iter == 0 ? 0 : sizeof(u16));
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;

		/* ... and its end (top) */
		iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat,
						      iter);
		if (iter < 0)
			return -EFAULT;
		cat_size += sizeof(u16);
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;
	}

	/* emit the collected (bottom, top) pairs in reverse order: top
	 * first, then bottom unless it is zero */
	for (iter = 0; array_cnt > 0;) {
		*((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]);
		iter += 2;
		array_cnt--;
		if (array[array_cnt] != 0) {
			*((__be16 *)&net_cat[iter]) = htons(array[array_cnt]);
			iter += 2;
		}
	}

	return cat_size;
}
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
1205 const unsigned char *net_cat,
1206 u32 net_cat_len,
1207 struct netlbl_lsm_secattr *secattr)
1208{
1209 int ret_val;
1210 u32 net_iter;
1211 u16 cat_low;
1212 u16 cat_high;
1213
1214 for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
1215 cat_high = get_unaligned_be16(&net_cat[net_iter]);
1216 if ((net_iter + 4) <= net_cat_len)
1217 cat_low = get_unaligned_be16(&net_cat[net_iter + 2]);
1218 else
1219 cat_low = 0;
1220
1221 ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat,
1222 cat_low,
1223 cat_high,
1224 GFP_ATOMIC);
1225 if (ret_val != 0)
1226 return ret_val;
1227 }
1228
1229 return 0;
1230}
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
/**
 * cipso_v4_gentag_hdr - Write a CIPSO header into a buffer
 * @doi_def: the DOI definition
 * @buf: the buffer (must hold at least CIPSO_V4_HDR_LEN bytes)
 * @len: the total tag length in bytes, not including this header
 *
 * Description:
 * Write a CIPSO header (option type, option length, 32 bit DOI) into the
 * start of @buf.
 *
 */
static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def,
				unsigned char *buf,
				u32 len)
{
	buf[0] = IPOPT_CIPSO;
	buf[1] = CIPSO_V4_HDR_LEN + len;
	/* DOI starts at offset 2, so this 32 bit store is unaligned */
	*(__be32 *)&buf[2] = htonl(doi_def->doi);
}
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
/**
 * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the restricted bitmap tag, tag type #1.  The
 * @secattr must carry an MLS level; categories are optional.  Returns the
 * size of the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0)
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rbm_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		/* This will send packets using the "optimized" format
		 * (a fixed 10 byte category bitmap) when possible, padding
		 * smaller bitmaps up to the fixed 14 byte tag size. */
		if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
			tag_len = 14;
		else
			tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RBITMAP;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
/**
 * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag (type #1)
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO restricted bitmap tag and return the security attributes in
 * @secattr.  The level is always set; the category map is allocated and
 * filled only when the tag carries category data (tag length > 4).  Returns
 * zero on success, negative values on failure.
 *
 */
static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	if (tag_len > 4) {
		secattr->attr.mls.cat =
			               netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			/* don't leak a partially-filled catmap on failure */
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
/**
 * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: buffer length in bytes
 *
 * Description:
 * Generate a CIPSO option using the enumerated tag, tag type #2.  The
 * @secattr must carry an MLS level; categories are optional.  Returns the
 * size of the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
				const struct netlbl_lsm_secattr *secattr,
				unsigned char *buffer,
				u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_enum_hton(doi_def,
						     secattr,
						     &buffer[4],
						     buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_ENUM;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
/**
 * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag (type #2)
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO enumerated tag and return the security attributes in
 * @secattr.  The level is always set; the category map is allocated and
 * filled only when the tag carries category data (tag length > 4).  Returns
 * zero on success, negative values on failure.
 *
 */
static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
				  const unsigned char *tag,
				  struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	if (tag_len > 4) {
		secattr->attr.mls.cat =
			               netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
						     &tag[4],
						     tag_len - 4,
						     secattr);
		if (ret_val != 0) {
			/* don't leak a partially-filled catmap on failure */
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
/**
 * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: buffer length in bytes
 *
 * Description:
 * Generate a CIPSO option using the ranged tag, tag type #5.  The @secattr
 * must carry an MLS level; categories are optional.  Returns the size of the
 * tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rng_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RANGE;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
/**
 * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag (type #5)
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO ranged tag and return the security attributes in @secattr.
 * The level is always set; the category map is allocated and filled only
 * when the tag carries category data (tag length > 4).  Returns zero on
 * success, negative values on failure.
 *
 */
static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	if (tag_len > 4) {
		secattr->attr.mls.cat =
			               netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			/* don't leak a partially-filled catmap on failure */
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
/**
 * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: buffer length in bytes
 *
 * Description:
 * Generate a CIPSO option using the local tag, which simply embeds the LSM
 * secid in host byte order; the tag never leaves the local machine (see the
 * loopback-only check in cipso_v4_validate()).  Returns the size of the tag
 * on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	if (!(secattr->flags & NETLBL_SECATTR_SECID))
		return -EPERM;

	buffer[0] = CIPSO_V4_TAG_LOCAL;
	buffer[1] = CIPSO_V4_TAG_LOC_BLEN;
	/* host byte order on purpose: this tag is local-only */
	*(u32 *)&buffer[2] = secattr->attr.secid;

	return CIPSO_V4_TAG_LOC_BLEN;
}
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
/**
 * cipso_v4_parsetag_loc - Parse a CIPSO local tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO local tag and return the security attributes in @secattr;
 * the secid is read in host byte order, matching cipso_v4_gentag_loc().
 * Returns zero on success, negative values on failure.
 *
 */
static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	secattr->attr.secid = *(u32 *)&tag[2];
	secattr->flags |= NETLBL_SECATTR_SECID;

	return 0;
}
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
/**
 * cipso_v4_validate - Validate a CIPSO option
 * @skb: the packet
 * @option: the start of the option, on error it is set to point to the error
 *
 * Description:
 * This routine is called to validate a CIPSO option, it checks all of the
 * fields to ensure that they are at least valid.  If the option is valid
 * then zero is returned and the value of @option is unchanged.  If the
 * option is invalid then a non-zero byte offset into the option is returned
 * and @option is advanced to the offending portion, which is what the ICMP
 * parameter-problem machinery expects.
 *
 */
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
{
	unsigned char *opt = *option;
	unsigned char *tag;
	unsigned char opt_iter;
	unsigned char err_offset = 0;
	u8 opt_len;
	u8 tag_len;
	struct cipso_v4_doi *doi_def = NULL;
	u32 tag_iter;

	/* the smallest valid option is the 6 byte header plus a minimal
	 * tag; anything shorter is rejected at the length byte */
	opt_len = opt[1];
	if (opt_len < 8) {
		err_offset = 1;
		goto validate_return;
	}

	/* the DOI in the option must be one we have a definition for */
	rcu_read_lock();
	doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
	if (doi_def == NULL) {
		err_offset = 2;
		goto validate_return_locked;
	}

	opt_iter = CIPSO_V4_HDR_LEN;
	tag = opt + opt_iter;
	while (opt_iter < opt_len) {
		/* the tag type must appear in the DOI's permitted tag list */
		for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];)
			if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID ||
			    ++tag_iter == CIPSO_V4_TAG_MAXCNT) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}

		tag_len = tag[1];
		if (tag_len > (opt_len - opt_iter)) {
			err_offset = opt_iter + 1;
			goto validate_return_locked;
		}

		switch (tag[0]) {
		case CIPSO_V4_TAG_RBITMAP:
			if (tag_len < CIPSO_V4_TAG_RBM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			/* strict validation of the level and category
			 * mappings is optional for the bitmap tag since
			 * some implementations send values outside the
			 * mapping; when strict checking is disabled bad
			 * values are caught later at mapping time instead */
			if (cipso_v4_rbm_strictvalid) {
				if (cipso_v4_map_lvl_valid(doi_def,
							   tag[3]) < 0) {
					err_offset = opt_iter + 3;
					goto validate_return_locked;
				}
				if (tag_len > CIPSO_V4_TAG_RBM_BLEN &&
				    cipso_v4_map_cat_rbm_valid(doi_def,
							    &tag[4],
							    tag_len - 4) < 0) {
					err_offset = opt_iter + 4;
					goto validate_return_locked;
				}
			}
			break;
		case CIPSO_V4_TAG_ENUM:
			if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_ENUM_BLEN &&
			    cipso_v4_map_cat_enum_valid(doi_def,
							&tag[4],
							tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_RANGE:
			if (tag_len < CIPSO_V4_TAG_RNG_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_RNG_BLEN &&
			    cipso_v4_map_cat_rng_valid(doi_def,
						       &tag[4],
						       tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_LOCAL:
			/* This is a non-standard tag that we only allow for
			 * local connections, so if the incoming interface is
			 * not the loopback device drop the packet. */
			if (!(skb->dev->flags & IFF_LOOPBACK)) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}
			if (tag_len != CIPSO_V4_TAG_LOC_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}
			break;
		default:
			err_offset = opt_iter;
			goto validate_return_locked;
		}

		tag += tag_len;
		opt_iter += tag_len;
	}

validate_return_locked:
	rcu_read_unlock();
validate_return:
	*option = opt + err_offset;
	return err_offset;
}
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
1782{
1783 if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
1784 return;
1785
1786 if (gateway)
1787 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
1788 else
1789 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
1790}
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
/**
 * cipso_v4_genopt - Generate a CIPSO option
 * @buf: the option buffer (at least CIPSO_V4_HDR_LEN + tag space)
 * @buf_len: the size of @buf
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Generate a CIPSO option using the DOI definition and security attributes
 * passed to the function.  The DOI's tag list is tried in order until one of
 * the tag generators succeeds.  Returns the total length of the option
 * (header plus tag) on success and negative values on failure.
 *
 */
static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
			   const struct cipso_v4_doi *doi_def,
			   const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u32 iter;

	if (buf_len <= CIPSO_V4_HDR_LEN)
		return -ENOSPC;

	/* Walk the DOI's tag list, attempting each tag type in turn; the
	 * buffer is cleared before every attempt so a failed generator can
	 * not leave stale bytes behind.  Only one tag is ever emitted per
	 * option. */
	iter = 0;
	do {
		memset(buf, 0, buf_len);
		switch (doi_def->tags[iter]) {
		case CIPSO_V4_TAG_RBITMAP:
			ret_val = cipso_v4_gentag_rbm(doi_def,
						   secattr,
						   &buf[CIPSO_V4_HDR_LEN],
						   buf_len - CIPSO_V4_HDR_LEN);
			break;
		case CIPSO_V4_TAG_ENUM:
			ret_val = cipso_v4_gentag_enum(doi_def,
						   secattr,
						   &buf[CIPSO_V4_HDR_LEN],
						   buf_len - CIPSO_V4_HDR_LEN);
			break;
		case CIPSO_V4_TAG_RANGE:
			ret_val = cipso_v4_gentag_rng(doi_def,
						   secattr,
						   &buf[CIPSO_V4_HDR_LEN],
						   buf_len - CIPSO_V4_HDR_LEN);
			break;
		case CIPSO_V4_TAG_LOCAL:
			ret_val = cipso_v4_gentag_loc(doi_def,
						   secattr,
						   &buf[CIPSO_V4_HDR_LEN],
						   buf_len - CIPSO_V4_HDR_LEN);
			break;
		default:
			return -EPERM;
		}

		iter++;
	} while (ret_val < 0 &&
		 iter < CIPSO_V4_TAG_MAXCNT &&
		 doi_def->tags[iter] != CIPSO_V4_TAG_INVALID);
	if (ret_val < 0)
		return ret_val;
	/* ret_val is the tag length; prepend the CIPSO header */
	cipso_v4_gentag_hdr(doi_def, buf, ret_val);
	return CIPSO_V4_HDR_LEN + ret_val;
}
1859
1860static void opt_kfree_rcu(struct rcu_head *head)
1861{
1862 kfree(container_of(head, struct ip_options_rcu, rcu));
1863}
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
/**
 * cipso_v4_sock_setattr - Add a CIPSO option to a socket
 * @sk: the socket
 * @doi_def: the CIPSO DOI to use
 * @secattr: the specific security attributes of the socket
 *
 * Description:
 * Set the CIPSO option on the given socket using the DOI definition and
 * security attributes passed to the function.  This function requires
 * exclusive access to @sk, which means it either needs to be in the
 * process of being created or locked (the rcu_dereference_protected() call
 * below asserts sock_owned_by_user()).  Returns zero on success and
 * negative values on failure.
 *
 */
int cipso_v4_sock_setattr(struct sock *sk,
			  const struct cipso_v4_doi *doi_def,
			  const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	unsigned char *buf = NULL;
	u32 buf_len;
	u32 opt_len;
	struct ip_options_rcu *old, *opt = NULL;
	struct inet_sock *sk_inet;
	struct inet_connection_sock *sk_conn;

	/* In the case of sock_create_lite(), the sock->sk field is not
	 * defined yet but it is not a problem as the only users of these
	 * "lite" PF_INET sockets are functions which do an accept() call
	 * afterwards so we will label the socket as part of the accept(). */
	if (sk == NULL)
		return 0;

	/* We allocate the maximum CIPSO option size here so we are probably
	 * being a little wasteful, but it makes our life _much_ easier later
	 * on and after all we are only talking about 40 bytes. */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (buf == NULL) {
		ret_val = -ENOMEM;
		goto socket_setattr_failure;
	}

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		goto socket_setattr_failure;
	buf_len = ret_val;

	/* We can't use ip_options_get() directly because it makes calls to
	 * ip_options_get_alloc() which allocates memory with GFP_KERNEL and
	 * we can't block here. */
	/* round the option length up to the next 4-byte boundary as the IP
	 * header length is expressed in 32-bit words */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (opt == NULL) {
		ret_val = -ENOMEM;
		goto socket_setattr_failure;
	}
	memcpy(opt->opt.__data, buf, buf_len);
	opt->opt.optlen = opt_len;
	opt->opt.cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	sk_inet = inet_sk(sk);

	old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));
	if (sk_inet->is_icsk) {
		sk_conn = inet_csk(sk);
		/* keep the cached extension header length in sync so the
		 * MSS calculation accounts for the new option size */
		if (old)
			sk_conn->icsk_ext_hdr_len -= old->opt.optlen;
		sk_conn->icsk_ext_hdr_len += opt->opt.optlen;
		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
	}
	rcu_assign_pointer(sk_inet->inet_opt, opt);
	/* the old options must be freed via RCU as readers may still hold
	 * a reference obtained under rcu_read_lock() */
	if (old)
		call_rcu(&old->rcu, opt_kfree_rcu);

	return 0;

socket_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
/**
 * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket
 * @req: the connection request socket
 * @doi_def: the CIPSO DOI to use
 * @secattr: the specific security attributes of the socket
 *
 * Description:
 * Set the CIPSO option on the given socket using the DOI definition and
 * security attributes passed to the function.  Returns zero on success and
 * negative values on failure.
 *
 */
int cipso_v4_req_setattr(struct request_sock *req,
			 const struct cipso_v4_doi *doi_def,
			 const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	unsigned char *buf = NULL;
	u32 buf_len;
	u32 opt_len;
	struct ip_options_rcu *opt = NULL;
	struct inet_request_sock *req_inet;

	/* We allocate the maximum CIPSO option size here so we are probably
	 * being a little wasteful, but it makes our life _much_ easier later
	 * on and after all we are only talking about 40 bytes. */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (buf == NULL) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		goto req_setattr_failure;
	buf_len = ret_val;

	/* We can't use ip_options_get() directly because it makes calls to
	 * ip_options_get_alloc() which allocates memory with GFP_KERNEL and
	 * we can't block here. */
	/* round the option length up to the next 4-byte boundary as the IP
	 * header length is expressed in 32-bit words */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (opt == NULL) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}
	memcpy(opt->opt.__data, buf, buf_len);
	opt->opt.optlen = opt_len;
	opt->opt.cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	req_inet = inet_rsk(req);
	/* atomically swap in the new options; free any previous options via
	 * RCU in case readers still hold a reference */
	opt = xchg(&req_inet->opt, opt);
	if (opt)
		call_rcu(&opt->rcu, opt_kfree_rcu);

	return 0;

req_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
/**
 * cipso_v4_delopt - Delete the CIPSO option from a set of IP options
 * @opt_ptr: IP option pointer
 *
 * Description:
 * Deletes the CIPSO IP option from a set of IP options and makes the
 * necessary adjustments to the IP option structure.  Returns the number of
 * bytes by which the IP header shrank (zero or positive).
 *
 */
static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
{
	int hdr_delta = 0;
	struct ip_options_rcu *opt = *opt_ptr;

	if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
		/* other options are present so we can not simply free the
		 * whole option block; splice out just the CIPSO bytes */
		u8 cipso_len;
		u8 cipso_off;
		unsigned char *cipso_ptr;
		int iter;
		int optlen_new;

		cipso_off = opt->opt.cipso - sizeof(struct iphdr);
		cipso_ptr = &opt->opt.__data[cipso_off];
		/* the second byte of an IP option holds its length */
		cipso_len = cipso_ptr[1];

		/* the recorded offsets of any option that followed the CIPSO
		 * option shift down by the removed length */
		if (opt->opt.srr > opt->opt.cipso)
			opt->opt.srr -= cipso_len;
		if (opt->opt.rr > opt->opt.cipso)
			opt->opt.rr -= cipso_len;
		if (opt->opt.ts > opt->opt.cipso)
			opt->opt.ts -= cipso_len;
		if (opt->opt.router_alert > opt->opt.cipso)
			opt->opt.router_alert -= cipso_len;
		opt->opt.cipso = 0;

		memmove(cipso_ptr, cipso_ptr + cipso_len,
			opt->opt.optlen - cipso_off - cipso_len);

		/* determining the new total option length is tricky because
		 * of the padding necessary; walk the options one-by-one,
		 * skipping NOP padding, to find the end of the last real
		 * option and pad the result to a 4-byte boundary */
		iter = 0;
		optlen_new = 0;
		while (iter < opt->opt.optlen)
			if (opt->opt.__data[iter] != IPOPT_NOP) {
				iter += opt->opt.__data[iter + 1];
				optlen_new = iter;
			} else
				iter++;
		hdr_delta = opt->opt.optlen;
		opt->opt.optlen = (optlen_new + 3) & ~3;
		hdr_delta -= opt->opt.optlen;
	} else {
		/* only the CIPSO option is present so we can free the whole
		 * option block; the actual kfree() is deferred via RCU */
		*opt_ptr = NULL;
		hdr_delta = opt->opt.optlen;
		call_rcu(&opt->rcu, opt_kfree_rcu);
	}

	return hdr_delta;
}
2083
2084
2085
2086
2087
2088
2089
2090
2091
/**
 * cipso_v4_sock_delattr - Delete the CIPSO option from a socket
 * @sk: the socket
 *
 * Description:
 * Removes the CIPSO option from a socket, if present.  The caller is
 * expected to hold exclusive access to @sk.
 *
 */
void cipso_v4_sock_delattr(struct sock *sk)
{
	int hdr_delta;
	struct ip_options_rcu *opt;
	struct inet_sock *sk_inet;

	sk_inet = inet_sk(sk);
	opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
	if (opt == NULL || opt->opt.cipso == 0)
		return;

	hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
	/* for connection-oriented sockets, shrink the cached extension
	 * header length and recompute the MSS to match */
	if (sk_inet->is_icsk && hdr_delta > 0) {
		struct inet_connection_sock *sk_conn = inet_csk(sk);
		sk_conn->icsk_ext_hdr_len -= hdr_delta;
		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
	}
}
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119void cipso_v4_req_delattr(struct request_sock *req)
2120{
2121 struct ip_options_rcu *opt;
2122 struct inet_request_sock *req_inet;
2123
2124 req_inet = inet_rsk(req);
2125 opt = req_inet->opt;
2126 if (opt == NULL || opt->opt.cipso == 0)
2127 return;
2128
2129 cipso_v4_delopt(&req_inet->opt);
2130}
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
/**
 * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions
 * @cipso: the CIPSO v4 option (pointing at the option type byte)
 * @secattr: the security attributes
 *
 * Description:
 * Inspect @cipso and return the security attributes in @secattr.  Returns
 * zero on success and negative values on failure.
 *
 */
static int cipso_v4_getattr(const unsigned char *cipso,
			    struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -ENOMSG;
	u32 doi;
	struct cipso_v4_doi *doi_def;

	/* cipso[1] is the option length byte; try the mapping cache first
	 * to avoid a full parse */
	if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
		return 0;

	/* the DOI is a 32-bit big-endian value starting at offset 2 */
	doi = get_unaligned_be32(&cipso[2]);
	rcu_read_lock();
	doi_def = cipso_v4_doi_search(doi);
	if (doi_def == NULL)
		goto getattr_return;

	/* XXX - This code assumes only one tag per CIPSO option which isn't
	 * really a good assumption to make but since we only support the MAC
	 * tags right now it is a safe assumption.  The first tag starts at
	 * offset 6, immediately after the 6-byte CIPSO header. */
	switch (cipso[6]) {
	case CIPSO_V4_TAG_RBITMAP:
		ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_ENUM:
		ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_RANGE:
		ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_LOCAL:
		ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr);
		break;
	}
	if (ret_val == 0)
		secattr->type = NETLBL_NLTYPE_CIPSOV4;

getattr_return:
	rcu_read_unlock();
	return ret_val;
}
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
2195{
2196 struct ip_options_rcu *opt;
2197 int res = -ENOMSG;
2198
2199 rcu_read_lock();
2200 opt = rcu_dereference(inet_sk(sk)->inet_opt);
2201 if (opt && opt->opt.cipso)
2202 res = cipso_v4_getattr(opt->opt.__data +
2203 opt->opt.cipso -
2204 sizeof(struct iphdr),
2205 secattr);
2206 rcu_read_unlock();
2207 return res;
2208}
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
/**
 * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
 * @skb: the packet
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Set the CIPSO option on the given packet based on the security attributes,
 * resizing the IP header as needed.  Returns zero on success and negative
 * values on failure.
 *
 */
int cipso_v4_skbuff_setattr(struct sk_buff *skb,
			    const struct cipso_v4_doi *doi_def,
			    const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	struct iphdr *iph;
	struct ip_options *opt = &IPCB(skb)->opt;
	unsigned char buf[CIPSO_V4_OPT_LEN_MAX];
	u32 buf_len = CIPSO_V4_OPT_LEN_MAX;
	u32 opt_len;
	int len_delta;

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		return ret_val;
	buf_len = ret_val;
	/* round up to a 4-byte boundary, the IP header length is expressed
	 * in 32-bit words */
	opt_len = (buf_len + 3) & ~3;

	/* we overwrite any existing options to ensure that we have enough
	 * room for the CIPSO option; len_delta is the change in total IP
	 * option length (positive = header grows, negative = shrinks) */
	len_delta = opt_len - opt->optlen;

	/* make sure there is enough headroom for a possible skb_push()
	 * below and that we hold a private copy of the packet data */
	ret_val = skb_cow(skb, skb_headroom(skb) + len_delta);
	if (ret_val < 0)
		return ret_val;

	if (len_delta > 0) {
		/* the header is growing: open up room in front of the packet
		 * and slide the existing IP header forward */
		iph = ip_hdr(skb);
		skb_push(skb, len_delta);
		memmove((char *)iph - len_delta, iph, iph->ihl << 2);
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);
	} else if (len_delta < 0) {
		/* the header is shrinking: blank the old option area with
		 * NOPs; the new option is copied over it below */
		iph = ip_hdr(skb);
		memset(iph + 1, IPOPT_NOP, opt->optlen);
	} else
		iph = ip_hdr(skb);

	/* reset the control-block option state; only the CIPSO option
	 * remains after this function */
	if (opt->optlen > 0)
		memset(opt, 0, sizeof(*opt));
	opt->optlen = opt_len;
	opt->cipso = sizeof(struct iphdr);
	opt->is_changed = 1;

	/* copy the new option into place directly after the IP header and
	 * zero-pad up to the rounded option length */
	memcpy(iph + 1, buf, buf_len);
	if (opt_len > buf_len)
		memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len);
	if (len_delta != 0) {
		iph->ihl = 5 + (opt_len >> 2);
		iph->tot_len = htons(skb->len);
	}
	ip_send_check(iph);

	return 0;
}
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
/**
 * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet
 * @skb: the packet
 *
 * Description:
 * Removes any and all CIPSO options from the given packet.  Returns zero on
 * success, negative values on failure.
 *
 */
int cipso_v4_skbuff_delattr(struct sk_buff *skb)
{
	int ret_val;
	struct iphdr *iph;
	struct ip_options *opt = &IPCB(skb)->opt;
	unsigned char *cipso_ptr;

	if (opt->cipso == 0)
		return 0;

	/* since we are changing the packet we should make a private copy */
	ret_val = skb_cow(skb, skb_headroom(skb));
	if (ret_val < 0)
		return ret_val;

	/* the easiest thing to do is just replace the CIPSO option with NOP
	 * options since this does not change the size of the packet,
	 * although we still need to recalculate the checksum */
	iph = ip_hdr(skb);
	cipso_ptr = (unsigned char *)iph + opt->cipso;
	memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]);
	opt->cipso = 0;
	opt->is_changed = 1;

	ip_send_check(iph);

	return 0;
}
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
/**
 * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option
 * @skb: the packet
 * @secattr: the security attributes
 *
 * Description:
 * Parse the given packet's CIPSO option and return the security attributes.
 * Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
			    struct netlbl_lsm_secattr *secattr)
{
	const unsigned char *cipso_ptr = CIPSO_V4_OPTPTR(skb);

	return cipso_v4_getattr(cipso_ptr, secattr);
}
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356static int __init cipso_v4_init(void)
2357{
2358 int ret_val;
2359
2360 ret_val = cipso_v4_cache_init();
2361 if (ret_val != 0)
2362 panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n",
2363 ret_val);
2364
2365 return 0;
2366}
2367
2368subsys_initcall(cipso_v4_init);
2369