#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
#include <crypto/sha1.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

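/*
 * Configuration: the input pool holds 2^INPUT_POOL_SHIFT = 4096 bits
 * (128 32-bit words).  EXTRACT_SIZE is 10 bytes (80 bits): half of a
 * folded SHA-1 digest per extraction (see extract_buf() below).
 */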
#define INPUT_POOL_SHIFT	12
#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT	10
#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
#define EXTRACT_SIZE		10

#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

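/*
 * To allow fractional bits to be tracked, the entropy_count field is
 * denominated in units of 1/8th bits.
 */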
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)

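/*
 * If the entropy count falls under this number of bits, then we
 * should wake up processes which are selecting or polling on write
 * access to /dev/random.
 */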
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;

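/*
 * The pool is stirred as a twisted generalized feedback shift
 * register; for the 128-word pool below, the taps correspond to the
 * polynomial x^128 + x^104 + x^76 + x^51 + x^25 + x + 1.  S(x) fills
 * in the first four members of struct poolinfo for a pool of x words.
 */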
static const struct poolinfo {
	int poolbitshift, poolwords, poolbytes, poolfracbits;
#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
	int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
	{ S(128), 104, 76, 51, 25, 1 },
};

static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

struct crng_state {
	__u32		state[16];
	unsigned long	init_time;
	spinlock_t	lock;
};

static struct crng_state primary_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
};

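/*
 * crng_init is protected by primary_crng->lock, and only increases:
 *   0: not initialized
 *   1: fast init done (CRNG_INIT_CNT_THRESH bytes of raw samples mixed in)
 *   2: fully seeded from the input pool; crng_ready() becomes true
 */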
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);

static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);

static int ratelimit_disable __read_mostly;

module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

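/*
 * The entropy pool: accumulates environmental noise mixed in by the
 * add_*_randomness() routines and serves as the seed source for the
 * ChaCha20 crng.
 */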
struct entropy_store {
	/* read-only data: */
	const struct poolinfo *poolinfo;
	__u32 *pool;
	const char *name;

	/* read-write data: */
	spinlock_t lock;
	unsigned short add_ptr;
	unsigned short input_rotate;
	int entropy_count;
	unsigned int last_data_init:1;
	__u8 last_data[EXTRACT_SIZE];
};

static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int rsvd);
static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips);

static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;

static struct entropy_store input_pool = {
	.poolinfo = &poolinfo_table[0],
	.name = "input",
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
	.pool = input_pool_data
};

static __u32 const twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

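/*
 * This function adds bytes into the entropy pool.  It does not update
 * the entropy estimate.  The caller should hold the pool spinlock
 * (the mix_pool_bytes() wrapper below takes it); __mix_pool_bytes()
 * only differs in the tracepoint it fires.
 */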
static void _mix_pool_bytes(struct entropy_store *r, const void *in,
			    int nbytes)
{
	unsigned long i, tap1, tap2, tap3, tap4, tap5;
	int input_rotate;
	int wordmask = r->poolinfo->poolwords - 1;
	const char *bytes = in;
	__u32 w;

	tap1 = r->poolinfo->tap1;
	tap2 = r->poolinfo->tap2;
	tap3 = r->poolinfo->tap3;
	tap4 = r->poolinfo->tap4;
	tap5 = r->poolinfo->tap5;

	input_rotate = r->input_rotate;
	i = r->add_ptr;

	/* Mix one byte at a time to simplify size handling */
	while (nbytes--) {
		w = rol32(*bytes++, input_rotate);
		i = (i - 1) & wordmask;

		/* XOR in the various taps */
		w ^= r->pool[i];
		w ^= r->pool[(i + tap1) & wordmask];
		w ^= r->pool[(i + tap2) & wordmask];
		w ^= r->pool[(i + tap3) & wordmask];
		w ^= r->pool[(i + tap4) & wordmask];
		w ^= r->pool[(i + tap5) & wordmask];

		/* Mix the result back in with a twist */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];

		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
	}

	r->input_rotate = input_rotate;
	r->add_ptr = i;
}

static void __mix_pool_bytes(struct entropy_store *r, const void *in,
			     int nbytes)
{
	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
	_mix_pool_bytes(r, in, nbytes);
}

static void mix_pool_bytes(struct entropy_store *r, const void *in,
			   int nbytes)
{
	unsigned long flags;

	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
	spin_lock_irqsave(&r->lock, flags);
	_mix_pool_bytes(r, in, nbytes);
	spin_unlock_irqrestore(&r->lock, flags);
}

struct fast_pool {
	__u32		pool[4];
	unsigned long	last;
	unsigned short	reg_idx;
	unsigned char	count;
};

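/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector.  It's hardcoded for an 128 bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */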
static void fast_mix(struct fast_pool *f)
{
	__u32 a = f->pool[0],	b = f->pool[1];
	__u32 c = f->pool[2],	d = f->pool[3];

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	f->pool[0] = a;  f->pool[1] = b;
	f->pool[2] = c;  f->pool[3] = d;
	f->count++;
}

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}

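/*
 * Credit (or, when nbits is negative, debit) the entropy count of a
 * pool.  nbits is in whole bits; entropy_count is kept in units of
 * 1/8th bit.
 */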
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
	int entropy_count, orig;
	const int pool_size = r->poolinfo->poolfracbits;
	int nfrac = nbits << ENTROPY_SHIFT;

	if (!nbits)
		return;

retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	if (nfrac < 0) {
		/* Debit */
		entropy_count += nfrac;
	} else {
		/*
		 * Credit: we have to account for the possibility of
		 * overwriting already present entropy.  Even in the
		 * ideal case of pure Shannon entropy, new contributions
		 * approach the full pool only asymptotically, so each
		 * chunk below credits 3/4 * anfrac * (fraction of the
		 * pool still empty).  Adding at most pool_size/2 at a
		 * time keeps the approximation on the safe side.
		 */
		int pnfrac = nfrac;
		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
		/* The +2 corresponds to the /4 in the denominator */

		do {
			unsigned int anfrac = min(pnfrac, pool_size/2);
			unsigned int add =
				((pool_size - entropy_count)*anfrac*3) >> s;

			entropy_count += add;
			pnfrac -= anfrac;
		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
	}

	if (WARN_ON(entropy_count < 0)) {
		pr_warn("negative entropy/overflow: pool %s count %d\n",
			r->name, entropy_count);
		entropy_count = 0;
	} else if (entropy_count > pool_size)
		entropy_count = pool_size;
	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	trace_credit_entropy_bits(r->name, nbits,
				  entropy_count >> ENTROPY_SHIFT, _RET_IP_);

	if (r == &input_pool) {
		int entropy_bits = entropy_count >> ENTROPY_SHIFT;

		if (crng_init < 2 && entropy_bits >= 128)
			crng_reseed(&primary_crng, r);
	}
}

static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
	const int nbits_max = r->poolinfo->poolwords * 32;

	if (nbits < 0)
		return -EINVAL;

	/* Cap the value to avoid overflows */
	nbits = min(nbits, nbits_max);

	credit_entropy_bits(r, nbits);
	return 0;
}

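/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/
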
#define CRNG_RESEED_INTERVAL (300*HZ)

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

#ifdef CONFIG_NUMA
/*
 * Give each NUMA node its own crng state, so that heavily parallel
 * readers of /dev/urandom do not all serialize on the primary crng's
 * lock.
 */
static struct crng_state **crng_node_pool __read_mostly;
#endif

static void invalidate_batched_entropy(void);
static void numa_crng_init(void);

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

static bool crng_init_try_arch(struct crng_state *crng)
{
	int i;
	bool arch_init = true;
	unsigned long rv;

	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv)) {
			rv = random_get_entropy();
			arch_init = false;
		}
		crng->state[i] ^= rv;
	}

	return arch_init;
}

static bool __init crng_init_try_arch_early(struct crng_state *crng)
{
	int i;
	bool arch_init = true;
	unsigned long rv;

	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long_early(&rv) &&
		    !arch_get_random_long_early(&rv)) {
			rv = random_get_entropy();
			arch_init = false;
		}
		crng->state[i] ^= rv;
	}

	return arch_init;
}

static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
{
	chacha_init_consts(crng->state);
	_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
	crng_init_try_arch(crng);
	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

static void __init crng_initialize_primary(struct crng_state *crng)
{
	chacha_init_consts(crng->state);
	_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
	if (crng_init_try_arch_early(crng) && trust_cpu) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		pr_notice("crng done (trusting CPU's manufacturer)\n");
	}
	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

#ifdef CONFIG_NUMA
static void do_numa_crng_init(struct work_struct *work)
{
	int i;
	struct crng_state *crng;
	struct crng_state **pool;

	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
	for_each_online_node(i) {
		crng = kmalloc_node(sizeof(struct crng_state),
				    GFP_KERNEL | __GFP_NOFAIL, i);
		spin_lock_init(&crng->lock);
		crng_initialize_secondary(crng);
		pool[i] = crng;
	}
	/* Make the initialized states visible before publishing the pool */
	mb();
	if (cmpxchg(&crng_node_pool, NULL, pool)) {
		for_each_node(i)
			kfree(pool[i]);
		kfree(pool);
	}
}

static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);

static void numa_crng_init(void)
{
	schedule_work(&numa_crng_init_work);
}
#else
static void numa_crng_init(void) {}
#endif

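/*
 * crng_fast_load() can be called from the interrupt service path, so
 * it only trylock()s the primary crng.  It XORs raw, unvetted samples
 * into the key until CRNG_INIT_CNT_THRESH bytes have accumulated, then
 * declares fast init done (crng_init = 1).
 */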
static int crng_fast_load(const char *cp, size_t len)
{
	unsigned long flags;
	unsigned char *p;

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	p = (unsigned char *) &primary_crng.state[4];
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
		cp++; crng_init_cnt++; len--;
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		crng_init = 1;
		pr_notice("fast init done\n");
	}
	return 1;
}

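/*
 * crng_slow_load() is called by add_device_randomness(), whose input
 * cannot be assumed random (it may be a fixed value such as a DMI
 * table).  It therefore smears the input across the whole key with a
 * small LFSR rather than advancing crng_init_cnt, so duplicated or
 * attacker-known data never counts toward initialization.
 */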
static int crng_slow_load(const char *cp, size_t len)
{
	unsigned long flags;
	static unsigned char lfsr = 1;
	unsigned char tmp;
	unsigned int i, max = CHACHA_KEY_SIZE;
	const char *src_buf = cp;
	char *dest_buf = (char *) &primary_crng.state[4];

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	if (len > max)
		max = len;

	for (i = 0; i < max; i++) {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		tmp = dest_buf[i % CHACHA_KEY_SIZE];
		dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
		lfsr += (tmp << 3) | (tmp >> 5);
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	return 1;
}

static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
	unsigned long flags;
	int i, num;
	union {
		__u8 block[CHACHA_BLOCK_SIZE];
		__u32 key[8];
	} buf;

	if (r) {
		num = extract_entropy(r, &buf, 32, 16, 0);
		if (num == 0)
			return;
	} else {
		_extract_crng(&primary_crng, buf.block);
		_crng_backtrack_protect(&primary_crng, buf.block,
					CHACHA_KEY_SIZE);
	}
	spin_lock_irqsave(&crng->lock, flags);
	for (i = 0; i < 8; i++) {
		unsigned long rv;
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		crng->state[i+4] ^= buf.key[i] ^ rv;
	}
	memzero_explicit(&buf, sizeof(buf));
	crng->init_time = jiffies;
	spin_unlock_irqrestore(&crng->lock, flags);
	if (crng == &primary_crng && crng_init < 2) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}

static void _extract_crng(struct crng_state *crng,
			  __u8 out[CHACHA_BLOCK_SIZE])
{
	unsigned long v, flags;

	if (crng_ready() &&
	    (time_after(crng_global_init_time, crng->init_time) ||
	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
	spin_lock_irqsave(&crng->lock, flags);
	if (arch_get_random_long(&v))
		crng->state[14] ^= v;
	chacha20_block(&crng->state[0], out);
	if (crng->state[12] == 0)
		crng->state[13]++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_extract_crng(crng, out);
}

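/*
 * Use the leftover bytes from the last ChaCha block (rounded to word
 * granularity) to mutate the crng key, so that captured state cannot
 * be used to backtrack to earlier output.  If fewer than
 * CHACHA_KEY_SIZE bytes remain unused, a fresh block is generated
 * solely for re-keying.
 */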
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	unsigned long flags;
	__u32 *s, *d;
	int i;

	used = round_up(used, sizeof(__u32));
	if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
		extract_crng(tmp);
		used = 0;
	}
	spin_lock_irqsave(&crng->lock, flags);
	s = (__u32 *) &tmp[used];
	d = &crng->state[4];
	for (i = 0; i < 8; i++)
		*d++ ^= *s++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_crng_backtrack_protect(crng, tmp, used);
}

static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
	ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
	int large_request = (nbytes > 256);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_crng(tmp);
		i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}
	crng_backtrack_protect(tmp, i);

	/* Wipe data just written to memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

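/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/
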
/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }

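/*
 * add_device_randomness() mixes data into the input pool that is
 * likely to differ between two devices (or possibly even per boot),
 * such as MAC addresses or serial numbers.  None of it is assumed to
 * be truly random, so no entropy is credited; the goal is only to
 * keep different machines from starting out in the same state.
 */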
void add_device_randomness(const void *buf, unsigned int size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&input_pool, buf, size);
	_mix_pool_bytes(&input_pool, &time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

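/*
 * This function adds entropy to the entropy pool by using timing
 * delays.  It uses the timer_rand_state structure to estimate how
 * many bits of entropy the event contributed: the first, second and
 * third-order deltas of the event time are computed and the smallest
 * magnitude caps the credit.  num describes the type of event.
 */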
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct entropy_store *r;
	struct {
		long jiffies;
		unsigned cycles;
		unsigned num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	r = &input_pool;
	mix_pool_bytes(r, &sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order
	 * deltas in order to make our estimate.
	 */
	delta = sample.jiffies - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, sample.jiffies);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now the minimum absolute delta.  Round down by one
	 * bit on general principles, and limit the entropy estimate
	 * to 11 bits.
	 */
	credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}

void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* Ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8	/* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1))

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	__u32 *ptr = (__u32 *) regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}

void add_interrupt_randomness(int irq, int irq_flags)
{
	struct entropy_store *r;
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned long now = jiffies;
	cycles_t cycles = random_get_entropy();
	__u32 c_high, j_high;
	__u64 ip;

	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
	fast_pool->pool[1] ^= now ^ c_high;
	ip = regs ? instruction_pointer(regs) : _RET_IP_;
	fast_pool->pool[2] ^= ip;
	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
		get_reg(fast_pool, regs);

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);

	if (unlikely(crng_init == 0)) {
		if ((fast_pool->count >= 64) &&
		    crng_fast_load((char *) fast_pool->pool,
				   sizeof(fast_pool->pool))) {
			fast_pool->count = 0;
			fast_pool->last = now;
		}
		return;
	}

	if ((fast_pool->count < 64) &&
	    !time_after(now, fast_pool->last + HZ))
		return;

	r = &input_pool;
	if (!spin_trylock(&r->lock))
		return;

	fast_pool->last = now;
	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
	spin_unlock(&r->lock);

	fast_pool->count = 0;

	/* Award one bit for the contents of the fast pool */
	credit_entropy_bits(r, 1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* First major is 1, so we get >= 0x200 here */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
	trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif

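/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/

/*
 * This function decides how many bytes to actually take from the
 * given pool, honoring the min and reserved arguments, and debits
 * the pool's entropy count accordingly.
 */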
static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
{
	int entropy_count, orig, have_bytes;
	size_t ibytes, nfrac;

	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);

	/* Can we pull enough? */
retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	ibytes = nbytes;
	/* Never pull more than available */
	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);

	if ((have_bytes -= reserved) < 0)
		have_bytes = 0;
	ibytes = min_t(size_t, ibytes, have_bytes);
	if (ibytes < min)
		ibytes = 0;

	if (WARN_ON(entropy_count < 0)) {
		pr_warn("negative entropy count: pool %s count %d\n",
			r->name, entropy_count);
		entropy_count = 0;
	}
	nfrac = ibytes << (ENTROPY_SHIFT + 3);
	if ((size_t) entropy_count > nfrac)
		entropy_count -= nfrac;
	else
		entropy_count = 0;

	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	trace_debit_entropy(r->name, 8 * ibytes);
	if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
		wake_up_interruptible(&random_write_wait);
		kill_fasync(&fasync, SIGIO, POLL_OUT);
	}

	return ibytes;
}

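/*
 * This function does the actual extraction for extract_entropy.
 * Note: we assume that .poolwords is a multiple of 16 words.
 */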
static void extract_buf(struct entropy_store *r, __u8 *out)
{
	int i;
	union {
		__u32 w[5];
		unsigned long l[LONGS(20)];
	} hash;
	__u32 workspace[SHA1_WORKSPACE_WORDS];
	unsigned long flags;

	/*
	 * If we have an architectural hardware random number
	 * generator, use it for SHA-1's initial vector.
	 */
	sha1_init(hash.w);
	for (i = 0; i < LONGS(20); i++) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;
		hash.l[i] = v;
	}

	/* Generate a hash across the pool */
	spin_lock_irqsave(&r->lock, flags);
	for (i = 0; i < r->poolinfo->poolwords; i += 16)
		sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);

	/*
	 * We mix the hash back into the pool to prevent backtracking
	 * attacks (where the attacker knows the state of the pool
	 * plus the current outputs, and attempts to find previous
	 * outputs), unless the hash function can be inverted.  By
	 * mixing at least a SHA1 worth of hash data back, we make
	 * brute-forcing the feedback as hard as brute-forcing the
	 * hash.
	 */
	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
	spin_unlock_irqrestore(&r->lock, flags);

	memzero_explicit(workspace, sizeof(workspace));

	/*
	 * In case the hash function has some recognizable output
	 * pattern, we fold it in half.  Thus, we always feed back
	 * twice as much data as we output.
	 */
	hash.w[0] ^= hash.w[3];
	hash.w[1] ^= hash.w[4];
	hash.w[2] ^= rol32(hash.w[2], 16);

	memcpy(out, &hash, EXTRACT_SIZE);
	memzero_explicit(&hash, sizeof(hash));
}

static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	while (nbytes) {
		extract_buf(r, tmp);

		if (fips) {
			spin_lock_irqsave(&r->lock, flags);
			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
				panic("Hardware RNG duplicated output!\n");
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
			spin_unlock_irqrestore(&r->lock, flags);
		}
		i = min_t(int, nbytes, EXTRACT_SIZE);
		memcpy(buf, tmp, i);
		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

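/*
 * This function extracts randomness from the entropy pool and returns
 * it in a buffer.  The min parameter specifies the minimum amount we
 * can pull before failing to avoid races that defeat catastrophic
 * reseeding, while the reserved parameter indicates how much entropy
 * we must leave in the pool after each pull to avoid starving other
 * readers.
 */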
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
{
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	/* If FIPS mode is on, prime last_data before the first extraction */
	if (fips_enabled) {
		spin_lock_irqsave(&r->lock, flags);
		if (!r->last_data_init) {
			r->last_data_init = 1;
			spin_unlock_irqrestore(&r->lock, flags);
			trace_extract_entropy(r->name, EXTRACT_SIZE,
					      ENTROPY_BITS(r), _RET_IP_);
			extract_buf(r, tmp);
			spin_lock_irqsave(&r->lock, flags);
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
		}
		spin_unlock_irqrestore(&r->lock, flags);
	}

	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	nbytes = account(r, nbytes, min, reserved);

	return _extract_entropy(r, buf, nbytes, fips_enabled);
}

#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller,
				      void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	const bool print_once = false;
#else
	static bool print_once __read_mostly;
#endif

	if (print_once ||
	    crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
				func_name, caller, crng_init);
}

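/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch().  In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */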
static void _get_random_bytes(void *buf, int nbytes)
{
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);

	trace_get_random_bytes(nbytes, _RET_IP_);

	while (nbytes >= CHACHA_BLOCK_SIZE) {
		extract_crng(buf);
		buf += CHACHA_BLOCK_SIZE;
		nbytes -= CHACHA_BLOCK_SIZE;
	}

	if (nbytes > 0) {
		extract_crng(tmp);
		memcpy(buf, tmp, nbytes);
		crng_backtrack_protect(tmp, nbytes);
	} else
		crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
	memzero_explicit(tmp, sizeof(tmp));
}

void get_random_bytes(void *buf, int nbytes)
{
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);

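/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter; even a routinely delayed timer is very
 * unlikely to slip by an exact number of cycles.  Credit one bit per
 * firing.
 */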
static void entropy_timer(struct timer_list *t)
{
	credit_entropy_bits(&input_pool, 1);
}

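/*
 * If we have an actual cycle counter, see if we can generate enough
 * entropy with timing noise.
 */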
static void try_to_generate_entropy(void)
{
	struct {
		unsigned long now;
		struct timer_list timer;
	} stack;

	stack.now = random_get_entropy();

	/* Slow counter - or none.  Don't even bother */
	if (stack.now == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready()) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies+1);
		mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
		schedule();
		stack.now = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
}

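/*
 * Wait for the urandom pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers.  This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the
 * get_random_{u32,u64,int,long} family of functions.  Using any of
 * these functions without first calling this function forfeits the
 * guarantee of security.
 *
 * Returns: 0 if the urandom pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */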
int wait_for_random_bytes(void)
{
	if (likely(crng_ready()))
		return 0;

	do {
		int ret;

		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;

		try_to_generate_entropy();
	} while (!crng_ready());

	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);

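/*
 * Returns whether or not the urandom pool has been seeded and thus
 * guaranteed to supply cryptographically secure random numbers.  This
 * applies to: the /dev/urandom device, the get_random_bytes function,
 * and the get_random_{u32,u64,int,long} family of functions.
 *
 * Returns: true if the urandom pool has been seeded.
 *          false if the urandom pool has not been seeded.
 */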
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

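/*
 * Add a callback function that will be invoked when the crng is
 * initialised.
 *
 * Returns: 0 if callback is successfully added
 *          -EALREADY if pool is already initialised (callback not called)
 *          -ENOENT if module for callback is not alive
 */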
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	if (crng_ready())
		return err;

	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (crng_ready())
		goto out;

	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);

/*
 * Delete a previously registered readiness callback function.
 */
void del_random_ready_callback(struct random_ready_callback *rdy)
{
	unsigned long flags;
	struct module *owner = NULL;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		list_del_init(&rdy->list);
		owner = rdy->owner;
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);

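/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available.  It is not recommended for
 * general use; use get_random_bytes() instead.  It returns the number
 * of bytes filled in.
 */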
int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
	int left = nbytes;
	char *p = buf;

	trace_get_random_bytes_arch(left, _RET_IP_);
	while (left) {
		unsigned long v;
		int chunk = min_t(int, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		left -= chunk;
	}

	return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);

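/*
 * init_std_data - initialize pool with system data
 *
 * Mixes the current time, cycle counter / arch-random samples, and
 * utsname() into the pool.  No entropy is credited for any of it.
 */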
static void __init init_std_data(struct entropy_store *r)
{
	int i;
	ktime_t now = ktime_get_real();
	unsigned long rv;

	mix_pool_bytes(r, &now, sizeof(now));
	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		mix_pool_bytes(r, &rv, sizeof(rv));
	}
	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}

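/*
 * Note that setup_arch() may call add_device_randomness() long before
 * this is run, so the statically initialized structures above must
 * already be usable, and nothing here may clobber what was mixed in
 * earlier.
 */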
int __init rand_initialize(void)
{
	init_std_data(&input_pool);
	crng_initialize_primary(&primary_crng);
	crng_global_init_time = jiffies;
	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

static ssize_t
urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
		    loff_t *ppos)
{
	int ret;

	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
	ret = extract_crng_user(buf, nbytes);
	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
	return ret;
}

static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	unsigned long flags;
	static int maxwarn = 10;

	if (!crng_ready() && maxwarn > 0) {
		maxwarn--;
		if (__ratelimit(&urandom_warning))
			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
				  current->comm, nbytes);
		spin_lock_irqsave(&primary_crng.lock, flags);
		crng_init_cnt = 0;
		spin_unlock_irqrestore(&primary_crng.lock, flags);
	}

	return urandom_read_nowarn(file, buf, nbytes, ppos);
}

static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	int ret;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return urandom_read_nowarn(file, buf, nbytes, ppos);
}

static __poll_t
random_poll(struct file *file, poll_table *wait)
{
	__poll_t mask;

	poll_wait(file, &crng_init_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
	if (crng_ready())
		mask |= EPOLLIN | EPOLLRDNORM;
	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}

static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
	size_t bytes;
	__u32 t, buf[16];
	const char __user *p = buffer;

	while (count > 0) {
		int b, i = 0;

		bytes = min(count, sizeof(buf));
		if (copy_from_user(&buf, p, bytes))
			return -EFAULT;

		for (b = bytes; b > 0; b -= sizeof(__u32), i++) {
			if (!arch_get_random_int(&t))
				break;
			buf[i] ^= t;
		}

		count -= bytes;
		p += bytes;

		mix_pool_bytes(r, buf, bytes);
		cond_resched();
	}

	return 0;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	int ret;

	ret = write_pool(&input_pool, buffer, count);
	if (ret)
		return ret;

	return (ssize_t)count;
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* Inherently racy, no point locking */
		ent_count = ENTROPY_BITS(&input_pool);
		if (put_user(ent_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool(&input_pool, (const char __user *)p,
				    size);
		if (retval < 0)
			return retval;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counter.  We no longer clear
		 * the pool contents themselves, since overwriting them
		 * could only reduce the entropy actually present.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		input_pool.entropy_count = 0;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (crng_init < 2)
			return -ENODATA;
		crng_reseed(&primary_crng, &input_pool);
		crng_global_init_time = jiffies - 1;
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read		= random_read,
	.write		= random_write,
	.poll		= random_poll,
	.unlocked_ioctl	= random_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.fasync		= random_fasync,
	.llseek		= noop_llseek,
};

const struct file_operations urandom_fops = {
	.read		= urandom_read,
	.write		= random_write,
	.unlocked_ioctl	= random_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.fasync		= random_fasync,
	.llseek		= noop_llseek,
};

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
		unsigned int, flags)
{
	int ret;

	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time
	 * makes no sense.
	 */
	if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	if (!(flags & GRND_INSECURE) && !crng_ready()) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return urandom_read_nowarn(NULL, buf, count, NULL);
}

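/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/
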
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int min_write_thresh;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];

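/*
 * This function is used to return both the bootid UUID, and random
 * UUID.  The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
 * If the user accesses this via the proc interface, the UUID will be
 * returned as an ASCII string in the standard UUID format; if via the
 * sysctl system call, as 16 bytes of binary data.
 */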
static int proc_do_uuid(struct ctl_table *table, int write,
			void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	sprintf(buf, "%pU", uuid);

	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

/*
 * Return entropy available scaled to integral bits
 */
static int proc_do_entropy(struct ctl_table *table, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	int entropy_count;

	entropy_count = *(int *)table->data >> ENTROPY_SHIFT;

	fake_table.data = &entropy_count;
	fake_table.maxlen = sizeof(entropy_count);

	return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
}

static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
	{
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "entropy_avail",
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_do_entropy,
		.data		= &input_pool.entropy_count,
	},
	{
		.procname	= "write_wakeup_threshold",
		.data		= &random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_write_thresh,
		.extra2		= &max_write_thresh,
	},
	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		.procname	= "uuid",
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
#ifdef ADD_INTERRUPT_BENCH
	{
		.procname	= "add_interrupt_avg_cycles",
		.data		= &avg_cycles,
		.maxlen		= sizeof(avg_cycles),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "add_interrupt_avg_deviation",
		.data		= &avg_deviation,
		.maxlen		= sizeof(avg_deviation),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
#endif
	{ }
};
#endif	/* CONFIG_SYSCTL */

struct batched_entropy {
	union {
		u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
	};
	unsigned int position;
	spinlock_t batch_lock;
};

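/*
 * Get a random word for internal kernel use only.  The quality of the
 * random number is as good as /dev/urandom, but there is no backtrack
 * protection, with the goal of being quite fast and not depleting
 * entropy.  In order to ensure that the randomness provided by this
 * function is okay, the function wait_for_random_bytes() should be
 * called and return 0 at least once at any point prior.
 */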
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
};

u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;

	warn_unseeded_randomness(&previous);

	batch = raw_cpu_ptr(&batched_entropy_u64);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
};

u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;

	warn_unseeded_randomness(&previous);

	batch = raw_cpu_ptr(&batched_entropy_u32);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
		extract_crng((u8 *)batch->entropy_u32);
		batch->position = 0;
	}
	ret = batch->entropy_u32[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);

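/*
 * It's important to invalidate all potential batched entropy that
 * might be stored before the crng is initialized, which we can do
 * lazily by simply resetting the counter to zero so that it's
 * re-extracted on the next usage.
 */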
static void invalidate_batched_entropy(void)
{
	int cpu;
	unsigned long flags;

	for_each_possible_cpu(cpu) {
		struct batched_entropy *batched_entropy;

		/* Interrupts stay disabled across both per-CPU locks */
		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
		batched_entropy->position = 0;
		spin_unlock(&batched_entropy->batch_lock);

		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
		spin_lock(&batched_entropy->batch_lock);
		batched_entropy->position = 0;
		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
	}
}

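/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start is not page-aligned, it is aligned up and @range shrunk
 * accordingly; if @start + @range would overflow, @range is capped.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */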
unsigned long
randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

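/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */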
void add_hwgenerator_randomness(const char *buffer, size_t count,
				size_t entropy)
{
	struct entropy_store *poolp = &input_pool;

	if (unlikely(crng_init == 0)) {
		crng_fast_load(buffer, count);
		return;
	}

	/*
	 * Throttle writing if we're above the trickle threshold.
	 * We'll be woken up again once below random_write_wakeup_bits,
	 * or when the calling thread is about to terminate.
	 */
	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
	mix_pool_bytes(poolp, buffer, count);
	credit_entropy_bits(poolp, entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);

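/*
 * Handle a random seed passed in by the bootloader.  If the seed is
 * trustworthy (CONFIG_RANDOM_TRUST_BOOTLOADER), treat it like hardware
 * RNG input and credit entropy; otherwise mix it in uncredited via
 * add_device_randomness().
 */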
void add_bootloader_randomness(const void *buf, unsigned int size)
{
	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
		add_hwgenerator_randomness(buf, size, size * 8);
	else
		add_device_randomness(buf, size);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);