linux/drivers/char/random.c
   1/*
   2 * random.c -- A strong random number generator
   3 *
   4 * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
   5 * Rights Reserved.
   6 *
   7 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
   8 *
   9 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
  10 * rights reserved.
  11 *
  12 * Redistribution and use in source and binary forms, with or without
  13 * modification, are permitted provided that the following conditions
  14 * are met:
  15 * 1. Redistributions of source code must retain the above copyright
  16 *    notice, and the entire permission notice in its entirety,
  17 *    including the disclaimer of warranties.
  18 * 2. Redistributions in binary form must reproduce the above copyright
  19 *    notice, this list of conditions and the following disclaimer in the
  20 *    documentation and/or other materials provided with the distribution.
  21 * 3. The name of the author may not be used to endorse or promote
  22 *    products derived from this software without specific prior
  23 *    written permission.
  24 *
  25 * ALTERNATIVELY, this product may be distributed under the terms of
  26 * the GNU General Public License, in which case the provisions of the GPL are
  27 * required INSTEAD OF the above restrictions.  (This clause is
  28 * necessary due to a potential bad interaction between the GPL and
  29 * the restrictions contained in a BSD-style copyright.)
  30 *
  31 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  32 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  33 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
  34 * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
  35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
  37 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  39 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  40 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  41 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
  42 * DAMAGE.
  43 */
  44
  45/*
  46 * (now, with legal B.S. out of the way.....)
  47 *
  48 * This routine gathers environmental noise from device drivers, etc.,
  49 * and returns good random numbers, suitable for cryptographic use.
  50 * Besides the obvious cryptographic uses, these numbers are also good
  51 * for seeding TCP sequence numbers, and other places where it is
  52 * desirable to have numbers which are not only random, but hard to
  53 * predict by an attacker.
  54 *
  55 * Theory of operation
  56 * ===================
  57 *
  58 * Computers are very predictable devices.  Hence it is extremely hard
  59 * to produce truly random numbers on a computer --- as opposed to
  60 * pseudo-random numbers, which can easily be generated by using an
  61 * algorithm.  Unfortunately, it is very easy for attackers to guess
  62 * the output sequence of a pseudo-random number generator, and for some
  63 * applications this is not acceptable.  So instead, we must try to
  64 * gather "environmental noise" from the computer's environment, which
  65 * must be hard for outside attackers to observe, and use that to
  66 * generate random numbers.  In a Unix environment, this is best done
  67 * from inside the kernel.
  68 *
  69 * Sources of randomness from the environment include inter-keyboard
  70 * timings, inter-interrupt timings from some interrupts, and other
  71 * events which are both (a) non-deterministic and (b) hard for an
  72 * outside observer to measure.  Randomness from these sources is
  73 * added to an "entropy pool", which is mixed using a CRC-like function.
  74 * This is not cryptographically strong, but it is adequate assuming
  75 * the randomness is not chosen maliciously, and it is fast enough that
  76 * the overhead of doing it on every interrupt is very reasonable.
  77 * As random bytes are mixed into the entropy pool, the routines keep
  78 * an *estimate* of how many bits of randomness have been stored into
  79 * the random number generator's internal state.
  80 *
  81 * When random bytes are desired, they are obtained by taking the SHA
  82 * hash of the contents of the "entropy pool".  The SHA hash avoids
  83 * exposing the internal state of the entropy pool.  It is believed to
  84 * be computationally infeasible to derive any useful information
  85 * about the input of SHA from its output.  Even if it is possible to
  86 * analyze SHA in some clever way, as long as the amount of data
  87 * returned from the generator is less than the inherent entropy in
  88 * the pool, the output data is totally unpredictable.  For this
  89 * reason, the routine decreases its internal estimate of how many
  90 * bits of "true randomness" are contained in the entropy pool as it
  91 * outputs random numbers.
  92 *
  93 * If this estimate goes to zero, the routine can still generate
  94 * random numbers; however, an attacker may (at least in theory) be
  95 * able to infer the future output of the generator from prior
  96 * outputs.  This requires successful cryptanalysis of SHA, which is
  97 * not believed to be feasible, but there is a remote possibility.
  98 * Nonetheless, these numbers should be useful for the vast majority
  99 * of purposes.
 100 *
 101 * Exported interfaces ---- output
 102 * ===============================
 103 *
 104 * There are four exported interfaces; two for use within the kernel,
 105 * and two for use from userspace.
 106 *
 107 * Exported interfaces ---- userspace output
 108 * -----------------------------------------
 109 *
 110 * The userspace interfaces are two character devices /dev/random and
 111 * /dev/urandom.  /dev/random is suitable for use when very high
 112 * quality randomness is desired (for example, for key generation or
 113 * one-time pads), as it will only return a maximum of the number of
 114 * bits of randomness (as estimated by the random number generator)
 115 * contained in the entropy pool.
 116 *
 117 * The /dev/urandom device does not have this limit, and will return
 118 * as many bytes as are requested.  As more and more random bytes are
 119 * requested without giving time for the entropy pool to recharge,
 120 * this will result in random numbers that are merely cryptographically
 121 * strong.  For many applications, however, this is acceptable.
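 *
 * As a rough illustration (hypothetical userspace code, not part of this
 * driver), a program that only needs unpredictable bytes might do:
 *
 *      unsigned char buf[16];
 *      int fd = open("/dev/urandom", O_RDONLY);
 *
 *      if (fd >= 0 && read(fd, buf, sizeof(buf)) == sizeof(buf))
 *              use_the_bytes(buf);
 *
 * where use_the_bytes() stands in for whatever the program does with them.
 * Modern programs would more likely call getrandom(2), which needs no
 * file descriptor at all.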
 122 *
 123 * Exported interfaces ---- kernel output
 124 * --------------------------------------
 125 *
 126 * The primary kernel interface is
 127 *
 128 *      void get_random_bytes(void *buf, int nbytes);
 129 *
 130 * This interface will return the requested number of random bytes,
 131 * and place them in the requested buffer.  This is equivalent to a
 132 * read from /dev/urandom.
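 *
 *      For illustration only (a hypothetical caller, not code from this
 *      file), generating a secret session key might look like:
 *
 *              u8 session_key[32];
 *
 *              get_random_bytes(session_key, sizeof(session_key));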
 133 *
 134 * For less critical applications, there are the functions:
 135 *
 136 *      u32 get_random_u32()
 137 *      u64 get_random_u64()
 138 *      unsigned int get_random_int()
 139 *      unsigned long get_random_long()
 140 *
 141 * These are produced by a cryptographic RNG seeded from get_random_bytes,
 142 * and so do not deplete the entropy pool as much.  These are recommended
 143 * for most in-kernel operations *if the result is going to be stored in
 144 * the kernel*.
 145 *
 146 * Specifically, the get_random_int() family do not attempt to do
 147 * "anti-backtracking".  If you capture the state of the kernel (e.g.
 148 * by snapshotting the VM), you can figure out previous get_random_int()
 149 * return values.  But if the value is stored in the kernel anyway,
 150 * this is not a problem.
 151 *
 152 * It *is* safe to expose get_random_int() output to attackers (e.g. as
 153 * network cookies); given outputs 1..n, it's not feasible to predict
 154 * outputs 0 or n+1.  The only concern is an attacker who breaks into
 155 * the kernel later; the get_random_int() engine is not reseeded as
 156 * often as the get_random_bytes() one.
 157 *
 158 * get_random_bytes() is needed for keys that need to stay secret after
 159 * they are erased from the kernel.  For example, any key that will
 160 * be wrapped and stored encrypted.  And session encryption keys: we'd
 161 * like to know that after the session is closed and the keys erased,
 162 * the plaintext is unrecoverable to someone who recorded the ciphertext.
 163 *
 164 * But for network ports/cookies, stack canaries, PRNG seeds, address
 165 * space layout randomization, session *authentication* keys, or other
 166 * applications where the sensitive data is stored in the kernel in
 167 * plaintext for as long as it's sensitive, the get_random_int() family
 168 * is just fine.
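 *
 * A sketch of such a use (hypothetical, for illustration only): a secret
 * that lives only inside a kernel data structure, say a hash table's
 * per-boot seed, can simply do
 *
 *      u32 hashtable_seed = get_random_u32();
 *
 * without drawing on get_random_bytes().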
 169 *
 170 * Consider ASLR.  We want to keep the address space secret from an
 171 * outside attacker while the process is running, but once the address
 172 * space is torn down, it's of no use to an attacker any more.  And it's
 173 * stored in kernel data structures as long as it's alive, so worrying
 174 * about an attacker's ability to extrapolate it from the get_random_int()
 175 * CRNG is silly.
 176 *
 177 * Even some cryptographic keys are safe to generate with get_random_int().
 178 * In particular, keys for SipHash are generally fine.  Here, knowledge
 179 * of the key authorizes you to do something to a kernel object (inject
 180 * packets to a network connection, or flood a hash table), and the
 181 * key is stored with the object being protected.  Once it goes away,
 182 * we no longer care if anyone knows the key.
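 *
 * An illustrative sketch (hypothetical code, not taken from this file):
 *
 *      siphash_key_t key = {
 *              .key = { get_random_u64(), get_random_u64() }
 *      };
 *      u64 cookie = siphash(&obj_id, sizeof(obj_id), &key);
 *
 * where obj_id is whatever identifies the protected object, and the key
 * is stored right next to it.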
 183 *
 184 * prandom_u32()
 185 * -------------
 186 *
 187 * For even weaker applications, see the pseudorandom generator
 188 * prandom_u32(), prandom_max(), and prandom_bytes().  If the random
 189 * numbers aren't security-critical at all, these are *far* cheaper.
 190 * Useful for self-tests, random error simulation, randomized backoffs,
 191 * and any other application where you trust that nobody is trying to
 192 * maliciously mess with you by guessing the "random" numbers.
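 *
 * A sketch of that kind of use (hypothetical fault-injection code;
 * fail_percent and test_buf are made up for the example):
 *
 *      if (prandom_u32() % 100 < fail_percent)
 *              return -EIO;
 *
 *      prandom_bytes(test_buf, sizeof(test_buf));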
 193 *
 194 * Exported interfaces ---- input
 195 * ==============================
 196 *
 197 * The current exported interfaces for gathering environmental noise
 198 * from the devices are:
 199 *
 200 *      void add_device_randomness(const void *buf, unsigned int size);
 201 *      void add_input_randomness(unsigned int type, unsigned int code,
 202 *                                unsigned int value);
 203 *      void add_interrupt_randomness(int irq, int irq_flags);
 204 *      void add_disk_randomness(struct gendisk *disk);
 205 *
 206 * add_device_randomness() is for adding data to the random pool that
 207 * is likely to differ between two devices (or possibly even per boot).
 208 * This would be things like MAC addresses or serial numbers, or the
 209 * read-out of the RTC. This does *not* add any actual entropy to the
 210 * pool, but it initializes the pool to different values for devices
 211 * that might otherwise be identical and have very little entropy
 212 * available to them (particularly common in the embedded world).
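 *
 * An illustrative call (hypothetical, but typical of a network driver):
 *
 *      add_device_randomness(netdev->dev_addr, netdev->addr_len);
 *
 * which stirs the interface's MAC address into the pool without
 * crediting any entropy for it.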
 213 *
 214 * add_input_randomness() uses the input layer interrupt timing, as well as
 215 * the event type information from the hardware.
 216 *
 217 * add_interrupt_randomness() uses the interrupt timing as random
 218 * inputs to the entropy pool. Using the cycle counters and the irq source
 219 * as inputs, it feeds the randomness roughly once a second.
 220 *
 221 * add_disk_randomness() uses what amounts to the seek time of block
 222 * layer request events, on a per-disk_devt basis, as input to the
 223 * entropy pool. Note that high-speed solid state drives with very low
 224 * seek times do not make for good sources of entropy, as their seek
 225 * times are usually fairly consistent.
 226 *
 227 * All of these routines try to estimate how many bits of randomness a
 228 * particular randomness source contributes.  They do this by keeping
 229 * track of the first and second order deltas of the event timings.
 230 *
 231 * Ensuring unpredictability at system startup
 232 * ============================================
 233 *
 234 * When any operating system starts up, it will go through a sequence
 235 * of actions that are fairly predictable by an adversary, especially
 236 * if the start-up does not involve interaction with a human operator.
 237 * This reduces the actual number of bits of unpredictability in the
 238 * entropy pool below the value in entropy_count.  In order to
 239 * counteract this effect, it helps to carry information in the
 240 * entropy pool across shut-downs and start-ups.  To do this, put the
 241 * following lines in an appropriate script which is run during the boot
 242 * sequence:
 243 *
 244 *      echo "Initializing random number generator..."
 245 *      random_seed=/var/run/random-seed
 246 *      # Carry a random seed from start-up to start-up
 247 *      # Load and then save the whole entropy pool
 248 *      if [ -f $random_seed ]; then
 249 *              cat $random_seed >/dev/urandom
 250 *      else
 251 *              touch $random_seed
 252 *      fi
 253 *      chmod 600 $random_seed
 254 *      dd if=/dev/urandom of=$random_seed count=1 bs=512
 255 *
 256 * and the following lines in an appropriate script which is run as
 257 * the system is shut down:
 258 *
 259 *      # Carry a random seed from shut-down to start-up
 260 *      # Save the whole entropy pool
 261 *      echo "Saving random seed..."
 262 *      random_seed=/var/run/random-seed
 263 *      touch $random_seed
 264 *      chmod 600 $random_seed
 265 *      dd if=/dev/urandom of=$random_seed count=1 bs=512
 266 *
 267 * For example, on most modern systems using the System V init
 268 * scripts, such code fragments would be found in
 269 * /etc/rc.d/init.d/random.  On older Linux systems, the correct script
 270 * location might be in /etc/rc.d/rc.local or /etc/rc.d/rc.0.
 271 *
 272 * Effectively, these commands cause the contents of the entropy pool
 273 * to be saved at shut-down time and reloaded into the entropy pool at
 274 * start-up.  (The 'dd' in the addition to the bootup script is to
 275 * make sure that /etc/random-seed is different for every start-up,
 276 * even if the system crashes without executing rc.0.)  Even with
 277 * complete knowledge of the start-up activities, predicting the state
 278 * of the entropy pool requires knowledge of the previous history of
 279 * the system.
 280 *
 281 * Configuring the /dev/random driver under Linux
 282 * ==============================================
 283 *
 284 * The /dev/random driver under Linux uses minor numbers 8 and 9 of
 285 * the /dev/mem major number (#1).  So if your system does not have
 286 * /dev/random and /dev/urandom created already, they can be created
 287 * by using the commands:
 288 *
 289 *      mknod /dev/random c 1 8
 290 *      mknod /dev/urandom c 1 9
 291 *
 292 * Acknowledgements:
 293 * =================
 294 *
 295 * Ideas for constructing this random number generator were derived
 296 * from Pretty Good Privacy's random number generator, and from private
 297 * discussions with Phil Karn.  Colin Plumb provided a faster random
 298 * number generator, which sped up the mixing function of the entropy
 299 * pool, taken from PGPfone.  Dale Worley has also contributed many
 300 * useful ideas and suggestions to improve this driver.
 301 *
 302 * Any flaws in the design are solely my responsibility, and should
 303 * not be attributed to Phil, Colin, or any of the authors of PGP.
 304 *
 305 * Further background information on this topic may be obtained from
 306 * RFC 1750, "Randomness Recommendations for Security", by Donald
 307 * Eastlake, Steve Crocker, and Jeff Schiller.
 308 */
 309
 310#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 311
 312#include <linux/utsname.h>
 313#include <linux/module.h>
 314#include <linux/kernel.h>
 315#include <linux/major.h>
 316#include <linux/string.h>
 317#include <linux/fcntl.h>
 318#include <linux/slab.h>
 319#include <linux/random.h>
 320#include <linux/poll.h>
 321#include <linux/init.h>
 322#include <linux/fs.h>
 323#include <linux/genhd.h>
 324#include <linux/interrupt.h>
 325#include <linux/mm.h>
 326#include <linux/nodemask.h>
 327#include <linux/spinlock.h>
 328#include <linux/kthread.h>
 329#include <linux/percpu.h>
 330#include <linux/fips.h>
 331#include <linux/ptrace.h>
 332#include <linux/workqueue.h>
 333#include <linux/irq.h>
 334#include <linux/ratelimit.h>
 335#include <linux/syscalls.h>
 336#include <linux/completion.h>
 337#include <linux/uuid.h>
 338#include <crypto/chacha.h>
 339#include <crypto/sha1.h>
 340
 341#include <asm/processor.h>
 342#include <linux/uaccess.h>
 343#include <asm/irq.h>
 344#include <asm/irq_regs.h>
 345#include <asm/io.h>
 346
 347#define CREATE_TRACE_POINTS
 348#include <trace/events/random.h>
 349
 350/* #define ADD_INTERRUPT_BENCH */
 351
 352/*
 353 * Configuration information
 354 */
 355#define INPUT_POOL_SHIFT        12
 356#define INPUT_POOL_WORDS        (1 << (INPUT_POOL_SHIFT-5))
 357#define OUTPUT_POOL_SHIFT       10
 358#define OUTPUT_POOL_WORDS       (1 << (OUTPUT_POOL_SHIFT-5))
 359#define EXTRACT_SIZE            10
 360
 361
 362#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
 363
 364/*
 365 * To allow fractional bits to be tracked, the entropy_count field is
 366 * denominated in units of 1/8th bits.
 367 *
 368 * 2*(ENTROPY_SHIFT + poolbitshift) must be <= 31, or the multiply in
 369 * credit_entropy_bits() needs to be 64 bits wide.
 370 */
 371#define ENTROPY_SHIFT 3
 372#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
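
/*
 * Worked example of the fractional-bit bookkeeping above: crediting one
 * bit of entropy adds 1 << ENTROPY_SHIFT == 8 to entropy_count, and a
 * pool whose entropy_count is 800 reports ENTROPY_BITS() == 100 bits.
 */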
 373
 374/*
 375 * If the entropy count falls under this number of bits, then we
 376 * should wake up processes which are selecting or polling on write
 377 * access to /dev/random.
 378 */
 379static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
 380
 381/*
 382 * Originally, we used a primitive polynomial of degree .poolwords
 383 * over GF(2).  The taps for various sizes are defined below.  They
 384 * were chosen to be evenly spaced except for the last tap, which is 1
 385 * to get the twisting happening as fast as possible.
 386 *
 387 * For the purposes of better mixing, we use the CRC-32 polynomial as
 388 * well to make a (modified) twisted Generalized Feedback Shift
 389 * Register.  (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR
 390 * generators.  ACM Transactions on Modeling and Computer Simulation
 391 * 2(3):179-194.  Also see M. Matsumoto & Y. Kurita, 1994.  Twisted
 392 * GFSR generators II.  ACM Transactions on Modeling and Computer
 393 * Simulation 4:254-266)
 394 *
 395 * Thanks to Colin Plumb for suggesting this.
 396 *
 397 * The mixing operation is much less sensitive than the output hash,
 398 * where we use SHA-1.  All that we want of the mixing operation is that
 399 * it be a good non-cryptographic hash; i.e., that it not produce collisions
 400 * when fed "random" data of the sort we expect to see.  As long as
 401 * the pool state differs for different inputs, we have preserved the
 402 * input entropy and done a good job.  The fact that an intelligent
 403 * attacker can construct inputs that will produce controlled
 404 * alterations to the pool's state is not important because we don't
 405 * consider such inputs to contribute any randomness.  The only
 406 * property we need with respect to them is that the attacker can't
 407 * increase his/her knowledge of the pool's state.  Since all
 408 * additions are reversible (knowing the final state and the input,
 409 * you can reconstruct the initial state), if an attacker has any
 410 * uncertainty about the initial state, he/she can only shuffle that
 411 * uncertainty about, but never cause any collisions (which would
 412 * decrease the uncertainty).
 413 *
 414 * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
 415 * Videau in their paper, "The Linux Pseudorandom Number Generator
 416 * Revisited" (see: http://eprint.iacr.org/2012/251.pdf).  In their
 417 * paper, they point out that we are not using a true Twisted GFSR,
 418 * since Matsumoto & Kurita used a trinomial feedback polynomial (that
 419 * is, with only three taps, instead of the six that we are using).
 420 * As a result, the resulting polynomial is neither primitive nor
 421 * irreducible, and hence does not have a maximal period over
 422 * GF(2**32).  They suggest a slight change to the generator
 423 * polynomial which makes the resulting TGFSR polynomial
 424 * irreducible; we have made that change here.
 425 */
 426static const struct poolinfo {
 427        int poolbitshift, poolwords, poolbytes, poolfracbits;
 428#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
 429        int tap1, tap2, tap3, tap4, tap5;
 430} poolinfo_table[] = {
 431        /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
 432        /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
 433        { S(128),       104,    76,     51,     25,     1 },
 434};
 435
 436/*
 437 * Static global variables
 438 */
 439static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 440static struct fasync_struct *fasync;
 441
 442static DEFINE_SPINLOCK(random_ready_list_lock);
 443static LIST_HEAD(random_ready_list);
 444
 445struct crng_state {
 446        __u32           state[16];
 447        unsigned long   init_time;
 448        spinlock_t      lock;
 449};
 450
 451static struct crng_state primary_crng = {
 452        .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
 453};
 454
 455/*
 456 * crng_init =  0 --> Uninitialized
 457 *              1 --> Initialized
 458 *              2 --> Initialized from input_pool
 459 *
 460 * crng_init is protected by primary_crng->lock, and only increases
 461 * its value (from 0->1->2).
 462 */
 463static int crng_init = 0;
 464#define crng_ready() (likely(crng_init > 1))
 465static int crng_init_cnt = 0;
 466static unsigned long crng_global_init_time = 0;
 467#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
 468static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
 469static void _crng_backtrack_protect(struct crng_state *crng,
 470                                    __u8 tmp[CHACHA_BLOCK_SIZE], int used);
 471static void process_random_ready_list(void);
 472static void _get_random_bytes(void *buf, int nbytes);
 473
 474static struct ratelimit_state unseeded_warning =
 475        RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
 476static struct ratelimit_state urandom_warning =
 477        RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
 478
 479static int ratelimit_disable __read_mostly;
 480
 481module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
 482MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
 483
 484/**********************************************************************
 485 *
 486 * OS independent entropy store.   Here are the functions which handle
 487 * storing entropy in an entropy pool.
 488 *
 489 **********************************************************************/
 490
 491struct entropy_store;
 492struct entropy_store {
 493        /* read-only data: */
 494        const struct poolinfo *poolinfo;
 495        __u32 *pool;
 496        const char *name;
 497
 498        /* read-write data: */
 499        spinlock_t lock;
 500        unsigned short add_ptr;
 501        unsigned short input_rotate;
 502        int entropy_count;
 503        unsigned int last_data_init:1;
 504        __u8 last_data[EXTRACT_SIZE];
 505};
 506
 507static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 508                               size_t nbytes, int min, int rsvd);
 509static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
 510                                size_t nbytes, int fips);
 511
 512static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
 513static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
 514
 515static struct entropy_store input_pool = {
 516        .poolinfo = &poolinfo_table[0],
 517        .name = "input",
 518        .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
 519        .pool = input_pool_data
 520};
 521
 522static __u32 const twist_table[8] = {
 523        0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
 524        0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
 525
 526/*
 527 * This function adds bytes into the entropy "pool".  It does not
 528 * update the entropy estimate.  The caller should call
 529 * credit_entropy_bits if this is appropriate.
 530 *
 531 * The pool is stirred with a primitive polynomial of the appropriate
 532 * degree, and then twisted.  We twist by three bits at a time because
 533 * it's cheap to do so and helps slightly in the expected case where
 534 * the entropy is concentrated in the low-order bits.
 535 */
 536static void _mix_pool_bytes(struct entropy_store *r, const void *in,
 537                            int nbytes)
 538{
 539        unsigned long i, tap1, tap2, tap3, tap4, tap5;
 540        int input_rotate;
 541        int wordmask = r->poolinfo->poolwords - 1;
 542        const char *bytes = in;
 543        __u32 w;
 544
 545        tap1 = r->poolinfo->tap1;
 546        tap2 = r->poolinfo->tap2;
 547        tap3 = r->poolinfo->tap3;
 548        tap4 = r->poolinfo->tap4;
 549        tap5 = r->poolinfo->tap5;
 550
 551        input_rotate = r->input_rotate;
 552        i = r->add_ptr;
 553
 554        /* mix one byte at a time to simplify size handling and churn faster */
 555        while (nbytes--) {
 556                w = rol32(*bytes++, input_rotate);
 557                i = (i - 1) & wordmask;
 558
 559                /* XOR in the various taps */
 560                w ^= r->pool[i];
 561                w ^= r->pool[(i + tap1) & wordmask];
 562                w ^= r->pool[(i + tap2) & wordmask];
 563                w ^= r->pool[(i + tap3) & wordmask];
 564                w ^= r->pool[(i + tap4) & wordmask];
 565                w ^= r->pool[(i + tap5) & wordmask];
 566
 567                /* Mix the result back in with a twist */
 568                r->pool[i] = (w >> 3) ^ twist_table[w & 7];
 569
 570                /*
 571                 * Normally, we add 7 bits of rotation to the pool.
 572                 * At the beginning of the pool, add an extra 7 bits
 573                 * rotation, so that successive passes spread the
 574                 * input bits across the pool evenly.
 575                 */
 576                input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
 577        }
 578
 579        r->input_rotate = input_rotate;
 580        r->add_ptr = i;
 581}
 582
 583static void __mix_pool_bytes(struct entropy_store *r, const void *in,
 584                             int nbytes)
 585{
 586        trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
 587        _mix_pool_bytes(r, in, nbytes);
 588}
 589
 590static void mix_pool_bytes(struct entropy_store *r, const void *in,
 591                           int nbytes)
 592{
 593        unsigned long flags;
 594
 595        trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
 596        spin_lock_irqsave(&r->lock, flags);
 597        _mix_pool_bytes(r, in, nbytes);
 598        spin_unlock_irqrestore(&r->lock, flags);
 599}
 600
 601struct fast_pool {
 602        __u32           pool[4];
 603        unsigned long   last;
 604        unsigned short  reg_idx;
 605        unsigned char   count;
 606};
 607
 608/*
 609 * This is a fast mixing routine used by the interrupt randomness
 610 * collector.  It's hardcoded for a 128 bit pool and assumes that any
 611 * locks that might be needed are taken by the caller.
 612 */
 613static void fast_mix(struct fast_pool *f)
 614{
 615        __u32 a = f->pool[0],   b = f->pool[1];
 616        __u32 c = f->pool[2],   d = f->pool[3];
 617
 618        a += b;                 c += d;
 619        b = rol32(b, 6);        d = rol32(d, 27);
 620        d ^= a;                 b ^= c;
 621
 622        a += b;                 c += d;
 623        b = rol32(b, 16);       d = rol32(d, 14);
 624        d ^= a;                 b ^= c;
 625
 626        a += b;                 c += d;
 627        b = rol32(b, 6);        d = rol32(d, 27);
 628        d ^= a;                 b ^= c;
 629
 630        a += b;                 c += d;
 631        b = rol32(b, 16);       d = rol32(d, 14);
 632        d ^= a;                 b ^= c;
 633
 634        f->pool[0] = a;  f->pool[1] = b;
 635        f->pool[2] = c;  f->pool[3] = d;
 636        f->count++;
 637}
 638
 639static void process_random_ready_list(void)
 640{
 641        unsigned long flags;
 642        struct random_ready_callback *rdy, *tmp;
 643
 644        spin_lock_irqsave(&random_ready_list_lock, flags);
 645        list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
 646                struct module *owner = rdy->owner;
 647
 648                list_del_init(&rdy->list);
 649                rdy->func(rdy);
 650                module_put(owner);
 651        }
 652        spin_unlock_irqrestore(&random_ready_list_lock, flags);
 653}
 654
 655/*
 656 * Credit (or debit) the entropy store with n bits of entropy.
 657 * Use credit_entropy_bits_safe() if the value comes from userspace
 658 * or otherwise should be checked for extreme values.
 659 */
 660static void credit_entropy_bits(struct entropy_store *r, int nbits)
 661{
 662        int entropy_count, orig;
 663        const int pool_size = r->poolinfo->poolfracbits;
 664        int nfrac = nbits << ENTROPY_SHIFT;
 665
 666        if (!nbits)
 667                return;
 668
 669retry:
 670        entropy_count = orig = READ_ONCE(r->entropy_count);
 671        if (nfrac < 0) {
 672                /* Debit */
 673                entropy_count += nfrac;
 674        } else {
 675                /*
 676                 * Credit: we have to account for the possibility of
 677                 * overwriting already present entropy.  Even in the
 678                 * ideal case of pure Shannon entropy, new contributions
 679                 * approach the full value asymptotically:
 680                 *
 681                 * entropy <- entropy + (pool_size - entropy) *
 682                 *      (1 - exp(-add_entropy/pool_size))
 683                 *
 684                 * For add_entropy <= pool_size/2 then
 685                 * (1 - exp(-add_entropy/pool_size)) >=
 686                 *    (add_entropy/pool_size)*0.7869...
 687                 * so we can approximate the exponential with
 688                 * 3/4*add_entropy/pool_size and still be on the
 689                 * safe side by adding at most pool_size/2 at a time.
 690                 *
 691                 * The use of pool_size-2 in the while statement is to
 692                 * prevent rounding artifacts from making the loop
 693                 * arbitrarily long; this limits the loop to log2(pool_size)*2
 694                 * turns no matter how large nbits is.
 695                 */
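                /*
                 * Worked example (using the 4096-bit input pool, so
                 * pool_size == 4096 << ENTROPY_SHIFT): crediting 512 bits
                 * to an empty pool adds about 3/4 * 512 == 384 bits on
                 * the first pass through the loop below.
                 */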
 696                int pnfrac = nfrac;
 697                const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
 698                /* The +2 corresponds to the /4 in the denominator */
 699
 700                do {
 701                        unsigned int anfrac = min(pnfrac, pool_size/2);
 702                        unsigned int add =
 703                                ((pool_size - entropy_count)*anfrac*3) >> s;
 704
 705                        entropy_count += add;
 706                        pnfrac -= anfrac;
 707                } while (unlikely(entropy_count < pool_size-2 && pnfrac));
 708        }
 709
 710        if (WARN_ON(entropy_count < 0)) {
 711                pr_warn("negative entropy/overflow: pool %s count %d\n",
 712                        r->name, entropy_count);
 713                entropy_count = 0;
 714        } else if (entropy_count > pool_size)
 715                entropy_count = pool_size;
 716        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
 717                goto retry;
 718
 719        trace_credit_entropy_bits(r->name, nbits,
 720                                  entropy_count >> ENTROPY_SHIFT, _RET_IP_);
 721
 722        if (r == &input_pool) {
 723                int entropy_bits = entropy_count >> ENTROPY_SHIFT;
 724
 725                if (crng_init < 2 && entropy_bits >= 128)
 726                        crng_reseed(&primary_crng, r);
 727        }
 728}
 729
 730static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
 731{
 732        const int nbits_max = r->poolinfo->poolwords * 32;
 733
 734        if (nbits < 0)
 735                return -EINVAL;
 736
 737        /* Cap the value to avoid overflows */
 738        nbits = min(nbits,  nbits_max);
 739
 740        credit_entropy_bits(r, nbits);
 741        return 0;
 742}
 743
 744/*********************************************************************
 745 *
 746 * CRNG using CHACHA20
 747 *
 748 *********************************************************************/
 749
 750#define CRNG_RESEED_INTERVAL (300*HZ)
 751
 752static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 753
 754#ifdef CONFIG_NUMA
 755/*
 756 * Hack to deal with crazy userspace programs when they are all trying
 757 * to access /dev/urandom in parallel.  The programs are almost
 758 * certainly doing something terribly wrong, but we'll work around
 759 * their brain damage.
 760 */
 761static struct crng_state **crng_node_pool __read_mostly;
 762#endif
 763
 764static void invalidate_batched_entropy(void);
 765static void numa_crng_init(void);
 766
 767static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
 768static int __init parse_trust_cpu(char *arg)
 769{
 770        return kstrtobool(arg, &trust_cpu);
 771}
 772early_param("random.trust_cpu", parse_trust_cpu);
 773
 774static bool crng_init_try_arch(struct crng_state *crng)
 775{
 776        int             i;
 777        bool            arch_init = true;
 778        unsigned long   rv;
 779
 780        for (i = 4; i < 16; i++) {
 781                if (!arch_get_random_seed_long(&rv) &&
 782                    !arch_get_random_long(&rv)) {
 783                        rv = random_get_entropy();
 784                        arch_init = false;
 785                }
 786                crng->state[i] ^= rv;
 787        }
 788
 789        return arch_init;
 790}
 791
 792static bool __init crng_init_try_arch_early(struct crng_state *crng)
 793{
 794        int             i;
 795        bool            arch_init = true;
 796        unsigned long   rv;
 797
 798        for (i = 4; i < 16; i++) {
 799                if (!arch_get_random_seed_long_early(&rv) &&
 800                    !arch_get_random_long_early(&rv)) {
 801                        rv = random_get_entropy();
 802                        arch_init = false;
 803                }
 804                crng->state[i] ^= rv;
 805        }
 806
 807        return arch_init;
 808}
 809
 810static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
 811{
 812        chacha_init_consts(crng->state);
 813        _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
 814        crng_init_try_arch(crng);
 815        crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 816}
 817
 818static void __init crng_initialize_primary(struct crng_state *crng)
 819{
 820        chacha_init_consts(crng->state);
 821        _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
 822        if (crng_init_try_arch_early(crng) && trust_cpu) {
 823                invalidate_batched_entropy();
 824                numa_crng_init();
 825                crng_init = 2;
 826                pr_notice("crng done (trusting CPU's manufacturer)\n");
 827        }
 828        crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 829}
 830
 831#ifdef CONFIG_NUMA
 832static void do_numa_crng_init(struct work_struct *work)
 833{
 834        int i;
 835        struct crng_state *crng;
 836        struct crng_state **pool;
 837
 838        pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
 839        for_each_online_node(i) {
 840                crng = kmalloc_node(sizeof(struct crng_state),
 841                                    GFP_KERNEL | __GFP_NOFAIL, i);
 842                spin_lock_init(&crng->lock);
 843                crng_initialize_secondary(crng);
 844                pool[i] = crng;
 845        }
 846        mb();
 847        if (cmpxchg(&crng_node_pool, NULL, pool)) {
 848                for_each_node(i)
 849                        kfree(pool[i]);
 850                kfree(pool);
 851        }
 852}
 853
 854static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
 855
 856static void numa_crng_init(void)
 857{
 858        schedule_work(&numa_crng_init_work);
 859}
 860#else
 861static void numa_crng_init(void) {}
 862#endif
 863
 864/*
 865 * crng_fast_load() can be called by code in the interrupt service
 866 * path.  So we can't afford to dilly-dally.
 867 */
 868static int crng_fast_load(const char *cp, size_t len)
 869{
 870        unsigned long flags;
 871        unsigned char *p;
 872
 873        if (!spin_trylock_irqsave(&primary_crng.lock, flags))
 874                return 0;
 875        if (crng_init != 0) {
 876                spin_unlock_irqrestore(&primary_crng.lock, flags);
 877                return 0;
 878        }
 879        p = (unsigned char *) &primary_crng.state[4];
 880        while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
 881                p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
 882                cp++; crng_init_cnt++; len--;
 883        }
 884        spin_unlock_irqrestore(&primary_crng.lock, flags);
 885        if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
 886                invalidate_batched_entropy();
 887                crng_init = 1;
 888                pr_notice("fast init done\n");
 889        }
 890        return 1;
 891}
 892
 893/*
 894 * crng_slow_load() is called by add_device_randomness, which has two
 895 * attributes.  (1) We can't trust that the buffer passed to it is
 896 * unpredictable (so it might not have any entropy at
 897 * all), and (2) it doesn't have the performance constraints of
 898 * crng_fast_load().
 899 *
 900 * So we do something more comprehensive which is guaranteed to touch
 901 * all of the primary_crng's state, and which uses an LFSR with a
 902 * period of 255 as part of the mixing algorithm.  Finally, we do
 903 * *not* advance crng_init_cnt since the buffer we get may be something
 904 * like a fixed DMI table (for example), which might very well be
 905 * unique to the machine, but is otherwise unvarying.
 906 */
 907static int crng_slow_load(const char *cp, size_t len)
 908{
 909        unsigned long           flags;
 910        static unsigned char    lfsr = 1;
 911        unsigned char           tmp;
 912        unsigned                i, max = CHACHA_KEY_SIZE;
 913        const char *            src_buf = cp;
 914        char *                  dest_buf = (char *) &primary_crng.state[4];
 915
 916        if (!spin_trylock_irqsave(&primary_crng.lock, flags))
 917                return 0;
 918        if (crng_init != 0) {
 919                spin_unlock_irqrestore(&primary_crng.lock, flags);
 920                return 0;
 921        }
 922        if (len > max)
 923                max = len;
 924
 925        for (i = 0; i < max ; i++) {
 926                tmp = lfsr;
 927                lfsr >>= 1;
 928                if (tmp & 1)
 929                        lfsr ^= 0xE1;
 930                tmp = dest_buf[i % CHACHA_KEY_SIZE];
 931                dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
 932                lfsr += (tmp << 3) | (tmp >> 5);
 933        }
 934        spin_unlock_irqrestore(&primary_crng.lock, flags);
 935        return 1;
 936}
 937
 938static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 939{
 940        unsigned long   flags;
 941        int             i, num;
 942        union {
 943                __u8    block[CHACHA_BLOCK_SIZE];
 944                __u32   key[8];
 945        } buf;
 946
 947        if (r) {
 948                num = extract_entropy(r, &buf, 32, 16, 0);
 949                if (num == 0)
 950                        return;
 951        } else {
 952                _extract_crng(&primary_crng, buf.block);
 953                _crng_backtrack_protect(&primary_crng, buf.block,
 954                                        CHACHA_KEY_SIZE);
 955        }
 956        spin_lock_irqsave(&crng->lock, flags);
 957        for (i = 0; i < 8; i++) {
 958                unsigned long   rv;
 959                if (!arch_get_random_seed_long(&rv) &&
 960                    !arch_get_random_long(&rv))
 961                        rv = random_get_entropy();
 962                crng->state[i+4] ^= buf.key[i] ^ rv;
 963        }
 964        memzero_explicit(&buf, sizeof(buf));
 965        crng->init_time = jiffies;
 966        spin_unlock_irqrestore(&crng->lock, flags);
 967        if (crng == &primary_crng && crng_init < 2) {
 968                invalidate_batched_entropy();
 969                numa_crng_init();
 970                crng_init = 2;
 971                process_random_ready_list();
 972                wake_up_interruptible(&crng_init_wait);
 973                kill_fasync(&fasync, SIGIO, POLL_IN);
 974                pr_notice("crng init done\n");
 975                if (unseeded_warning.missed) {
 976                        pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
 977                                  unseeded_warning.missed);
 978                        unseeded_warning.missed = 0;
 979                }
 980                if (urandom_warning.missed) {
 981                        pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
 982                                  urandom_warning.missed);
 983                        urandom_warning.missed = 0;
 984                }
 985        }
 986}
 987
 988static void _extract_crng(struct crng_state *crng,
 989                          __u8 out[CHACHA_BLOCK_SIZE])
 990{
 991        unsigned long v, flags;
 992
 993        if (crng_ready() &&
 994            (time_after(crng_global_init_time, crng->init_time) ||
 995             time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
 996                crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
 997        spin_lock_irqsave(&crng->lock, flags);
 998        if (arch_get_random_long(&v))
 999                crng->state[14] ^= v;
1000        chacha20_block(&crng->state[0], out);
1001        if (crng->state[12] == 0)
1002                crng->state[13]++;
1003        spin_unlock_irqrestore(&crng->lock, flags);
1004}
1005
1006static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
1007{
1008        struct crng_state *crng = NULL;
1009
1010#ifdef CONFIG_NUMA
1011        if (crng_node_pool)
1012                crng = crng_node_pool[numa_node_id()];
1013        if (crng == NULL)
1014#endif
1015                crng = &primary_crng;
1016        _extract_crng(crng, out);
1017}
1018
1019/*
1020 * Use the leftover bytes from the CRNG block output (if there is
1021 * enough) to mutate the CRNG key to provide backtracking protection.
1022 */
1023static void _crng_backtrack_protect(struct crng_state *crng,
1024                                    __u8 tmp[CHACHA_BLOCK_SIZE], int used)
1025{
1026        unsigned long   flags;
1027        __u32           *s, *d;
1028        int             i;
1029
1030        used = round_up(used, sizeof(__u32));
1031        if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
1032                extract_crng(tmp);
1033                used = 0;
1034        }
1035        spin_lock_irqsave(&crng->lock, flags);
1036        s = (__u32 *) &tmp[used];
1037        d = &crng->state[4];
1038        for (i=0; i < 8; i++)
1039                *d++ ^= *s++;
1040        spin_unlock_irqrestore(&crng->lock, flags);
1041}
1042
1043static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
1044{
1045        struct crng_state *crng = NULL;
1046
1047#ifdef CONFIG_NUMA
1048        if (crng_node_pool)
1049                crng = crng_node_pool[numa_node_id()];
1050        if (crng == NULL)
1051#endif
1052                crng = &primary_crng;
1053        _crng_backtrack_protect(crng, tmp, used);
1054}
1055
1056static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
1057{
1058        ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
1059        __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
1060        int large_request = (nbytes > 256);
1061
1062        while (nbytes) {
1063                if (large_request && need_resched()) {
1064                        if (signal_pending(current)) {
1065                                if (ret == 0)
1066                                        ret = -ERESTARTSYS;
1067                                break;
1068                        }
1069                        schedule();
1070                }
1071
1072                extract_crng(tmp);
1073                i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
1074                if (copy_to_user(buf, tmp, i)) {
1075                        ret = -EFAULT;
1076                        break;
1077                }
1078
1079                nbytes -= i;
1080                buf += i;
1081                ret += i;
1082        }
1083        crng_backtrack_protect(tmp, i);
1084
1085        /* Wipe data just written to memory */
1086        memzero_explicit(tmp, sizeof(tmp));
1087
1088        return ret;
1089}
1090
1091
1092/*********************************************************************
1093 *
1094 * Entropy input management
1095 *
1096 *********************************************************************/
1097
1098/* There is one of these per entropy source */
1099struct timer_rand_state {
1100        cycles_t last_time;
1101        long last_delta, last_delta2;
1102};
1103
1104#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
1105
1106/*
1107 * Add device- or boot-specific data to the input pool to help
1108 * initialize it.
1109 *
1110 * None of this adds any entropy; it is meant to avoid the problem of
1111 * the entropy pool having similar initial state across largely
1112 * identical devices.
1113 */
1114void add_device_randomness(const void *buf, unsigned int size)
1115{
1116        unsigned long time = random_get_entropy() ^ jiffies;
1117        unsigned long flags;
1118
1119        if (!crng_ready() && size)
1120                crng_slow_load(buf, size);
1121
1122        trace_add_device_randomness(size, _RET_IP_);
1123        spin_lock_irqsave(&input_pool.lock, flags);
1124        _mix_pool_bytes(&input_pool, buf, size);
1125        _mix_pool_bytes(&input_pool, &time, sizeof(time));
1126        spin_unlock_irqrestore(&input_pool.lock, flags);
1127}
1128EXPORT_SYMBOL(add_device_randomness);
1129
1130static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
1131
1132/*
1133 * This function adds entropy to the entropy "pool" by using timing
1134 * delays.  It uses the timer_rand_state structure to make an estimate
1135 * of how many bits of entropy this call has added to the pool.
1136 *
1137 * The number "num" is also added to the pool - it should somehow describe
1138 * the type of event which just happened.  This is currently 0-255 for
1139 * keyboard scan codes, and 256 upwards for interrupts.
1140 *
1141 */
1142static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
1143{
1144        struct entropy_store    *r;
1145        struct {
1146                long jiffies;
1147                unsigned cycles;
1148                unsigned num;
1149        } sample;
1150        long delta, delta2, delta3;
1151
1152        sample.jiffies = jiffies;
1153        sample.cycles = random_get_entropy();
1154        sample.num = num;
1155        r = &input_pool;
1156        mix_pool_bytes(r, &sample, sizeof(sample));
1157
1158        /*
1159         * Calculate number of bits of randomness we probably added.
1160         * We take into account the first, second and third-order deltas
1161         * in order to make our estimate.
1162         */
1163        delta = sample.jiffies - READ_ONCE(state->last_time);
1164        WRITE_ONCE(state->last_time, sample.jiffies);
1165
1166        delta2 = delta - READ_ONCE(state->last_delta);
1167        WRITE_ONCE(state->last_delta, delta);
1168
1169        delta3 = delta2 - READ_ONCE(state->last_delta2);
1170        WRITE_ONCE(state->last_delta2, delta2);
1171
1172        if (delta < 0)
1173                delta = -delta;
1174        if (delta2 < 0)
1175                delta2 = -delta2;
1176        if (delta3 < 0)
1177                delta3 = -delta3;
1178        if (delta > delta2)
1179                delta = delta2;
1180        if (delta > delta3)
1181                delta = delta3;
1182
1183        /*
1184         * delta is now minimum absolute delta.
1185         * Round down by 1 bit on general principles,
1186         * and limit entropy estimate to 12 bits.
1187         */
1188        credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
1189}
1190
1191void add_input_randomness(unsigned int type, unsigned int code,
1192                                 unsigned int value)
1193{
1194        static unsigned char last_value;
1195
1196        /* ignore autorepeat and the like */
1197        if (value == last_value)
1198                return;
1199
1200        last_value = value;
1201        add_timer_randomness(&input_timer_state,
1202                             (type << 4) ^ code ^ (code >> 4) ^ value);
1203        trace_add_input_randomness(ENTROPY_BITS(&input_pool));
1204}
1205EXPORT_SYMBOL_GPL(add_input_randomness);
1206
1207static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
1208
1209#ifdef ADD_INTERRUPT_BENCH
1210static unsigned long avg_cycles, avg_deviation;
1211
1212#define AVG_SHIFT 8     /* Exponential average factor k=1/256 */
1213#define FIXED_1_2 (1 << (AVG_SHIFT-1))
1214
1215static void add_interrupt_bench(cycles_t start)
1216{
1217        long delta = random_get_entropy() - start;
1218
1219        /* Use a weighted moving average */
1220        delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
1221        avg_cycles += delta;
1222        /* And average deviation */
1223        delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
1224        avg_deviation += delta;
1225}
1226#else
1227#define add_interrupt_bench(x)
1228#endif
1229
1230static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
1231{
1232        __u32 *ptr = (__u32 *) regs;
1233        unsigned int idx;
1234
1235        if (regs == NULL)
1236                return 0;
1237        idx = READ_ONCE(f->reg_idx);
1238        if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
1239                idx = 0;
1240        ptr += idx++;
1241        WRITE_ONCE(f->reg_idx, idx);
1242        return *ptr;
1243}
1244
1245void add_interrupt_randomness(int irq, int irq_flags)
1246{
1247        struct entropy_store    *r;
1248        struct fast_pool        *fast_pool = this_cpu_ptr(&irq_randomness);
1249        struct pt_regs          *regs = get_irq_regs();
1250        unsigned long           now = jiffies;
1251        cycles_t                cycles = random_get_entropy();
1252        __u32                   c_high, j_high;
1253        __u64                   ip;
1254
1255        if (cycles == 0)
1256                cycles = get_reg(fast_pool, regs);
1257        c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
1258        j_high = (sizeof(now) > 4) ? now >> 32 : 0;
1259        fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
1260        fast_pool->pool[1] ^= now ^ c_high;
1261        ip = regs ? instruction_pointer(regs) : _RET_IP_;
1262        fast_pool->pool[2] ^= ip;
1263        fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
1264                get_reg(fast_pool, regs);
1265
1266        fast_mix(fast_pool);
1267        add_interrupt_bench(cycles);
1268
1269        if (unlikely(crng_init == 0)) {
1270                if ((fast_pool->count >= 64) &&
1271                    crng_fast_load((char *) fast_pool->pool,
1272                                   sizeof(fast_pool->pool))) {
1273                        fast_pool->count = 0;
1274                        fast_pool->last = now;
1275                }
1276                return;
1277        }
1278
1279        if ((fast_pool->count < 64) &&
1280            !time_after(now, fast_pool->last + HZ))
1281                return;
1282
1283        r = &input_pool;
1284        if (!spin_trylock(&r->lock))
1285                return;
1286
1287        fast_pool->last = now;
1288        __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
1289        spin_unlock(&r->lock);
1290
1291        fast_pool->count = 0;
1292
1293        /* award one bit for the contents of the fast pool */
1294        credit_entropy_bits(r, 1);
1295}
1296EXPORT_SYMBOL_GPL(add_interrupt_randomness);
1297
1298#ifdef CONFIG_BLOCK
1299void add_disk_randomness(struct gendisk *disk)
1300{
1301        if (!disk || !disk->random)
1302                return;
1303        /* first major is 1, so we get >= 0x200 here */
1304        add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
1305        trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
1306}
1307EXPORT_SYMBOL_GPL(add_disk_randomness);
1308#endif
1309
1310/*********************************************************************
1311 *
1312 * Entropy extraction routines
1313 *
1314 *********************************************************************/
1315
1316/*
1317 * This function decides how many bytes to actually take from the
1318 * given pool, and also debits the entropy count accordingly.
1319 */
1320static size_t account(struct entropy_store *r, size_t nbytes, int min,
1321                      int reserved)
1322{
1323        int entropy_count, orig, have_bytes;
1324        size_t ibytes, nfrac;
1325
1326        BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
1327
1328        /* Can we pull enough? */
1329retry:
1330        entropy_count = orig = READ_ONCE(r->entropy_count);
1331        ibytes = nbytes;
1332        /* never pull more than available */
1333        have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
1334
1335        if ((have_bytes -= reserved) < 0)
1336                have_bytes = 0;
1337        ibytes = min_t(size_t, ibytes, have_bytes);
1338        if (ibytes < min)
1339                ibytes = 0;
1340
1341        if (WARN_ON(entropy_count < 0)) {
1342                pr_warn("negative entropy count: pool %s count %d\n",
1343                        r->name, entropy_count);
1344                entropy_count = 0;
1345        }
1346        nfrac = ibytes << (ENTROPY_SHIFT + 3);
1347        if ((size_t) entropy_count > nfrac)
1348                entropy_count -= nfrac;
1349        else
1350                entropy_count = 0;
1351
1352        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
1353                goto retry;
1354
1355        trace_debit_entropy(r->name, 8 * ibytes);
1356        if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
1357                wake_up_interruptible(&random_write_wait);
1358                kill_fasync(&fasync, SIGIO, POLL_OUT);
1359        }
1360
1361        return ibytes;
1362}

/*
 * This function does the actual extraction for extract_entropy.
 *
 * Note: we assume that .poolwords is a multiple of 16 words.
 */
static void extract_buf(struct entropy_store *r, __u8 *out)
{
        int i;
        union {
                __u32 w[5];
                unsigned long l[LONGS(20)];
        } hash;
        __u32 workspace[SHA1_WORKSPACE_WORDS];
        unsigned long flags;

        /*
         * If we have an architectural hardware random number
         * generator, use it for SHA's initial vector
         */
        sha1_init(hash.w);
        for (i = 0; i < LONGS(20); i++) {
                unsigned long v;
                if (!arch_get_random_long(&v))
                        break;
                hash.l[i] = v;
        }

        /* Generate a hash across the pool, 16 words (512 bits) at a time */
        spin_lock_irqsave(&r->lock, flags);
        for (i = 0; i < r->poolinfo->poolwords; i += 16)
                sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);

        /*
         * We mix the hash back into the pool to prevent backtracking
         * attacks (where the attacker knows the state of the pool
         * plus the current outputs, and attempts to find previous
         * outputs), unless the hash function can be inverted. By
         * mixing at least a SHA1 worth of hash data back, we make
         * brute-forcing the feedback as hard as brute-forcing the
         * hash.
         */
        __mix_pool_bytes(r, hash.w, sizeof(hash.w));
        spin_unlock_irqrestore(&r->lock, flags);

        memzero_explicit(workspace, sizeof(workspace));

        /*
         * In case the hash function has some recognizable output
         * pattern, we fold it in half. Thus, we always feed back
         * twice as much data as we output.
         */
        hash.w[0] ^= hash.w[3];
        hash.w[1] ^= hash.w[4];
        hash.w[2] ^= rol32(hash.w[2], 16);

        memcpy(out, &hash, EXTRACT_SIZE);
        memzero_explicit(&hash, sizeof(hash));
}

static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
                                size_t nbytes, int fips)
{
        ssize_t ret = 0, i;
        __u8 tmp[EXTRACT_SIZE];
        unsigned long flags;

        while (nbytes) {
                extract_buf(r, tmp);

                if (fips) {
                        spin_lock_irqsave(&r->lock, flags);
                        if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
                                panic("Hardware RNG duplicated output!\n");
                        memcpy(r->last_data, tmp, EXTRACT_SIZE);
                        spin_unlock_irqrestore(&r->lock, flags);
                }
                i = min_t(int, nbytes, EXTRACT_SIZE);
                memcpy(buf, tmp, i);
                nbytes -= i;
                buf += i;
                ret += i;
        }

        /* Wipe data just returned from memory */
        memzero_explicit(tmp, sizeof(tmp));

        return ret;
}

/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a buffer.
 *
 * The min parameter specifies the minimum amount we can pull before
 * failing, to avoid races that defeat catastrophic reseeding. The
 * reserved parameter indicates how much entropy we must leave in the
 * pool after each pull to avoid starving other readers.
 */
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                                 size_t nbytes, int min, int reserved)
{
        __u8 tmp[EXTRACT_SIZE];
        unsigned long flags;

        /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
        if (fips_enabled) {
                spin_lock_irqsave(&r->lock, flags);
                if (!r->last_data_init) {
                        r->last_data_init = 1;
                        spin_unlock_irqrestore(&r->lock, flags);
                        trace_extract_entropy(r->name, EXTRACT_SIZE,
                                              ENTROPY_BITS(r), _RET_IP_);
                        extract_buf(r, tmp);
                        spin_lock_irqsave(&r->lock, flags);
                        memcpy(r->last_data, tmp, EXTRACT_SIZE);
                }
                spin_unlock_irqrestore(&r->lock, flags);
        }

        trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
        nbytes = account(r, nbytes, min, reserved);

        return _extract_entropy(r, buf, nbytes, fips_enabled);
}

#define warn_unseeded_randomness(previous) \
        _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller,
                                      void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
        const bool print_once = false;
#else
        static bool print_once __read_mostly;
#endif

        if (print_once ||
            crng_ready() ||
            (previous && (caller == READ_ONCE(*previous))))
                return;
        WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
        print_once = true;
#endif
        if (__ratelimit(&unseeded_warning))
                printk_deferred(KERN_NOTICE "random: %s called from %pS "
                                "with crng_init=%d\n", func_name, caller,
                                crng_init);
}

/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
static void _get_random_bytes(void *buf, int nbytes)
{
        __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);

        trace_get_random_bytes(nbytes, _RET_IP_);

        while (nbytes >= CHACHA_BLOCK_SIZE) {
                extract_crng(buf);
                buf += CHACHA_BLOCK_SIZE;
                nbytes -= CHACHA_BLOCK_SIZE;
        }

        if (nbytes > 0) {
                extract_crng(tmp);
                memcpy(buf, tmp, nbytes);
                crng_backtrack_protect(tmp, nbytes);
        } else
                crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
        memzero_explicit(tmp, sizeof(tmp));
}

void get_random_bytes(void *buf, int nbytes)
{
        static void *previous;

        warn_unseeded_randomness(&previous);
        _get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);
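
/*
 * Example (editor's illustration, not part of the kernel): a typical
 * consumer derives key material only after the CRNG is known to be
 * seeded; the function name below is hypothetical.
 *
 *	static int example_make_session_key(u8 key[32])
 *	{
 *		int ret = wait_for_random_bytes();
 *
 *		if (ret)
 *			return ret;
 *		get_random_bytes(key, 32);
 *		return 0;
 *	}
 *
 * wait_for_random_bytes() (defined below) returns 0 once the CRNG is
 * seeded, or -ERESTARTSYS if the caller is interrupted by a signal.
 */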

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter. Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that is
 * generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void entropy_timer(struct timer_list *t)
{
        credit_entropy_bits(&input_pool, 1);
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise
 */
static void try_to_generate_entropy(void)
{
        struct {
                unsigned long now;
                struct timer_list timer;
        } stack;

        stack.now = random_get_entropy();

        /* Slow counter - or none. Don't even bother */
        if (stack.now == random_get_entropy())
                return;

        timer_setup_on_stack(&stack.timer, entropy_timer, 0);
        while (!crng_ready()) {
                if (!timer_pending(&stack.timer))
                        mod_timer(&stack.timer, jiffies + 1);
                mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
                schedule();
                stack.now = random_get_entropy();
        }

        del_timer_sync(&stack.timer);
        destroy_timer_on_stack(&stack.timer);
        mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
}

/*
 * Wait for the urandom pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions. Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the urandom pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
        if (likely(crng_ready()))
                return 0;

        do {
                int ret;
                ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
                if (ret)
                        return ret > 0 ? 0 : ret;

                try_to_generate_entropy();
        } while (!crng_ready());

        return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);

/*
 * Returns whether or not the urandom pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
 * u64,int,long} family of functions.
 *
 * Returns: true if the urandom pool has been seeded.
 *          false if the urandom pool has not been seeded.
 */
bool rng_is_initialized(void)
{
        return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);
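
/*
 * Example (editor's illustration): a path that cannot sleep may check
 * rng_is_initialized() and fall back until the CRNG is ready; the
 * function name is hypothetical.
 *
 *	static bool example_try_nonce(u32 *nonce)
 *	{
 *		if (!rng_is_initialized())
 *			return false;
 *		*nonce = get_random_u32();
 *		return true;
 *	}
 */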

/*
 * Add a callback function that will be invoked when the nonblocking
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *          -EALREADY if pool is already initialised (callback not called)
 *          -ENOENT if module for callback is not alive
 */
int add_random_ready_callback(struct random_ready_callback *rdy)
{
        struct module *owner;
        unsigned long flags;
        int err = -EALREADY;

        if (crng_ready())
                return err;

        owner = rdy->owner;
        if (!try_module_get(owner))
                return -ENOENT;

        spin_lock_irqsave(&random_ready_list_lock, flags);
        if (crng_ready())
                goto out;

        owner = NULL;

        list_add(&rdy->list, &random_ready_list);
        err = 0;

out:
        spin_unlock_irqrestore(&random_ready_list_lock, flags);

        module_put(owner);

        return err;
}
EXPORT_SYMBOL(add_random_ready_callback);
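
/*
 * Example (editor's illustration): deferring work until the CRNG is
 * seeded, using the -EALREADY convention documented above; the
 * example_* names are hypothetical.
 *
 *	static void example_seed_ready(struct random_ready_callback *rdy)
 *	{
 *		example_finish_setup();
 *	}
 *
 *	static struct random_ready_callback example_rdy = {
 *		.func  = example_seed_ready,
 *		.owner = THIS_MODULE,
 *	};
 *
 *	err = add_random_ready_callback(&example_rdy);
 *	if (err == -EALREADY)
 *		example_finish_setup();
 *	else if (err)
 *		return err;
 *
 * On -EALREADY the pool was seeded before registration, so the caller
 * runs the work directly. A module that may be unloaded must pair this
 * with del_random_ready_callback() (below) if the callback has not
 * fired by unload time.
 */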

/*
 * Delete a previously registered readiness callback function.
 */
void del_random_ready_callback(struct random_ready_callback *rdy)
{
        unsigned long flags;
        struct module *owner = NULL;

        spin_lock_irqsave(&random_ready_list_lock, flags);
        if (!list_empty(&rdy->list)) {
                list_del_init(&rdy->list);
                owner = rdy->owner;
        }
        spin_unlock_irqrestore(&random_ready_list_lock, flags);

        module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);

/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available.  The arch-specific hw RNG will
 * almost certainly be faster than what we can do in software, but it
 * is impossible to verify that it is implemented securely (as
 * opposed to, say, the AES encryption of a sequence number using a
 * key known by the NSA).  So it's useful if we need the speed, but
 * only if we're willing to trust the hardware manufacturer not to
 * have put in a back door.
 *
 * Return number of bytes filled in.
 */
int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
        int left = nbytes;
        char *p = buf;

        trace_get_random_bytes_arch(left, _RET_IP_);
        while (left) {
                unsigned long v;
                int chunk = min_t(int, left, sizeof(unsigned long));

                if (!arch_get_random_long(&v))
                        break;

                memcpy(p, &v, chunk);
                p += chunk;
                left -= chunk;
        }

        return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);
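
/*
 * Example (editor's illustration): the return value may be short (the
 * arch RNG can be absent or can run dry), which is why the function is
 * __must_check; callers typically fall back to get_random_bytes():
 *
 *	u8 seed[64];
 *
 *	if (get_random_bytes_arch(seed, sizeof(seed)) != sizeof(seed))
 *		get_random_bytes(seed, sizeof(seed));
 */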

/*
 * init_std_data - initialize pool with system data
 *
 * @r: pool to initialize
 *
 * This function mixes some system data into the pool to prepare it
 * for use. The pool is not cleared, as that could only decrease the
 * entropy in the pool.
 */
static void __init init_std_data(struct entropy_store *r)
{
        int i;
        ktime_t now = ktime_get_real();
        unsigned long rv;

        mix_pool_bytes(r, &now, sizeof(now));
        for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
                if (!arch_get_random_seed_long(&rv) &&
                    !arch_get_random_long(&rv))
                        rv = random_get_entropy();
                mix_pool_bytes(r, &rv, sizeof(rv));
        }
        mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}

/*
 * Note that setup_arch() may call add_device_randomness()
 * long before we get here. This allows seeding of the pools
 * with some platform dependent data very early in the boot
 * process. But it limits our options here. We must use
 * statically allocated structures that already have all
 * initializations complete at compile time. We should also
 * take care not to overwrite the precious per platform data
 * we were given.
 */
int __init rand_initialize(void)
{
        init_std_data(&input_pool);
        crng_initialize_primary(&primary_crng);
        crng_global_init_time = jiffies;
        if (ratelimit_disable) {
                urandom_warning.interval = 0;
                unseeded_warning.interval = 0;
        }
        return 0;
}

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
        struct timer_rand_state *state;

        /*
         * If kzalloc returns null, we just won't use that entropy
         * source.
         */
        state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
        if (state) {
                state->last_time = INITIAL_JIFFIES;
                disk->random = state;
        }
}
#endif

static ssize_t
urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
                    loff_t *ppos)
{
        int ret;

        nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
        ret = extract_crng_user(buf, nbytes);
        trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
        return ret;
}

static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
        unsigned long flags;
        static int maxwarn = 10;

        if (!crng_ready() && maxwarn > 0) {
                maxwarn--;
                if (__ratelimit(&urandom_warning))
                        pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
                                  current->comm, nbytes);
                spin_lock_irqsave(&primary_crng.lock, flags);
                crng_init_cnt = 0;
                spin_unlock_irqrestore(&primary_crng.lock, flags);
        }

        return urandom_read_nowarn(file, buf, nbytes, ppos);
}

static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
        int ret;

        ret = wait_for_random_bytes();
        if (ret != 0)
                return ret;
        return urandom_read_nowarn(file, buf, nbytes, ppos);
}

static __poll_t
random_poll(struct file *file, poll_table *wait)
{
        __poll_t mask;

        poll_wait(file, &crng_init_wait, wait);
        poll_wait(file, &random_write_wait, wait);
        mask = 0;
        if (crng_ready())
                mask |= EPOLLIN | EPOLLRDNORM;
        if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
                mask |= EPOLLOUT | EPOLLWRNORM;
        return mask;
}

static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
        size_t bytes;
        __u32 t, buf[16];
        const char __user *p = buffer;

        while (count > 0) {
                int b, i = 0;

                bytes = min(count, sizeof(buf));
                if (copy_from_user(&buf, p, bytes))
                        return -EFAULT;

                for (b = bytes; b > 0; b -= sizeof(__u32), i++) {
                        if (!arch_get_random_int(&t))
                                break;
                        buf[i] ^= t;
                }

                count -= bytes;
                p += bytes;

                mix_pool_bytes(r, buf, bytes);
                cond_resched();
        }

        return 0;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        ssize_t ret;

        ret = write_pool(&input_pool, buffer, count);
        if (ret)
                return ret;

        return (ssize_t)count;
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
        int size, ent_count;
        int __user *p = (int __user *)arg;
        int retval;

        switch (cmd) {
        case RNDGETENTCNT:
                /* inherently racy, no point locking */
                ent_count = ENTROPY_BITS(&input_pool);
                if (put_user(ent_count, p))
                        return -EFAULT;
                return 0;
        case RNDADDTOENTCNT:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (get_user(ent_count, p))
                        return -EFAULT;
                return credit_entropy_bits_safe(&input_pool, ent_count);
        case RNDADDENTROPY:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (get_user(ent_count, p++))
                        return -EFAULT;
                if (ent_count < 0)
                        return -EINVAL;
                if (get_user(size, p++))
                        return -EFAULT;
                retval = write_pool(&input_pool, (const char __user *)p,
                                    size);
                if (retval < 0)
                        return retval;
                return credit_entropy_bits_safe(&input_pool, ent_count);
        case RNDZAPENTCNT:
        case RNDCLEARPOOL:
                /*
                 * Clear the entropy pool counters. We no longer clear
                 * the entropy pool, as that's silly.
                 */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                input_pool.entropy_count = 0;
                return 0;
        case RNDRESEEDCRNG:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (crng_init < 2)
                        return -ENODATA;
                crng_reseed(&primary_crng, &input_pool);
                crng_global_init_time = jiffies - 1;
                return 0;
        default:
                return -EINVAL;
        }
}
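
/*
 * Example (editor's illustration): a privileged userspace daemon can
 * mix *and* credit entropy via RNDADDENTROPY, whereas a plain write()
 * to /dev/random only mixes. Error handling is omitted and the entropy
 * gathering itself is left as a hypothetical example_gather().
 *
 *	#include <linux/random.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *
 *	union {
 *		struct rand_pool_info info;
 *		char raw[sizeof(struct rand_pool_info) + 32];
 *	} u;
 *	int fd = open("/dev/random", O_WRONLY);
 *
 *	u.info.entropy_count = 256;
 *	u.info.buf_size = 32;
 *	example_gather(u.info.buf, 32);
 *	ioctl(fd, RNDADDENTROPY, &u.info);
 *
 * Here 32 bytes are mixed in and 256 bits of entropy are credited to
 * the input pool.
 */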

static int random_fasync(int fd, struct file *filp, int on)
{
        return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
        .read  = random_read,
        .write = random_write,
        .poll  = random_poll,
        .unlocked_ioctl = random_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
        .read  = urandom_read,
        .write = random_write,
        .unlocked_ioctl = random_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
};

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
                unsigned int, flags)
{
        int ret;

        if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
                return -EINVAL;

        /*
         * Requesting insecure and blocking randomness at the same time makes
         * no sense.
         */
        if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
                return -EINVAL;

        if (count > INT_MAX)
                count = INT_MAX;

        if (!(flags & GRND_INSECURE) && !crng_ready()) {
                if (flags & GRND_NONBLOCK)
                        return -EAGAIN;
                ret = wait_for_random_bytes();
                if (unlikely(ret))
                        return ret;
        }
        return urandom_read_nowarn(NULL, buf, count, NULL);
}
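
/*
 * Example (editor's illustration): the usual userspace pattern; glibc
 * (since 2.25) exposes a getrandom() wrapper in <sys/random.h>.
 *
 *	#include <sys/random.h>
 *
 *	unsigned char key[32];
 *	ssize_t n = getrandom(key, sizeof(key), 0);
 *
 * With flags == 0 the call blocks until the CRNG is seeded; with
 * GRND_NONBLOCK it instead fails with EAGAIN if the CRNG is not yet
 * ready, matching the -EAGAIN return above.
 */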

/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int min_write_thresh;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID.  The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
 * If the user accesses this via the proc interface, the UUID will be
 * returned as an ASCII string in the standard UUID format; if via the
 * sysctl system call, as 16 bytes of binary data.
 */
static int proc_do_uuid(struct ctl_table *table, int write,
                        void *buffer, size_t *lenp, loff_t *ppos)
{
        struct ctl_table fake_table;
        unsigned char buf[64], tmp_uuid[16], *uuid;

        uuid = table->data;
        if (!uuid) {
                uuid = tmp_uuid;
                generate_random_uuid(uuid);
        } else {
                static DEFINE_SPINLOCK(bootid_spinlock);

                spin_lock(&bootid_spinlock);
                if (!uuid[8])
                        generate_random_uuid(uuid);
                spin_unlock(&bootid_spinlock);
        }

        sprintf(buf, "%pU", uuid);

        fake_table.data = buf;
        fake_table.maxlen = sizeof(buf);

        return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
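
/*
 * Example (editor's illustration): each read of the "uuid" file below
 * returns a fresh random UUID, while "boot_id" is generated once and
 * then remains stable for the life of the boot.
 *
 *	#include <stdio.h>
 *
 *	char uuid[37] = "";
 *	FILE *f = fopen("/proc/sys/kernel/random/uuid", "r");
 *
 *	if (f) {
 *		fgets(uuid, sizeof(uuid), f);
 *		fclose(f);
 *	}
 */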

/*
 * Return entropy available scaled to integral bits
 */
static int proc_do_entropy(struct ctl_table *table, int write,
                           void *buffer, size_t *lenp, loff_t *ppos)
{
        struct ctl_table fake_table;
        int entropy_count;

        entropy_count = *(int *)table->data >> ENTROPY_SHIFT;

        fake_table.data = &entropy_count;
        fake_table.maxlen = sizeof(entropy_count);

        return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
}

static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
        {
                .procname       = "poolsize",
                .data           = &sysctl_poolsize,
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "entropy_avail",
                .maxlen         = sizeof(int),
                .mode           = 0444,
                .proc_handler   = proc_do_entropy,
                .data           = &input_pool.entropy_count,
        },
        {
                .procname       = "write_wakeup_threshold",
                .data           = &random_write_wakeup_bits,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &min_write_thresh,
                .extra2         = &max_write_thresh,
        },
        {
                .procname       = "urandom_min_reseed_secs",
                .data           = &random_min_urandom_seed,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "boot_id",
                .data           = &sysctl_bootid,
                .maxlen         = 16,
                .mode           = 0444,
                .proc_handler   = proc_do_uuid,
        },
        {
                .procname       = "uuid",
                .maxlen         = 16,
                .mode           = 0444,
                .proc_handler   = proc_do_uuid,
        },
#ifdef ADD_INTERRUPT_BENCH
        {
                .procname       = "add_interrupt_avg_cycles",
                .data           = &avg_cycles,
                .maxlen         = sizeof(avg_cycles),
                .mode           = 0444,
                .proc_handler   = proc_doulongvec_minmax,
        },
        {
                .procname       = "add_interrupt_avg_deviation",
                .data           = &avg_deviation,
                .maxlen         = sizeof(avg_deviation),
                .mode           = 0444,
                .proc_handler   = proc_doulongvec_minmax,
        },
#endif
        { }
};
#endif  /* CONFIG_SYSCTL */

struct batched_entropy {
        union {
                u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
                u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
        };
        unsigned int position;
        spinlock_t batch_lock;
};

/*
 * Get a random word for internal kernel use only. The quality of the random
 * number is as good as /dev/urandom, but there is no backtrack protection,
 * with the goal of being quite fast and not depleting entropy. In order to
 * ensure that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once at any
 * point prior.
 */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
        .batch_lock     = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
};

u64 get_random_u64(void)
{
        u64 ret;
        unsigned long flags;
        struct batched_entropy *batch;
        static void *previous;

        warn_unseeded_randomness(&previous);

        batch = raw_cpu_ptr(&batched_entropy_u64);
        spin_lock_irqsave(&batch->batch_lock, flags);
        if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
                extract_crng((u8 *)batch->entropy_u64);
                batch->position = 0;
        }
        ret = batch->entropy_u64[batch->position++];
        spin_unlock_irqrestore(&batch->batch_lock, flags);
        return ret;
}
EXPORT_SYMBOL(get_random_u64);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
        .batch_lock     = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
};
u32 get_random_u32(void)
{
        u32 ret;
        unsigned long flags;
        struct batched_entropy *batch;
        static void *previous;

        warn_unseeded_randomness(&previous);

        batch = raw_cpu_ptr(&batched_entropy_u32);
        spin_lock_irqsave(&batch->batch_lock, flags);
        if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
                extract_crng((u8 *)batch->entropy_u32);
                batch->position = 0;
        }
        ret = batch->entropy_u32[batch->position++];
        spin_unlock_irqrestore(&batch->batch_lock, flags);
        return ret;
}
EXPORT_SYMBOL(get_random_u32);
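
/*
 * Example (editor's illustration): these batched helpers suit hot
 * paths that want unpredictable but non-secret values, such as
 * jittering a retransmit timeout; the function name is hypothetical.
 *
 *	static unsigned long example_jittered(unsigned long timeout)
 *	{
 *		return timeout + (get_random_u32() % HZ);
 *	}
 *
 * For key material use get_random_bytes() instead, which provides
 * backtrack protection.
 */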

/* It's important to invalidate all potential batched entropy that might
 * be stored before the crng is initialized, which we can do lazily by
 * simply resetting the counter to zero so that it's re-extracted on the
 * next usage. */
static void invalidate_batched_entropy(void)
{
        int cpu;
        unsigned long flags;

        for_each_possible_cpu(cpu) {
                struct batched_entropy *batched_entropy;

                /* interrupts stay disabled across both batch locks */
                batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
                spin_lock_irqsave(&batched_entropy->batch_lock, flags);
                batched_entropy->position = 0;
                spin_unlock(&batched_entropy->batch_lock);

                batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
                spin_lock(&batched_entropy->batch_lock);
                batched_entropy->position = 0;
                spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
        }
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:      The smallest acceptable address the caller will take.
 * @range:      The size of the area, starting at @start, within which the
 *              random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long
randomize_page(unsigned long start, unsigned long range)
{
        if (!PAGE_ALIGNED(start)) {
                range -= PAGE_ALIGN(start) - start;
                start = PAGE_ALIGN(start);
        }

        if (start > ULONG_MAX - range)
                range = ULONG_MAX - start;

        range >>= PAGE_SHIFT;

        if (range == 0)
                return start;

        return start + ((get_random_long() % range) << PAGE_SHIFT);
}
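
/*
 * Example (editor's illustration): choosing a randomized mapping base
 * inside a 1 GiB window, similar in spirit to the arch mmap layout
 * code:
 *
 *	unsigned long base = randomize_page(TASK_UNMAPPED_BASE, SZ_1G);
 *
 * The result is page aligned and, since @range is reduced to whole
 * pages, always below TASK_UNMAPPED_BASE + SZ_1G.
 */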

/* Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const char *buffer, size_t count,
                                size_t entropy)
{
        struct entropy_store *poolp = &input_pool;

        if (unlikely(crng_init == 0)) {
                crng_fast_load(buffer, count);
                return;
        }

        /* Suspend writing if we're above the trickle threshold.
         * We'll be woken up again once below random_write_wakeup_bits,
         * or when the calling thread is about to terminate.
         */
        wait_event_interruptible(random_write_wait, kthread_should_stop() ||
                        ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
        mix_pool_bytes(poolp, buffer, count);
        credit_entropy_bits(poolp, entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
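
/*
 * Example (editor's illustration): a driver kthread feeding a true
 * hardware RNG into the pool; example_hw_read() is hypothetical, and
 * the entropy argument is in bits. The wait above means the thread
 * simply sleeps while the pool is full.
 *
 *	static int example_hwrng_thread(void *data)
 *	{
 *		u8 buf[32];
 *
 *		while (!kthread_should_stop()) {
 *			example_hw_read(buf, sizeof(buf));
 *			add_hwgenerator_randomness(buf, sizeof(buf),
 *						   sizeof(buf) * 8);
 *		}
 *		return 0;
 *	}
 *
 * A real driver would normally scale the credited bits by its quality
 * estimate rather than claiming 8 bits per byte.
 */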

/* Handle a random seed passed in by the bootloader.
 * If the seed is trustworthy, it is credited as if it came from a
 * hardware RNG; otherwise it is mixed in as ordinary device data.
 * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
 */
void add_bootloader_randomness(const void *buf, unsigned int size)
{
        if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
                add_hwgenerator_randomness(buf, size, size * 8);
        else
                add_device_randomness(buf, size);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);