From 369d11587339ce74f8ebc76f2607fe55545eaf7d Mon Sep 17 00:00:00 2001
From: garhve
Date: Tue, 20 Dec 2022 11:04:25 +0800
Subject: Build small project following the book

---
 .../doc/src/rand/distributions/bernoulli.rs.html   |  441 +++
 .../src/rand/distributions/distribution.rs.html    |  547 ++++
 .../doc/src/rand/distributions/float.rs.html       |  627 ++++
 .../doc/src/rand/distributions/integer.rs.html     |  551 ++++
 .../target/doc/src/rand/distributions/mod.rs.html  |  439 +++
 .../doc/src/rand/distributions/other.rs.html       |  733 +++++
 .../doc/src/rand/distributions/slice.rs.html       |  237 ++
 .../doc/src/rand/distributions/uniform.rs.html     | 3319 ++++++++++++++++++++
 .../doc/src/rand/distributions/utils.rs.html       |  861 +++++
 .../doc/src/rand/distributions/weighted.rs.html    |   97 +
 .../src/rand/distributions/weighted_index.rs.html  |  919 ++++++
 11 files changed, 8771 insertions(+)
 create mode 100644 rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/bernoulli.rs.html
 create mode 100644 rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/distribution.rs.html
 create mode 100644 rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/float.rs.html
 create mode 100644 rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/integer.rs.html
 create mode 100644 rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/mod.rs.html
 create mode 100644 rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/other.rs.html
 create mode 100644 rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/slice.rs.html
 create mode 100644 rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/uniform.rs.html
 create mode 100644 rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/utils.rs.html
 create mode 100644 rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/weighted.rs.html
 create mode 100644 rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/weighted_index.rs.html

(limited to 'rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions')

diff --git a/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/bernoulli.rs.html b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/bernoulli.rs.html
new file mode 100644
index 0000000..8b882e8
--- /dev/null
+++ b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/bernoulli.rs.html
@@ -0,0 +1,441 @@
+bernoulli.rs - source
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The Bernoulli distribution.
+
+use crate::distributions::Distribution;
+use crate::Rng;
+use core::{fmt, u64};
+
+#[cfg(feature = "serde1")]
+use serde::{Serialize, Deserialize};
+/// The Bernoulli distribution.
+///
+/// This is a special case of the Binomial distribution where `n = 1`.
+///
+/// # Example
+///
+/// ```rust
+/// use rand::distributions::{Bernoulli, Distribution};
+///
+/// let d = Bernoulli::new(0.3).unwrap();
+/// let v = d.sample(&mut rand::thread_rng());
+/// println!("{} is from a Bernoulli distribution", v);
+/// ```
+///
+/// # Precision
+///
+/// This `Bernoulli` distribution uses 64 bits from the RNG (a `u64`),
+/// so only probabilities that are multiples of 2<sup>-64</sup> can be
+/// represented.
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+pub struct Bernoulli {
+    /// Probability of success, relative to the maximal integer.
+    p_int: u64,
+}
+
+// To sample from the Bernoulli distribution we use a method that compares a
+// random `u64` value `v < (p * 2^64)`.
+//
+// If `p == 1.0`, the integer `v` to compare against cannot be represented as
+// a `u64`. We manually set it to `u64::MAX` instead (2^64 - 1 instead of 2^64).
+// Note that a value of `p < 1.0` can never result in `u64::MAX`, because an
+// `f64` only has 53 bits of precision, and the largest representable `p` below
+// 1.0 maps to `2^64 - 2048`.
+//
+// Also there is a 100% theoretical concern: if someone consistently wants to
+// generate `true` using the Bernoulli distribution (i.e. by using a probability
+// of `1.0`), just using `u64::MAX` is not enough. On average it would return
+// false once every 2^64 iterations. Some people apparently care about this
+// case.
+//
+// That is why we special-case `u64::MAX` to always return `true`, without using
+// the RNG, and pay the performance price for all uses that *are* reasonable.
+// Luckily, if `new()` and `sample` are close, the compiler can optimize out the
+// extra check.
+const ALWAYS_TRUE: u64 = u64::MAX;
+
+// This is just `2.0.powi(64)`, but written this way because `powi` is not
+// available in `no_std` mode.
+const SCALE: f64 = 2.0 * (1u64 << 63) as f64;
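[Editor's note: the comparison method described in the comment above is easy to check empirically. The stand-alone sketch below is an editorial addition, not part of the patch; it assumes the `rand` 0.8 crate as a dependency and estimates the acceptance rate for p = 0.25.]

    use rand::Rng;

    fn main() {
        let p = 0.25_f64;
        let scale = 2.0 * (1u64 << 63) as f64; // 2^64, same trick as SCALE above
        let p_int = (p * scale) as u64;        // integer threshold p * 2^64
        let mut rng = rand::thread_rng();
        let trials = 1_000_000;
        let hits = (0..trials).filter(|_| rng.gen::<u64>() < p_int).count();
        println!("empirical p = {}", hits as f64 / trials as f64); // ~0.25
    }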
+
+/// Error type returned from `Bernoulli::new`.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum BernoulliError {
+    /// `p < 0` or `p > 1`.
+    InvalidProbability,
+}
+
+impl fmt::Display for BernoulliError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(match self {
+            BernoulliError::InvalidProbability => "p is outside [0, 1] in Bernoulli distribution",
+        })
+    }
+}
+
+#[cfg(feature = "std")]
+impl ::std::error::Error for BernoulliError {}
+
+impl Bernoulli {
+    /// Construct a new `Bernoulli` with the given probability of success `p`.
+    ///
+    /// # Precision
+    ///
+    /// For `p = 1.0`, the resulting distribution will always generate true.
+    /// For `p = 0.0`, the resulting distribution will always generate false.
+    ///
+    /// This method is accurate for any input `p` in the range `[0, 1]` which is
+    /// a multiple of 2<sup>-64</sup>. (Note that not all multiples of
+    /// 2<sup>-64</sup> in `[0, 1]` can be represented as a `f64`.)
+    #[inline]
+    pub fn new(p: f64) -> Result<Bernoulli, BernoulliError> {
+        if !(0.0..1.0).contains(&p) {
+            if p == 1.0 {
+                return Ok(Bernoulli { p_int: ALWAYS_TRUE });
+            }
+            return Err(BernoulliError::InvalidProbability);
+        }
+        Ok(Bernoulli {
+            p_int: (p * SCALE) as u64,
+        })
+    }
+
+    /// Construct a new `Bernoulli` with the probability of success of
+    /// `numerator`-in-`denominator`. I.e. `from_ratio(2, 3)` will return
+    /// a `Bernoulli` with a 2-in-3 chance, or about 67%, of returning `true`.
+    ///
+    /// If `numerator == denominator` the samples are always `true`; if
+    /// `numerator == 0` they are always `false`. If `numerator > denominator`
+    /// or `denominator == 0`, this returns an error.
+    #[inline]
+    pub fn from_ratio(numerator: u32, denominator: u32) -> Result<Bernoulli, BernoulliError> {
+        if numerator > denominator || denominator == 0 {
+            return Err(BernoulliError::InvalidProbability);
+        }
+        if numerator == denominator {
+            return Ok(Bernoulli { p_int: ALWAYS_TRUE });
+        }
+        let p_int = ((f64::from(numerator) / f64::from(denominator)) * SCALE) as u64;
+        Ok(Bernoulli { p_int })
+    }
+}
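[Editor's note: a brief usage sketch for the two constructors above — an editorial addition assuming `rand` 0.8 as a dependency.]

    use rand::distributions::{Bernoulli, Distribution};

    fn main() {
        let mut rng = rand::thread_rng();
        let d = Bernoulli::from_ratio(2, 3).unwrap();   // ~2-in-3 chance of `true`
        let trues = (0..1000).filter(|_| d.sample(&mut rng)).count();
        println!("{} of 1000 samples were true", trues);
        assert!(Bernoulli::from_ratio(3, 2).is_err());  // numerator > denominator
    }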
+
+impl Distribution<bool> for Bernoulli {
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> bool {
+        // Make sure to always return true for p = 1.0.
+        if self.p_int == ALWAYS_TRUE {
+            return true;
+        }
+        let v: u64 = rng.gen();
+        v < self.p_int
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::Bernoulli;
+    use crate::distributions::Distribution;
+    use crate::Rng;
+
+    #[test]
+    #[cfg(feature="serde1")]
+    fn test_serializing_deserializing_bernoulli() {
+        let coin_flip = Bernoulli::new(0.5).unwrap();
+        let de_coin_flip : Bernoulli = bincode::deserialize(&bincode::serialize(&coin_flip).unwrap()).unwrap();
+
+        assert_eq!(coin_flip.p_int, de_coin_flip.p_int);
+    }
+
+    #[test]
+    fn test_trivial() {
+        // We prefer to be explicit here.
+        #![allow(clippy::bool_assert_comparison)]
+
+        let mut r = crate::test::rng(1);
+        let always_false = Bernoulli::new(0.0).unwrap();
+        let always_true = Bernoulli::new(1.0).unwrap();
+        for _ in 0..5 {
+            assert_eq!(r.sample::<bool, _>(&always_false), false);
+            assert_eq!(r.sample::<bool, _>(&always_true), true);
+            assert_eq!(Distribution::<bool>::sample(&always_false, &mut r), false);
+            assert_eq!(Distribution::<bool>::sample(&always_true, &mut r), true);
+        }
+    }
+
+    #[test]
+    #[cfg_attr(miri, ignore)] // Miri is too slow
+    fn test_average() {
+        const P: f64 = 0.3;
+        const NUM: u32 = 3;
+        const DENOM: u32 = 10;
+        let d1 = Bernoulli::new(P).unwrap();
+        let d2 = Bernoulli::from_ratio(NUM, DENOM).unwrap();
+        const N: u32 = 100_000;
+
+        let mut sum1: u32 = 0;
+        let mut sum2: u32 = 0;
+        let mut rng = crate::test::rng(2);
+        for _ in 0..N {
+            if d1.sample(&mut rng) {
+                sum1 += 1;
+            }
+            if d2.sample(&mut rng) {
+                sum2 += 1;
+            }
+        }
+        let avg1 = (sum1 as f64) / (N as f64);
+        assert!((avg1 - P).abs() < 5e-3);
+
+        let avg2 = (sum2 as f64) / (N as f64);
+        assert!((avg2 - (NUM as f64) / (DENOM as f64)).abs() < 5e-3);
+    }
+
+    #[test]
+    fn value_stability() {
+        let mut rng = crate::test::rng(3);
+        let distr = Bernoulli::new(0.4532).unwrap();
+        let mut buf = [false; 10];
+        for x in &mut buf {
+            *x = rng.sample(&distr);
+        }
+        assert_eq!(buf, [
+            true, false, false, true, false, false, true, true, true, true
+        ]);
+    }
+
+    #[test]
+    fn bernoulli_distributions_can_be_compared() {
+        assert_eq!(Bernoulli::new(1.0), Bernoulli::new(1.0));
+    }
+}
+
+
\ No newline at end of file
diff --git a/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/distribution.rs.html b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/distribution.rs.html
new file mode 100644
index 0000000..afc8925
--- /dev/null
+++ b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/distribution.rs.html
@@ -0,0 +1,547 @@
+distribution.rs - source
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013-2017 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Distribution trait and associates
+
+use crate::Rng;
+use core::iter;
+#[cfg(feature = "alloc")]
+use alloc::string::String;
+
+/// Types (distributions) that can be used to create a random instance of `T`.
+///
+/// It is possible to sample from a distribution through both the
+/// `Distribution` and [`Rng`] traits, via `distr.sample(&mut rng)` and
+/// `rng.sample(distr)`. They also both offer the [`sample_iter`] method, which
+/// produces an iterator that samples from the distribution.
+///
+/// All implementations are expected to be immutable; this has the significant
+/// advantage of not needing to consider thread safety, and for most
+/// distributions efficient state-less sampling algorithms are available.
+///
+/// Implementations are typically expected to be portable with reproducible
+/// results when used with a PRNG with fixed seed; see the
+/// [portability chapter](https://rust-random.github.io/book/portability.html)
+/// of The Rust Rand Book. In some cases this does not apply, e.g. the `usize`
+/// type requires different sampling on 32-bit and 64-bit machines.
+///
+/// [`sample_iter`]: Distribution::sample_iter
+pub trait Distribution<T> {
+    /// Generate a random value of `T`, using `rng` as the source of randomness.
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T;
+
+    /// Create an iterator that generates random values of `T`, using `rng` as
+    /// the source of randomness.
+    ///
+    /// Note that this function takes `self` by value. This works since
+    /// `Distribution<T>` is impl'd for `&D` where `D: Distribution<T>`,
+    /// however borrowing is not automatic hence `distr.sample_iter(...)` may
+    /// need to be replaced with `(&distr).sample_iter(...)` to borrow or
+    /// `(&*distr).sample_iter(...)` to reborrow an existing reference.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use rand::thread_rng;
+    /// use rand::distributions::{Distribution, Alphanumeric, Uniform, Standard};
+    ///
+    /// let mut rng = thread_rng();
+    ///
+    /// // Vec of 16 x f32:
+    /// let v: Vec<f32> = Standard.sample_iter(&mut rng).take(16).collect();
+    ///
+    /// // String:
+    /// let s: String = Alphanumeric
+    ///     .sample_iter(&mut rng)
+    ///     .take(7)
+    ///     .map(char::from)
+    ///     .collect();
+    ///
+    /// // Dice-rolling:
+    /// let die_range = Uniform::new_inclusive(1, 6);
+    /// let mut roll_die = die_range.sample_iter(&mut rng);
+    /// while roll_die.next().unwrap() != 6 {
+    ///     println!("Not a 6; rolling again!");
+    /// }
+    /// ```
+    fn sample_iter<R>(self, rng: R) -> DistIter<Self, R, T>
+    where
+        R: Rng,
+        Self: Sized,
+    {
+        DistIter {
+            distr: self,
+            rng,
+            phantom: ::core::marker::PhantomData,
+        }
+    }
+
+    /// Create a distribution of values of 'S' by mapping the output of `Self`
+    /// through the closure `F`
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use rand::thread_rng;
+    /// use rand::distributions::{Distribution, Uniform};
+    ///
+    /// let mut rng = thread_rng();
+    ///
+    /// let die = Uniform::new_inclusive(1, 6);
+    /// let even_number = die.map(|num| num % 2 == 0);
+    /// while !even_number.sample(&mut rng) {
+    ///     println!("Still odd; rolling again!");
+    /// }
+    /// ```
+    fn map<F, S>(self, func: F) -> DistMap<Self, F, T, S>
+    where
+        F: Fn(T) -> S,
+        Self: Sized,
+    {
+        DistMap {
+            distr: self,
+            func,
+            phantom: ::core::marker::PhantomData,
+        }
+    }
+}
+
+impl<'a, T, D: Distribution<T>> Distribution<T> for &'a D {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T {
+        (*self).sample(rng)
+    }
+}
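[Editor's note: because of the blanket impl above, a distribution can also be sampled through a shared reference — this is the `(&distr).sample_iter(...)` pattern mentioned in the `sample_iter` docs. A small editorial sketch:]

    use rand::distributions::{Distribution, Uniform};

    fn main() {
        let die = Uniform::new_inclusive(1, 6);
        let mut rng = rand::thread_rng();
        // Borrow `die` so it can still be used afterwards:
        let rolls: Vec<i32> = (&die).sample_iter(&mut rng).take(3).collect();
        println!("{:?} then {}", rolls, die.sample(&mut rng));
    }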
+
+/// An iterator that generates random values of `T` with distribution `D`,
+/// using `R` as the source of randomness.
+///
+/// This `struct` is created by the [`sample_iter`] method on [`Distribution`].
+/// See its documentation for more.
+///
+/// [`sample_iter`]: Distribution::sample_iter
+#[derive(Debug)]
+pub struct DistIter<D, R, T> {
+    distr: D,
+    rng: R,
+    phantom: ::core::marker::PhantomData<T>,
+}
+
+impl<D, R, T> Iterator for DistIter<D, R, T>
+where
+    D: Distribution<T>,
+    R: Rng,
+{
+    type Item = T;
+
+    #[inline(always)]
+    fn next(&mut self) -> Option<T> {
+        // Here, self.rng may be a reference, but we must take &mut anyway.
+        // Even if sample could take an R: Rng by value, we would need to do this
+        // since Rng is not copyable and we cannot enforce that this is "reborrowable".
+        Some(self.distr.sample(&mut self.rng))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (usize::max_value(), None)
+    }
+}
+
+impl<D, R, T> iter::FusedIterator for DistIter<D, R, T>
+where
+    D: Distribution<T>,
+    R: Rng,
+{
+}
+
+#[cfg(feature = "nightly")]
+impl<D, R, T> iter::TrustedLen for DistIter<D, R, T>
+where
+    D: Distribution<T>,
+    R: Rng,
+{
+}
+
+/// A distribution of values of type `S` derived from the distribution `D`
+/// by mapping its output of type `T` through the closure `F`.
+///
+/// This `struct` is created by the [`Distribution::map`] method.
+/// See its documentation for more.
+#[derive(Debug)]
+pub struct DistMap<D, F, T, S> {
+    distr: D,
+    func: F,
+    phantom: ::core::marker::PhantomData<fn(T) -> S>,
+}
+
+impl<D, F, T, S> Distribution<S> for DistMap<D, F, T, S>
+where
+    D: Distribution<T>,
+    F: Fn(T) -> S,
+{
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> S {
+        (self.func)(self.distr.sample(rng))
+    }
+}
+
+/// `String` sampler
+///
+/// Sampling a `String` of random characters is not quite the same as collecting
+/// a sequence of chars. This trait contains some helpers.
+#[cfg(feature = "alloc")]
+pub trait DistString {
+    /// Append `len` random chars to `string`
+    fn append_string<R: Rng + ?Sized>(&self, rng: &mut R, string: &mut String, len: usize);
+
+    /// Generate a `String` of `len` random chars
+    #[inline]
+    fn sample_string<R: Rng + ?Sized>(&self, rng: &mut R, len: usize) -> String {
+        let mut s = String::new();
+        self.append_string(rng, &mut s, len);
+        s
+    }
+}
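[Editor's note: a small usage sketch for `DistString` — an editorial addition. In rand 0.8.5 `Alphanumeric` and `Standard` implement this trait, as the tests below also exercise.]

    use rand::distributions::{Alphanumeric, DistString};

    fn main() {
        let token = Alphanumeric.sample_string(&mut rand::thread_rng(), 16);
        println!("random token: {}", token); // 16 alphanumeric characters
    }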
+
+#[cfg(test)]
+mod tests {
+    use crate::distributions::{Distribution, Uniform};
+    use crate::Rng;
+
+    #[test]
+    fn test_distributions_iter() {
+        use crate::distributions::Open01;
+        let mut rng = crate::test::rng(210);
+        let distr = Open01;
+        let mut iter = Distribution::<f32>::sample_iter(distr, &mut rng);
+        let mut sum: f32 = 0.;
+        for _ in 0..100 {
+            sum += iter.next().unwrap();
+        }
+        assert!(0. < sum && sum < 100.);
+    }
+
+    #[test]
+    fn test_distributions_map() {
+        let dist = Uniform::new_inclusive(0, 5).map(|val| val + 15);
+
+        let mut rng = crate::test::rng(212);
+        let val = dist.sample(&mut rng);
+        assert!((15..=20).contains(&val));
+    }
+
+    #[test]
+    fn test_make_an_iter() {
+        fn ten_dice_rolls_other_than_five<R: Rng>(
+            rng: &mut R,
+        ) -> impl Iterator<Item = i32> + '_ {
+            Uniform::new_inclusive(1, 6)
+                .sample_iter(rng)
+                .filter(|x| *x != 5)
+                .take(10)
+        }
+
+        let mut rng = crate::test::rng(211);
+        let mut count = 0;
+        for val in ten_dice_rolls_other_than_five(&mut rng) {
+            assert!((1..=6).contains(&val) && val != 5);
+            count += 1;
+        }
+        assert_eq!(count, 10);
+    }
+
+    #[test]
+    #[cfg(feature = "alloc")]
+    fn test_dist_string() {
+        use core::str;
+        use crate::distributions::{Alphanumeric, DistString, Standard};
+        let mut rng = crate::test::rng(213);
+
+        let s1 = Alphanumeric.sample_string(&mut rng, 20);
+        assert_eq!(s1.len(), 20);
+        assert_eq!(str::from_utf8(s1.as_bytes()), Ok(s1.as_str()));
+
+        let s2 = Standard.sample_string(&mut rng, 20);
+        assert_eq!(s2.chars().count(), 20);
+        assert_eq!(str::from_utf8(s2.as_bytes()), Ok(s2.as_str()));
+    }
+}
+
+
\ No newline at end of file
diff --git a/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/float.rs.html b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/float.rs.html
new file mode 100644
index 0000000..cba1f5b
--- /dev/null
+++ b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/float.rs.html
@@ -0,0 +1,627 @@
+float.rs - source
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Basic floating-point number distributions
+
+use crate::distributions::utils::FloatSIMDUtils;
+use crate::distributions::{Distribution, Standard};
+use crate::Rng;
+use core::mem;
+#[cfg(feature = "simd_support")] use packed_simd::*;
+
+#[cfg(feature = "serde1")]
+use serde::{Serialize, Deserialize};
+
+/// A distribution to sample floating point numbers uniformly in the half-open
+/// interval `(0, 1]`, i.e. including 1 but not 0.
+///
+/// All values that can be generated are of the form `n * ε/2`. For `f32`
+/// the 24 most significant random bits of a `u32` are used and for `f64` the
+/// 53 most significant bits of a `u64` are used. The conversion uses the
+/// multiplicative method.
+///
+/// See also: [`Standard`] which samples from `[0, 1)`, [`Open01`]
+/// which samples from `(0, 1)` and [`Uniform`] which samples from arbitrary
+/// ranges.
+///
+/// # Example
+/// ```
+/// use rand::{thread_rng, Rng};
+/// use rand::distributions::OpenClosed01;
+///
+/// let val: f32 = thread_rng().sample(OpenClosed01);
+/// println!("f32 from (0, 1): {}", val);
+/// ```
+///
+/// [`Standard`]: crate::distributions::Standard
+/// [`Open01`]: crate::distributions::Open01
+/// [`Uniform`]: crate::distributions::uniform::Uniform
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+pub struct OpenClosed01;
+
+/// A distribution to sample floating point numbers uniformly in the open
+/// interval `(0, 1)`, i.e. not including either endpoint.
+///
+/// All values that can be generated are of the form `n * ε + ε/2`. For `f32`
+/// the 23 most significant random bits of a `u32` are used, and for `f64` the
+/// 52 most significant bits of a `u64`. The conversion uses a transmute-based
+/// method.
+///
+/// See also: [`Standard`] which samples from `[0, 1)`, [`OpenClosed01`]
+/// which samples from `(0, 1]` and [`Uniform`] which samples from arbitrary
+/// ranges.
+///
+/// # Example
+/// ```
+/// use rand::{thread_rng, Rng};
+/// use rand::distributions::Open01;
+///
+/// let val: f32 = thread_rng().sample(Open01);
+/// println!("f32 from (0, 1): {}", val);
+/// ```
+///
+/// [`Standard`]: crate::distributions::Standard
+/// [`OpenClosed01`]: crate::distributions::OpenClosed01
+/// [`Uniform`]: crate::distributions::uniform::Uniform
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+pub struct Open01;
+
+
+// This trait is needed by both this lib and rand_distr hence is a hidden export
+#[doc(hidden)]
+pub trait IntoFloat {
+    type F;
+
+    /// Helper method to combine the fraction and a constant exponent into a
+    /// float.
+    ///
+    /// Only the least significant bits of `self` may be set, 23 for `f32` and
+    /// 52 for `f64`.
+    /// The resulting value will fall in a range that depends on the exponent.
+    /// As an example the range with exponent 0 will be
+    /// [2<sup>0</sup>..2<sup>1</sup>), which is [1..2).
+    fn into_float_with_exponent(self, exponent: i32) -> Self::F;
+}
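[Editor's note: the exponent trick documented above can be checked directly. An editorial sketch for `f64` (52 fraction bits, exponent bias 1023); exponent 0 places the result in [1, 2), which can then be shifted down.]

    fn main() {
        let fraction: u64 = 1 << 51;             // some 52-bit fraction (here: 0.5)
        let exponent_bits: u64 = 1023u64 << 52;  // biased exponent encoding 2^0
        let x = f64::from_bits(fraction | exponent_bits);
        assert!(x >= 1.0 && x < 2.0);            // exponent 0 gives the range [1, 2)
        println!("{}", x - 1.0);                 // 0.5, shifted into [0, 1)
    }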
+
+macro_rules! float_impls {
+    ($ty:ident, $uty:ident, $f_scalar:ident, $u_scalar:ty,
+     $fraction_bits:expr, $exponent_bias:expr) => {
+        impl IntoFloat for $uty {
+            type F = $ty;
+            #[inline(always)]
+            fn into_float_with_exponent(self, exponent: i32) -> $ty {
+                // The exponent is encoded using an offset-binary representation
+                let exponent_bits: $u_scalar =
+                    (($exponent_bias + exponent) as $u_scalar) << $fraction_bits;
+                $ty::from_bits(self | exponent_bits)
+            }
+        }
+
+        impl Distribution<$ty> for Standard {
+            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+                // Multiply-based method; 24/53 random bits; [0, 1) interval.
+                // We use the most significant bits because for simple RNGs
+                // those are usually more random.
+                let float_size = mem::size_of::<$f_scalar>() as u32 * 8;
+                let precision = $fraction_bits + 1;
+                let scale = 1.0 / ((1 as $u_scalar << precision) as $f_scalar);
+
+                let value: $uty = rng.gen();
+                let value = value >> (float_size - precision);
+                scale * $ty::cast_from_int(value)
+            }
+        }
+
+        impl Distribution<$ty> for OpenClosed01 {
+            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+                // Multiply-based method; 24/53 random bits; (0, 1] interval.
+                // We use the most significant bits because for simple RNGs
+                // those are usually more random.
+                let float_size = mem::size_of::<$f_scalar>() as u32 * 8;
+                let precision = $fraction_bits + 1;
+                let scale = 1.0 / ((1 as $u_scalar << precision) as $f_scalar);
+
+                let value: $uty = rng.gen();
+                let value = value >> (float_size - precision);
+                // Add 1 to shift up; will not overflow because of right-shift:
+                scale * $ty::cast_from_int(value + 1)
+            }
+        }
+
+        impl Distribution<$ty> for Open01 {
+            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+                // Transmute-based method; 23/52 random bits; (0, 1) interval.
+                // We use the most significant bits because for simple RNGs
+                // those are usually more random.
+                use core::$f_scalar::EPSILON;
+                let float_size = mem::size_of::<$f_scalar>() as u32 * 8;
+
+                let value: $uty = rng.gen();
+                let fraction = value >> (float_size - $fraction_bits);
+                fraction.into_float_with_exponent(0) - (1.0 - EPSILON / 2.0)
+            }
+        }
+    }
+}
+
+float_impls! { f32, u32, f32, u32, 23, 127 }
+float_impls! { f64, u64, f64, u64, 52, 1023 }
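[Editor's note: as a stand-alone illustration of the multiplicative method the macro expands for `Standard`, here is an editorial sketch of the `f64` case (53 random bits scaled by 2^-53); it mirrors the generated impl but is not part of the patch.]

    fn u64_to_standard_f64(x: u64) -> f64 {
        let precision = 52 + 1;                          // fraction bits + 1
        let scale = 1.0 / ((1u64 << precision) as f64);  // 2^-53
        let value = x >> (64 - precision);               // keep 53 most significant bits
        scale * value as f64                             // uniform in [0, 1)
    }

    fn main() {
        assert_eq!(u64_to_standard_f64(0), 0.0);
        assert!(u64_to_standard_f64(u64::MAX) < 1.0);    // largest output is 1 - 2^-53
        println!("{}", u64_to_standard_f64(u64::MAX));
    }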
+
+#[cfg(feature = "simd_support")]
+float_impls! { f32x2, u32x2, f32, u32, 23, 127 }
+#[cfg(feature = "simd_support")]
+float_impls! { f32x4, u32x4, f32, u32, 23, 127 }
+#[cfg(feature = "simd_support")]
+float_impls! { f32x8, u32x8, f32, u32, 23, 127 }
+#[cfg(feature = "simd_support")]
+float_impls! { f32x16, u32x16, f32, u32, 23, 127 }
+
+#[cfg(feature = "simd_support")]
+float_impls! { f64x2, u64x2, f64, u64, 52, 1023 }
+#[cfg(feature = "simd_support")]
+float_impls! { f64x4, u64x4, f64, u64, 52, 1023 }
+#[cfg(feature = "simd_support")]
+float_impls! { f64x8, u64x8, f64, u64, 52, 1023 }
+
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::rngs::mock::StepRng;
+
+    const EPSILON32: f32 = ::core::f32::EPSILON;
+    const EPSILON64: f64 = ::core::f64::EPSILON;
+
+    macro_rules! test_f32 {
+        ($fnn:ident, $ty:ident, $ZERO:expr, $EPSILON:expr) => {
+            #[test]
+            fn $fnn() {
+                // Standard
+                let mut zeros = StepRng::new(0, 0);
+                assert_eq!(zeros.gen::<$ty>(), $ZERO);
+                let mut one = StepRng::new(1 << 8 | 1 << (8 + 32), 0);
+                assert_eq!(one.gen::<$ty>(), $EPSILON / 2.0);
+                let mut max = StepRng::new(!0, 0);
+                assert_eq!(max.gen::<$ty>(), 1.0 - $EPSILON / 2.0);
+
+                // OpenClosed01
+                let mut zeros = StepRng::new(0, 0);
+                assert_eq!(zeros.sample::<$ty, _>(OpenClosed01), 0.0 + $EPSILON / 2.0);
+                let mut one = StepRng::new(1 << 8 | 1 << (8 + 32), 0);
+                assert_eq!(one.sample::<$ty, _>(OpenClosed01), $EPSILON);
+                let mut max = StepRng::new(!0, 0);
+                assert_eq!(max.sample::<$ty, _>(OpenClosed01), $ZERO + 1.0);
+
+                // Open01
+                let mut zeros = StepRng::new(0, 0);
+                assert_eq!(zeros.sample::<$ty, _>(Open01), 0.0 + $EPSILON / 2.0);
+                let mut one = StepRng::new(1 << 9 | 1 << (9 + 32), 0);
+                assert_eq!(one.sample::<$ty, _>(Open01), $EPSILON / 2.0 * 3.0);
+                let mut max = StepRng::new(!0, 0);
+                assert_eq!(max.sample::<$ty, _>(Open01), 1.0 - $EPSILON / 2.0);
+            }
+        };
+    }
+    test_f32! { f32_edge_cases, f32, 0.0, EPSILON32 }
+    #[cfg(feature = "simd_support")]
+    test_f32! { f32x2_edge_cases, f32x2, f32x2::splat(0.0), f32x2::splat(EPSILON32) }
+    #[cfg(feature = "simd_support")]
+    test_f32! { f32x4_edge_cases, f32x4, f32x4::splat(0.0), f32x4::splat(EPSILON32) }
+    #[cfg(feature = "simd_support")]
+    test_f32! { f32x8_edge_cases, f32x8, f32x8::splat(0.0), f32x8::splat(EPSILON32) }
+    #[cfg(feature = "simd_support")]
+    test_f32! { f32x16_edge_cases, f32x16, f32x16::splat(0.0), f32x16::splat(EPSILON32) }
+
+    macro_rules! test_f64 {
+        ($fnn:ident, $ty:ident, $ZERO:expr, $EPSILON:expr) => {
+            #[test]
+            fn $fnn() {
+                // Standard
+                let mut zeros = StepRng::new(0, 0);
+                assert_eq!(zeros.gen::<$ty>(), $ZERO);
+                let mut one = StepRng::new(1 << 11, 0);
+                assert_eq!(one.gen::<$ty>(), $EPSILON / 2.0);
+                let mut max = StepRng::new(!0, 0);
+                assert_eq!(max.gen::<$ty>(), 1.0 - $EPSILON / 2.0);
+
+                // OpenClosed01
+                let mut zeros = StepRng::new(0, 0);
+                assert_eq!(zeros.sample::<$ty, _>(OpenClosed01), 0.0 + $EPSILON / 2.0);
+                let mut one = StepRng::new(1 << 11, 0);
+                assert_eq!(one.sample::<$ty, _>(OpenClosed01), $EPSILON);
+                let mut max = StepRng::new(!0, 0);
+                assert_eq!(max.sample::<$ty, _>(OpenClosed01), $ZERO + 1.0);
+
+                // Open01
+                let mut zeros = StepRng::new(0, 0);
+                assert_eq!(zeros.sample::<$ty, _>(Open01), 0.0 + $EPSILON / 2.0);
+                let mut one = StepRng::new(1 << 12, 0);
+                assert_eq!(one.sample::<$ty, _>(Open01), $EPSILON / 2.0 * 3.0);
+                let mut max = StepRng::new(!0, 0);
+                assert_eq!(max.sample::<$ty, _>(Open01), 1.0 - $EPSILON / 2.0);
+            }
+        };
+    }
+    test_f64! { f64_edge_cases, f64, 0.0, EPSILON64 }
+    #[cfg(feature = "simd_support")]
+    test_f64! { f64x2_edge_cases, f64x2, f64x2::splat(0.0), f64x2::splat(EPSILON64) }
+    #[cfg(feature = "simd_support")]
+    test_f64! { f64x4_edge_cases, f64x4, f64x4::splat(0.0), f64x4::splat(EPSILON64) }
+    #[cfg(feature = "simd_support")]
+    test_f64! { f64x8_edge_cases, f64x8, f64x8::splat(0.0), f64x8::splat(EPSILON64) }
+
+    #[test]
+    fn value_stability() {
+        fn test_samples<T: Copy + core::fmt::Debug + PartialEq, D: Distribution<T>>(
+            distr: &D, zero: T, expected: &[T],
+        ) {
+            let mut rng = crate::test::rng(0x6f44f5646c2a7334);
+            let mut buf = [zero; 3];
+            for x in &mut buf {
+                *x = rng.sample(&distr);
+            }
+            assert_eq!(&buf, expected);
+        }
+
+        test_samples(&Standard, 0f32, &[0.0035963655, 0.7346052, 0.09778172]);
+        test_samples(&Standard, 0f64, &[
+            0.7346051961657583,
+            0.20298547462974248,
+            0.8166436635290655,
+        ]);
+
+        test_samples(&OpenClosed01, 0f32, &[0.003596425, 0.73460525, 0.09778178]);
+        test_samples(&OpenClosed01, 0f64, &[
+            0.7346051961657584,
+            0.2029854746297426,
+            0.8166436635290656,
+        ]);
+
+        test_samples(&Open01, 0f32, &[0.0035963655, 0.73460525, 0.09778172]);
+        test_samples(&Open01, 0f64, &[
+            0.7346051961657584,
+            0.20298547462974248,
+            0.8166436635290656,
+        ]);
+
+        #[cfg(feature = "simd_support")]
+        {
+            // We only test a sub-set of types here. Values are identical to
+            // non-SIMD types; we assume this pattern continues across all
+            // SIMD types.
+
+            test_samples(&Standard, f32x2::new(0.0, 0.0), &[
+                f32x2::new(0.0035963655, 0.7346052),
+                f32x2::new(0.09778172, 0.20298547),
+                f32x2::new(0.34296435, 0.81664366),
+            ]);
+
+            test_samples(&Standard, f64x2::new(0.0, 0.0), &[
+                f64x2::new(0.7346051961657583, 0.20298547462974248),
+                f64x2::new(0.8166436635290655, 0.7423708925400552),
+                f64x2::new(0.16387782224016323, 0.9087068770169618),
+            ]);
+        }
+    }
+}
+
+
\ No newline at end of file
diff --git a/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/integer.rs.html b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/integer.rs.html
new file mode 100644
index 0000000..0fbac64
--- /dev/null
+++ b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/integer.rs.html
@@ -0,0 +1,551 @@
+integer.rs - source
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The implementations of the `Standard` distribution for integer types.
+
+use crate::distributions::{Distribution, Standard};
+use crate::Rng;
+#[cfg(all(target_arch = "x86", feature = "simd_support"))]
+use core::arch::x86::{__m128i, __m256i};
+#[cfg(all(target_arch = "x86_64", feature = "simd_support"))]
+use core::arch::x86_64::{__m128i, __m256i};
+use core::num::{NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize,
+    NonZeroU128};
+#[cfg(feature = "simd_support")] use packed_simd::*;
+
+impl Distribution<u8> for Standard {
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u8 {
+        rng.next_u32() as u8
+    }
+}
+
+impl Distribution<u16> for Standard {
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u16 {
+        rng.next_u32() as u16
+    }
+}
+
+impl Distribution<u32> for Standard {
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u32 {
+        rng.next_u32()
+    }
+}
+
+impl Distribution<u64> for Standard {
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
+        rng.next_u64()
+    }
+}
+
+impl Distribution<u128> for Standard {
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u128 {
+        // Use LE; we explicitly generate one value before the next.
+        let x = u128::from(rng.next_u64());
+        let y = u128::from(rng.next_u64());
+        (y << 64) | x
+    }
+}
+
+impl Distribution<usize> for Standard {
+    #[inline]
+    #[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
+        rng.next_u32() as usize
+    }
+
+    #[inline]
+    #[cfg(target_pointer_width = "64")]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
+        rng.next_u64() as usize
+    }
+}
+
+macro_rules! impl_int_from_uint {
+    ($ty:ty, $uty:ty) => {
+        impl Distribution<$ty> for Standard {
+            #[inline]
+            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+                rng.gen::<$uty>() as $ty
+            }
+        }
+    };
+}
+
+impl_int_from_uint! { i8, u8 }
+impl_int_from_uint! { i16, u16 }
+impl_int_from_uint! { i32, u32 }
+impl_int_from_uint! { i64, u64 }
+impl_int_from_uint! { i128, u128 }
+impl_int_from_uint! { isize, usize }
+
+macro_rules! impl_nzint {
+    ($ty:ty, $new:path) => {
+        impl Distribution<$ty> for Standard {
+            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+                loop {
+                    if let Some(nz) = $new(rng.gen()) {
+                        break nz;
+                    }
+                }
+            }
+        }
+    };
+}
+
+impl_nzint!(NonZeroU8, NonZeroU8::new);
+impl_nzint!(NonZeroU16, NonZeroU16::new);
+impl_nzint!(NonZeroU32, NonZeroU32::new);
+impl_nzint!(NonZeroU64, NonZeroU64::new);
+impl_nzint!(NonZeroU128, NonZeroU128::new);
+impl_nzint!(NonZeroUsize, NonZeroUsize::new);
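[Editor's note: a short usage sketch tying the impls above together — an editorial addition assuming `rand` 0.8.]

    use rand::Rng;
    use std::num::NonZeroU32;

    fn main() {
        let mut rng = rand::thread_rng();
        let a: u128 = rng.gen();       // combined from two u64 draws, as above
        let b: i16 = rng.gen();        // reinterpreted from a u16 sample
        let c: NonZeroU32 = rng.gen(); // re-drawn until non-zero
        println!("{} {} {}", a, b, c);
    }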
+
+#[cfg(feature = "simd_support")]
+macro_rules! simd_impl {
+    ($(($intrinsic:ident, $vec:ty),)+) => {$(
+        impl Distribution<$intrinsic> for Standard {
+            #[inline]
+            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $intrinsic {
+                $intrinsic::from_bits(rng.gen::<$vec>())
+            }
+        }
+    )+};
+
+    ($bits:expr,) => {};
+    ($bits:expr, $ty:ty, $($ty_more:ty,)*) => {
+        simd_impl!($bits, $($ty_more,)*);
+
+        impl Distribution<$ty> for Standard {
+            #[inline]
+            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
+                let mut vec: $ty = Default::default();
+                unsafe {
+                    let ptr = &mut vec;
+                    let b_ptr = &mut *(ptr as *mut $ty as *mut [u8; $bits/8]);
+                    rng.fill_bytes(b_ptr);
+                }
+                vec.to_le()
+            }
+        }
+    };
+}
+
+#[cfg(feature = "simd_support")]
+simd_impl!(16, u8x2, i8x2,);
+#[cfg(feature = "simd_support")]
+simd_impl!(32, u8x4, i8x4, u16x2, i16x2,);
+#[cfg(feature = "simd_support")]
+simd_impl!(64, u8x8, i8x8, u16x4, i16x4, u32x2, i32x2,);
+#[cfg(feature = "simd_support")]
+simd_impl!(128, u8x16, i8x16, u16x8, i16x8, u32x4, i32x4, u64x2, i64x2,);
+#[cfg(feature = "simd_support")]
+simd_impl!(256, u8x32, i8x32, u16x16, i16x16, u32x8, i32x8, u64x4, i64x4,);
+#[cfg(feature = "simd_support")]
+simd_impl!(512, u8x64, i8x64, u16x32, i16x32, u32x16, i32x16, u64x8, i64x8,);
+#[cfg(all(
+    feature = "simd_support",
+    any(target_arch = "x86", target_arch = "x86_64")
+))]
+simd_impl!((__m128i, u8x16), (__m256i, u8x32),);
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_integers() {
+        let mut rng = crate::test::rng(806);
+
+        rng.sample::<isize, _>(Standard);
+        rng.sample::<i8, _>(Standard);
+        rng.sample::<i16, _>(Standard);
+        rng.sample::<i32, _>(Standard);
+        rng.sample::<i64, _>(Standard);
+        rng.sample::<i128, _>(Standard);
+
+        rng.sample::<usize, _>(Standard);
+        rng.sample::<u8, _>(Standard);
+        rng.sample::<u16, _>(Standard);
+        rng.sample::<u32, _>(Standard);
+        rng.sample::<u64, _>(Standard);
+        rng.sample::<u128, _>(Standard);
+    }
+
+    #[test]
+    fn value_stability() {
+        fn test_samples<T: Copy + core::fmt::Debug + PartialEq>(zero: T, expected: &[T])
+        where Standard: Distribution<T> {
+            let mut rng = crate::test::rng(807);
+            let mut buf = [zero; 3];
+            for x in &mut buf {
+                *x = rng.sample(Standard);
+            }
+            assert_eq!(&buf, expected);
+        }
+
+        test_samples(0u8, &[9, 247, 111]);
+        test_samples(0u16, &[32265, 42999, 38255]);
+        test_samples(0u32, &[2220326409, 2575017975, 2018088303]);
+        test_samples(0u64, &[
+            11059617991457472009,
+            16096616328739788143,
+            1487364411147516184,
+        ]);
+        test_samples(0u128, &[
+            296930161868957086625409848350820761097,
+            145644820879247630242265036535529306392,
+            111087889832015897993126088499035356354,
+        ]);
+        #[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))]
+        test_samples(0usize, &[2220326409, 2575017975, 2018088303]);
+        #[cfg(target_pointer_width = "64")]
+        test_samples(0usize, &[
+            11059617991457472009,
+            16096616328739788143,
+            1487364411147516184,
+        ]);
+
+        test_samples(0i8, &[9, -9, 111]);
+        // Skip further i* types: they are simple reinterpretation of u* samples
+
+        #[cfg(feature = "simd_support")]
+        {
+            // We only test a sub-set of types here and make assumptions about the rest.
+
+            test_samples(u8x2::default(), &[
+                u8x2::new(9, 126),
+                u8x2::new(247, 167),
+                u8x2::new(111, 149),
+            ]);
+            test_samples(u8x4::default(), &[
+                u8x4::new(9, 126, 87, 132),
+                u8x4::new(247, 167, 123, 153),
+                u8x4::new(111, 149, 73, 120),
+            ]);
+            test_samples(u8x8::default(), &[
+                u8x8::new(9, 126, 87, 132, 247, 167, 123, 153),
+                u8x8::new(111, 149, 73, 120, 68, 171, 98, 223),
+                u8x8::new(24, 121, 1, 50, 13, 46, 164, 20),
+            ]);
+
+            test_samples(i64x8::default(), &[
+                i64x8::new(
+                    -7387126082252079607,
+                    -2350127744969763473,
+                    1487364411147516184,
+                    7895421560427121838,
+                    602190064936008898,
+                    6022086574635100741,
+                    -5080089175222015595,
+                    -4066367846667249123,
+                ),
+                i64x8::new(
+                    9180885022207963908,
+                    3095981199532211089,
+                    6586075293021332726,
+                    419343203796414657,
+                    3186951873057035255,
+                    5287129228749947252,
+                    444726432079249540,
+                    -1587028029513790706,
+                ),
+                i64x8::new(
+                    6075236523189346388,
+                    1351763722368165432,
+                    -6192309979959753740,
+                    -7697775502176768592,
+                    -4482022114172078123,
+                    7522501477800909500,
+                    -1837258847956201231,
+                    -586926753024886735,
+                ),
+            ]);
+        }
+    }
+}
+
+
\ No newline at end of file
diff --git a/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/mod.rs.html b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/mod.rs.html
new file mode 100644
index 0000000..8e93c46
--- /dev/null
+++ b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/mod.rs.html
@@ -0,0 +1,439 @@
+mod.rs - source
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013-2017 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Generating random samples from probability distributions
+//!
+//! This module is the home of the [`Distribution`] trait and several of its
+//! implementations. It is the workhorse behind some of the convenient
+//! functionality of the [`Rng`] trait, e.g. [`Rng::gen`] and of course
+//! [`Rng::sample`].
+//!
+//! Abstractly, a [probability distribution] describes the probability of
+//! occurrence of each value in its sample space.
+//!
+//! More concretely, an implementation of `Distribution<T>` for type `X` is an
+//! algorithm for choosing values from the sample space (a subset of `T`)
+//! according to the distribution `X` represents, using an external source of
+//! randomness (an RNG supplied to the `sample` function).
+//!
+//! A type `X` may implement `Distribution<T>` for multiple types `T`.
+//! Any type implementing [`Distribution`] is stateless (i.e. immutable),
+//! but it may have internal parameters set at construction time (for example,
+//! [`Uniform`] allows specification of its sample space as a range within `T`).
+//!
+//!
+//! # The `Standard` distribution
+//!
+//! The [`Standard`] distribution is important to mention. This is the
+//! distribution used by [`Rng::gen`] and represents the "default" way to
+//! produce a random value for many different types, including most primitive
+//! types, tuples, arrays, and a few derived types. See the documentation of
+//! [`Standard`] for more details.
+//!
+//! Implementing `Distribution<T>` for [`Standard`] for user types `T` makes it
+//! possible to generate type `T` with [`Rng::gen`], and by extension also
+//! with the [`random`] function.
+//!
+//! ## Random characters
+//!
+//! [`Alphanumeric`] is a simple distribution to sample random letters and
+//! numbers of the `char` type; in contrast [`Standard`] may sample any valid
+//! `char`.
+//!
+//!
+//! # Uniform numeric ranges
+//!
+//! The [`Uniform`] distribution is more flexible than [`Standard`], but also
+//! more specialised: it supports fewer target types, but allows the sample
+//! space to be specified as an arbitrary range within its target type `T`.
+//! Both [`Standard`] and [`Uniform`] are in some sense uniform distributions.
+//!
+//! Values may be sampled from this distribution using [`Rng::sample(Range)`] or
+//! by creating a distribution object with [`Uniform::new`],
+//! [`Uniform::new_inclusive`] or `From<Range>`. When the range limits are not
+//! known at compile time it is typically faster to reuse an existing
+//! `Uniform` object than to call [`Rng::sample(Range)`].
+//!
+//! User types `T` may also implement `Distribution<T>` for [`Uniform`],
+//! although this is less straightforward than for [`Standard`] (see the
+//! documentation in the [`uniform`] module). Doing so enables generation of
+//! values of type `T` with  [`Rng::sample(Range)`].
+//!
+//! ## Open and half-open ranges
+//!
+//! There are surprisingly many ways to uniformly generate random floats. A
+//! range between 0 and 1 is standard, but the exact bounds (open vs closed)
+//! and accuracy differ. In addition to the [`Standard`] distribution Rand offers
+//! [`Open01`] and [`OpenClosed01`]. See "Floating point implementation" section of
+//! [`Standard`] documentation for more details.
+//!
+//! # Non-uniform sampling
+//!
+//! Sampling a simple true/false outcome with a given probability has a name:
+//! the [`Bernoulli`] distribution (this is used by [`Rng::gen_bool`]).
+//!
+//! For weighted sampling from a sequence of discrete values, use the
+//! [`WeightedIndex`] distribution.
+//!
+//! This crate no longer includes other non-uniform distributions; instead
+//! it is recommended that you use either [`rand_distr`] or [`statrs`].
+//!
+//!
+//! [probability distribution]: https://en.wikipedia.org/wiki/Probability_distribution
+//! [`rand_distr`]: https://crates.io/crates/rand_distr
+//! [`statrs`]: https://crates.io/crates/statrs
+//! [`random`]: crate::random
+
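[Editor's note: the module docs above recommend reusing a `Uniform` object when the range limits are only known at run time. A minimal editorial sketch, assuming `rand` 0.8:]

    use rand::distributions::{Distribution, Uniform};

    fn main() {
        let (lo, hi) = (1, 7);              // bounds decided at run time
        let die = Uniform::new(lo, hi);     // construct the distribution once
        let mut rng = rand::thread_rng();
        let total: i32 = (0..100).map(|_| die.sample(&mut rng)).sum();
        println!("sum of 100 rolls: {}", total);
    }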
+mod bernoulli;
+mod distribution;
+mod float;
+mod integer;
+mod other;
+mod slice;
+mod utils;
+#[cfg(feature = "alloc")]
+mod weighted_index;
+
+#[doc(hidden)]
+pub mod hidden_export {
+    pub use super::float::IntoFloat; // used by rand_distr
+}
+pub mod uniform;
+#[deprecated(
+    since = "0.8.0",
+    note = "use rand::distributions::{WeightedIndex, WeightedError} instead"
+)]
+#[cfg(feature = "alloc")]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
+pub mod weighted;
+
+pub use self::bernoulli::{Bernoulli, BernoulliError};
+pub use self::distribution::{Distribution, DistIter, DistMap};
+#[cfg(feature = "alloc")]
+pub use self::distribution::DistString;
+pub use self::float::{Open01, OpenClosed01};
+pub use self::other::Alphanumeric;
+pub use self::slice::Slice;
+#[doc(inline)]
+pub use self::uniform::Uniform;
+#[cfg(feature = "alloc")]
+pub use self::weighted_index::{WeightedError, WeightedIndex};
+
+#[allow(unused)]
+use crate::Rng;
+
+/// A generic random value distribution, implemented for many primitive types.
+/// Usually generates values with a numerically uniform distribution, and with a
+/// range appropriate to the type.
+///
+/// ## Provided implementations
+///
+/// Assuming the provided `Rng` is well-behaved, these implementations
+/// generate values with the following ranges and distributions:
+///
+/// * Integers (`i32`, `u32`, `isize`, `usize`, etc.): Uniformly distributed
+///   over all values of the type.
+/// * `char`: Uniformly distributed over all Unicode scalar values, i.e. all
+///   code points in the range `0...0x10_FFFF`, except for the range
+///   `0xD800...0xDFFF` (the surrogate code points). This includes
+///   unassigned/reserved code points.
+/// * `bool`: Generates `false` or `true`, each with probability 0.5.
+/// * Floating point types (`f32` and `f64`): Uniformly distributed in the
+///   half-open range `[0, 1)`. See notes below.
+/// * Wrapping integers (`Wrapping<T>`): apart from the wrapper type, these are
+///   sampled identically to their normal integer variants.
+///
+/// The `Standard` distribution also supports generation of the following
+/// compound types where all component types are supported:
+///
+/// *   Tuples (up to 12 elements): each element is generated sequentially.
+/// *   Arrays (up to 32 elements): each element is generated sequentially;
+///     see also [`Rng::fill`] which supports arbitrary array length for integer
+///     and float types and tends to be faster for `u32` and smaller types.
+///     When using `rustc` ≥ 1.51, enable the `min_const_gen` feature to support
+///     arrays larger than 32 elements.
+///     Note that [`Rng::fill`] and `Standard`'s array support are *not* equivalent:
+///     the former is optimised for integer types (using fewer RNG calls for
+///     element types smaller than the RNG word size), while the latter supports
+///     any element type supported by `Standard`.
+/// *   `Option<T>` first generates a `bool`, and if true generates and returns
+///     `Some(value)` where `value: T`, otherwise returning `None`.
+///
+/// ## Custom implementations
+///
+/// The [`Standard`] distribution may be implemented for user types as follows:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// use rand::Rng;
+/// use rand::distributions::{Distribution, Standard};
+///
+/// struct MyF32 {
+///     x: f32,
+/// }
+///
+/// impl Distribution<MyF32> for Standard {
+///     fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> MyF32 {
+///         MyF32 { x: rng.gen() }
+///     }
+/// }
+/// ```
+///
+/// ## Example usage
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::Standard;
+///
+/// let val: f32 = StdRng::from_entropy().sample(Standard);
+/// println!("f32 from [0, 1): {}", val);
+/// ```
+///
+/// # Floating point implementation
+/// The floating point implementations for `Standard` generate a random value in
+/// the half-open interval `[0, 1)`, i.e. including 0 but not 1.
+///
+/// All values that can be generated are of the form `n * ε/2`. For `f32`
+/// the 24 most significant random bits of a `u32` are used and for `f64` the
+/// 53 most significant bits of a `u64` are used. The conversion uses the
+/// multiplicative method: `(rng.gen::<$uty>() >> N) as $ty * (ε/2)`.
+///
+/// See also: [`Open01`] which samples from `(0, 1)`, [`OpenClosed01`] which
+/// samples from `(0, 1]` and `Rng::gen_range(0..1)` which also samples from
+/// `[0, 1)`. Note that `Open01` uses transmute-based methods which yield 1 bit
+/// less precision but may perform faster on some architectures (on modern Intel
+/// CPUs all methods have approximately equal performance).
+///
+/// [`Uniform`]: uniform::Uniform
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
+pub struct Standard;
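The multiplicative float conversion described above is easy to sketch in isolation. The block below is an illustrative sketch based on the `n * ε/2` description, not the crate's internal implementation; `u64_to_unit_f64` is a hypothetical helper name.

```rust
// Sketch of the f64 case: keep the 53 most significant bits of a u64 and
// scale by ε/2 = 2^-53, giving a value in the half-open interval [0, 1).
// `u64_to_unit_f64` is a hypothetical name, not a rand API.
fn u64_to_unit_f64(x: u64) -> f64 {
    (x >> 11) as f64 * (1.0 / (1u64 << 53) as f64)
}

fn main() {
    assert_eq!(u64_to_unit_f64(0), 0.0);
    assert!(u64_to_unit_f64(u64::MAX) < 1.0); // stays strictly below 1
}
```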
+
+
\ No newline at end of file
diff --git a/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/other.rs.html b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/other.rs.html
new file mode 100644
index 0000000..415f1da
--- /dev/null
+++ b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/other.rs.html
@@ -0,0 +1,733 @@
+other.rs - source
// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The implementations of the `Standard` distribution for other built-in types.
+
+use core::char;
+use core::num::Wrapping;
+#[cfg(feature = "alloc")]
+use alloc::string::String;
+
+use crate::distributions::{Distribution, Standard, Uniform};
+#[cfg(feature = "alloc")]
+use crate::distributions::DistString;
+use crate::Rng;
+
+#[cfg(feature = "serde1")]
+use serde::{Serialize, Deserialize};
+#[cfg(feature = "min_const_gen")]
+use core::mem::{self, MaybeUninit};
+
+
+// ----- Sampling distributions -----
+
+/// Sample a `u8`, uniformly distributed over ASCII letters and numbers:
+/// a-z, A-Z and 0-9.
+///
+/// # Example
+///
+/// ```
+/// use rand::{Rng, thread_rng};
+/// use rand::distributions::Alphanumeric;
+///
+/// let mut rng = thread_rng();
+/// let chars: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect();
+/// println!("Random chars: {}", chars);
+/// ```
+///
+/// The [`DistString`] trait provides an easier method of generating
+/// a random `String`, and offers more efficient allocation:
+/// ```
+/// use rand::distributions::{Alphanumeric, DistString};
+/// let string = Alphanumeric.sample_string(&mut rand::thread_rng(), 16);
+/// println!("Random string: {}", string);
+/// ```
+///
+/// # Passwords
+///
+/// Users sometimes ask whether it is safe to use a string of random characters
+/// as a password. In principle, all RNGs in Rand implementing `CryptoRng` are
+/// suitable as a source of randomness for generating passwords (if they are
+/// properly seeded), but it is more conservative to only use randomness
+/// directly from the operating system via the `getrandom` crate, or the
+/// corresponding bindings of a crypto library.
+///
+/// When generating passwords or keys, it is important to consider the threat
+/// model and in some cases the memorability of the password. This is out of
+/// scope of the Rand project, and therefore we defer to the following
+/// references:
+///
+/// - [Wikipedia article on Password Strength](https://en.wikipedia.org/wiki/Password_strength)
+/// - [Diceware for generating memorable passwords](https://en.wikipedia.org/wiki/Diceware)
+#[derive(Debug, Clone, Copy)]
+#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+pub struct Alphanumeric;
+
+
+// ----- Implementations of distributions -----
+
+impl Distribution<char> for Standard {
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> char {
+        // A valid `char` is either in the interval `[0, 0xD800)` or
+        // `(0xDFFF, 0x11_0000)`. All `char`s must therefore be in
+        // `[0, 0x11_0000)` but not in the "gap" `[0xD800, 0xDFFF]` which is
+        // reserved for surrogates. This is the size of that gap.
+        const GAP_SIZE: u32 = 0xDFFF - 0xD800 + 1;
+
+        // Uniform::new(0, 0x11_0000 - GAP_SIZE) can also be used but it
+        // seemed slower.
+        let range = Uniform::new(GAP_SIZE, 0x11_0000);
+
+        let mut n = range.sample(rng);
+        if n <= 0xDFFF {
+            n -= GAP_SIZE;
+        }
+        unsafe { char::from_u32_unchecked(n) }
+    }
+}
+
+/// Note: the `String` is potentially left with excess capacity; optionally the
+/// user may call `string.shrink_to_fit()` afterwards.
+#[cfg(feature = "alloc")]
+impl DistString for Standard {
+    fn append_string<R: Rng + ?Sized>(&self, rng: &mut R, s: &mut String, len: usize) {
+        // A char is encoded with at most four bytes, thus this reservation is
+        // guaranteed to be sufficient. We do not shrink_to_fit afterwards so
+        // that repeated usage on the same `String` buffer does not reallocate.
+        s.reserve(4 * len);
+        s.extend(Distribution::<char>::sample_iter(self, rng).take(len));
+    }
+}
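A short usage sketch for the impl above, assuming the rand 0.8 API shown here with the `alloc`/`std` features enabled:

```rust
use rand::distributions::{DistString, Standard};

fn main() {
    // Eight random Unicode scalar values; the String may occupy more than 8 bytes.
    let s = Standard.sample_string(&mut rand::thread_rng(), 8);
    assert_eq!(s.chars().count(), 8);
}
```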
+
+impl Distribution<u8> for Alphanumeric {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u8 {
+        const RANGE: u32 = 26 + 26 + 10;
+        const GEN_ASCII_STR_CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
+                abcdefghijklmnopqrstuvwxyz\
+                0123456789";
+        // We can pick from 62 characters. This is so close to a power of 2, 64,
+        // that we can do better than `Uniform`. Use a simple bitshift and
+        // rejection sampling. We do not use a bitmask, because for small RNGs
+        // the most significant bits are usually of higher quality.
+        loop {
+            let var = rng.next_u32() >> (32 - 6);
+            if var < RANGE {
+                return GEN_ASCII_STR_CHARSET[var as usize];
+            }
+        }
+    }
+}
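The 6-bit rejection step above can be shown on its own. This is an illustrative sketch of the technique, not crate code; `pick_index_62` and the closure-based source are hypothetical.

```rust
// Draw the 6 most significant bits (a value in 0..64) and reject 62 and 63,
// leaving a uniform index into a 62-symbol alphabet.
fn pick_index_62(mut next_u32: impl FnMut() -> u32) -> usize {
    loop {
        let v = next_u32() >> (32 - 6);
        if v < 62 {
            return v as usize;
        }
    }
}

fn main() {
    // Toy counter-based source, used only to exercise the accept/reject loop.
    let mut state = 0u32;
    let idx = pick_index_62(|| {
        state = state.wrapping_add(0x9E37_79B9);
        state
    });
    assert!(idx < 62);
}
```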
+
+#[cfg(feature = "alloc")]
+impl DistString for Alphanumeric {
+    fn append_string<R: Rng + ?Sized>(&self, rng: &mut R, string: &mut String, len: usize) {
+        unsafe {
+            let v = string.as_mut_vec();
+            v.extend(self.sample_iter(rng).take(len));
+        }
+    }
+}
+
+impl Distribution<bool> for Standard {
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> bool {
+        // We can compare against an arbitrary bit of a u32 to get a bool.
+        // Because the least significant bits of a lower quality RNG can have
+        // simple patterns, we compare against the most significant bit. This is
+        // easiest done using a sign test.
+        (rng.next_u32() as i32) < 0
+    }
+}
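The sign test used above, shown standalone (an illustrative sketch; `msb_bool` is a hypothetical name):

```rust
// The most significant bit of a u32 decides the bool, avoiding the low bits
// that some small RNGs produce with poorer quality.
fn msb_bool(x: u32) -> bool {
    (x as i32) < 0
}

fn main() {
    assert!(!msb_bool(0x7FFF_FFFF)); // MSB clear -> false
    assert!(msb_bool(0x8000_0000));  // MSB set   -> true
}
```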
+
+macro_rules! tuple_impl {
+    // use variables to indicate the arity of the tuple
+    ($($tyvar:ident),* ) => {
+        // the trailing commas are for the 1-tuple
+        impl< $( $tyvar ),* >
+            Distribution<( $( $tyvar ),* , )>
+            for Standard
+            where $( Standard: Distribution<$tyvar> ),*
+        {
+            #[inline]
+            fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> ( $( $tyvar ),* , ) {
+                (
+                    // use the $tyvar's to get the appropriate number of
+                    // repeats (they're not actually needed)
+                    $(
+                        _rng.gen::<$tyvar>()
+                    ),*
+                    ,
+                )
+            }
+        }
+    }
+}
+
+impl Distribution<()> for Standard {
+    #[allow(clippy::unused_unit)]
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, _: &mut R) -> () {
+        ()
+    }
+}
+tuple_impl! {A}
+tuple_impl! {A, B}
+tuple_impl! {A, B, C}
+tuple_impl! {A, B, C, D}
+tuple_impl! {A, B, C, D, E}
+tuple_impl! {A, B, C, D, E, F}
+tuple_impl! {A, B, C, D, E, F, G}
+tuple_impl! {A, B, C, D, E, F, G, H}
+tuple_impl! {A, B, C, D, E, F, G, H, I}
+tuple_impl! {A, B, C, D, E, F, G, H, I, J}
+tuple_impl! {A, B, C, D, E, F, G, H, I, J, K}
+tuple_impl! {A, B, C, D, E, F, G, H, I, J, K, L}
+
+#[cfg(feature = "min_const_gen")]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "min_const_gen")))]
+impl<T, const N: usize> Distribution<[T; N]> for Standard
+where Standard: Distribution<T>
+{
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> [T; N] {
+        let mut buff: [MaybeUninit<T>; N] = unsafe { MaybeUninit::uninit().assume_init() };
+
+        for elem in &mut buff {
+            *elem = MaybeUninit::new(_rng.gen());
+        }
+
+        unsafe { mem::transmute_copy::<_, _>(&buff) }
+    }
+}
+
+#[cfg(not(feature = "min_const_gen"))]
+macro_rules! array_impl {
+    // recursive, given at least one type parameter:
+    {$n:expr, $t:ident, $($ts:ident,)*} => {
+        array_impl!{($n - 1), $($ts,)*}
+
+        impl<T> Distribution<[T; $n]> for Standard where Standard: Distribution<T> {
+            #[inline]
+            fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> [T; $n] {
+                [_rng.gen::<$t>(), $(_rng.gen::<$ts>()),*]
+            }
+        }
+    };
+    // empty case:
+    {$n:expr,} => {
+        impl<T> Distribution<[T; $n]> for Standard {
+            fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> [T; $n] { [] }
+        }
+    };
+}
+
+#[cfg(not(feature = "min_const_gen"))]
+array_impl! {32, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T,}
+
+impl<T> Distribution<Option<T>> for Standard
+where Standard: Distribution<T>
+{
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Option<T> {
+        // UFCS is needed here: https://github.com/rust-lang/rust/issues/24066
+        if rng.gen::<bool>() {
+            Some(rng.gen())
+        } else {
+            None
+        }
+    }
+}
+
+impl<T> Distribution<Wrapping<T>> for Standard
+where Standard: Distribution<T>
+{
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Wrapping<T> {
+        Wrapping(rng.gen())
+    }
+}
+
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::RngCore;
+    #[cfg(feature = "alloc")] use alloc::string::String;
+
+    #[test]
+    fn test_misc() {
+        let rng: &mut dyn RngCore = &mut crate::test::rng(820);
+
+        rng.sample::<char, _>(Standard);
+        rng.sample::<bool, _>(Standard);
+    }
+
+    #[cfg(feature = "alloc")]
+    #[test]
+    fn test_chars() {
+        use core::iter;
+        let mut rng = crate::test::rng(805);
+
+        // Test by generating a relatively large number of chars, so we also
+        // take the rejection sampling path.
+        let word: String = iter::repeat(())
+            .map(|()| rng.gen::<char>())
+            .take(1000)
+            .collect();
+        assert!(!word.is_empty());
+    }
+
+    #[test]
+    fn test_alphanumeric() {
+        let mut rng = crate::test::rng(806);
+
+        // Test by generating a relatively large number of chars, so we also
+        // take the rejection sampling path.
+        let mut incorrect = false;
+        for _ in 0..100 {
+            let c: char = rng.sample(Alphanumeric).into();
+            incorrect |= !(('0'..='9').contains(&c) ||
+                           ('A'..='Z').contains(&c) ||
+                           ('a'..='z').contains(&c) );
+        }
+        assert!(!incorrect);
+    }
+
+    #[test]
+    fn value_stability() {
+        fn test_samples<T: Copy + core::fmt::Debug + PartialEq, D: Distribution<T>>(
+            distr: &D, zero: T, expected: &[T],
+        ) {
+            let mut rng = crate::test::rng(807);
+            let mut buf = [zero; 5];
+            for x in &mut buf {
+                *x = rng.sample(&distr);
+            }
+            assert_eq!(&buf, expected);
+        }
+
+        test_samples(&Standard, 'a', &[
+            '\u{8cdac}',
+            '\u{a346a}',
+            '\u{80120}',
+            '\u{ed692}',
+            '\u{35888}',
+        ]);
+        test_samples(&Alphanumeric, 0, &[104, 109, 101, 51, 77]);
+        test_samples(&Standard, false, &[true, true, false, true, false]);
+        test_samples(&Standard, None as Option<bool>, &[
+            Some(true),
+            None,
+            Some(false),
+            None,
+            Some(false),
+        ]);
+        test_samples(&Standard, Wrapping(0i32), &[
+            Wrapping(-2074640887),
+            Wrapping(-1719949321),
+            Wrapping(2018088303),
+            Wrapping(-547181756),
+            Wrapping(838957336),
+        ]);
+
+        // We test only sub-sets of tuple and array impls
+        test_samples(&Standard, (), &[(), (), (), (), ()]);
+        test_samples(&Standard, (false,), &[
+            (true,),
+            (true,),
+            (false,),
+            (true,),
+            (false,),
+        ]);
+        test_samples(&Standard, (false, false), &[
+            (true, true),
+            (false, true),
+            (false, false),
+            (true, false),
+            (false, false),
+        ]);
+
+        test_samples(&Standard, [0u8; 0], &[[], [], [], [], []]);
+        test_samples(&Standard, [0u8; 3], &[
+            [9, 247, 111],
+            [68, 24, 13],
+            [174, 19, 194],
+            [172, 69, 213],
+            [149, 207, 29],
+        ]);
+    }
+}
+
+
\ No newline at end of file
diff --git a/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/slice.rs.html b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/slice.rs.html
new file mode 100644
index 0000000..2f43484
--- /dev/null
+++ b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/slice.rs.html
@@ -0,0 +1,237 @@
+slice.rs - source
// Copyright 2021 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use crate::distributions::{Distribution, Uniform};
+
+/// A distribution to sample items uniformly from a slice.
+///
+/// [`Slice::new`] constructs a distribution referencing a slice and uniformly
+/// samples references from the items in the slice. It may do extra work up
+/// front to make sampling of multiple values faster; if only one sample from
+/// the slice is required, [`SliceRandom::choose`] can be more efficient.
+///
+/// Steps are taken to avoid bias which might be present in naive
+/// implementations; for example `slice[rng.gen() % slice.len()]` samples from
+/// the slice, but may be more likely to select elements at low indices than
+/// those at higher indices.
+///
+/// This distribution samples with replacement; each sample is independent.
+/// Sampling without replacement requires state to be retained, and therefore
+/// cannot be handled by a distribution; you should instead consider methods
+/// on [`SliceRandom`], such as [`SliceRandom::choose_multiple`].
+///
+/// # Example
+///
+/// ```
+/// use rand::Rng;
+/// use rand::distributions::Slice;
+///
+/// let vowels = ['a', 'e', 'i', 'o', 'u'];
+/// let vowels_dist = Slice::new(&vowels).unwrap();
+/// let rng = rand::thread_rng();
+///
+/// // build a string of 10 vowels
+/// let vowel_string: String = rng
+///     .sample_iter(&vowels_dist)
+///     .take(10)
+///     .collect();
+///
+/// println!("{}", vowel_string);
+/// assert_eq!(vowel_string.len(), 10);
+/// assert!(vowel_string.chars().all(|c| vowels.contains(&c)));
+/// ```
+///
+/// For a single sample, [`SliceRandom::choose`][crate::seq::SliceRandom::choose]
+/// may be preferred:
+///
+/// ```
+/// use rand::seq::SliceRandom;
+///
+/// let vowels = ['a', 'e', 'i', 'o', 'u'];
+/// let mut rng = rand::thread_rng();
+///
+/// println!("{}", vowels.choose(&mut rng).unwrap())
+/// ```
+///
+/// [`SliceRandom`]: crate::seq::SliceRandom
+/// [`SliceRandom::choose`]: crate::seq::SliceRandom::choose
+/// [`SliceRandom::choose_multiple`]: crate::seq::SliceRandom::choose_multiple
+#[derive(Debug, Clone, Copy)]
+pub struct Slice<'a, T> {
+    slice: &'a [T],
+    range: Uniform<usize>,
+}
+
+impl<'a, T> Slice<'a, T> {
+    /// Create a new `Slice` instance which samples uniformly from the slice.
+    /// Returns `Err` if the slice is empty.
+    pub fn new(slice: &'a [T]) -> Result<Self, EmptySlice> {
+        match slice.len() {
+            0 => Err(EmptySlice),
+            len => Ok(Self {
+                slice,
+                range: Uniform::new(0, len),
+            }),
+        }
+    }
+}
+
+impl<'a, T> Distribution<&'a T> for Slice<'a, T> {
+    fn sample<R: crate::Rng + ?Sized>(&self, rng: &mut R) -> &'a T {
+        let idx = self.range.sample(rng);
+
+        debug_assert!(
+            idx < self.slice.len(),
+            "Uniform::new(0, {}) somehow returned {}",
+            self.slice.len(),
+            idx
+        );
+
+        // Safety: at construction time, it was ensured that the slice was
+        // non-empty, and that the `Uniform` range produces values in range
+        // for the slice
+        unsafe { self.slice.get_unchecked(idx) }
+    }
+}
+
+/// Error type indicating that a [`Slice`] distribution was improperly
+/// constructed with an empty slice.
+#[derive(Debug, Clone, Copy)]
+pub struct EmptySlice;
+
+impl core::fmt::Display for EmptySlice {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(
+            f,
+            "Tried to create a `distributions::Slice` with an empty slice"
+        )
+    }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for EmptySlice {}
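A possible way to use `Slice` together with the `EmptySlice` error, assuming the rand 0.8 API shown above; `pick_one` is a hypothetical helper:

```rust
use rand::distributions::{Distribution, Slice};

// Returns None for an empty slice instead of panicking.
fn pick_one<'a, T>(items: &'a [T], rng: &mut impl rand::Rng) -> Option<&'a T> {
    let dist = Slice::new(items).ok()?; // Err(EmptySlice) becomes None
    Some(dist.sample(rng))
}

fn main() {
    let mut rng = rand::thread_rng();
    let letters = ['x', 'y', 'z'];
    assert!(pick_one(&letters, &mut rng).is_some());

    let empty: [char; 0] = [];
    assert!(pick_one(&empty, &mut rng).is_none());
}
```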
+
+
\ No newline at end of file
diff --git a/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/uniform.rs.html b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/uniform.rs.html
new file mode 100644
index 0000000..72d0ba1
--- /dev/null
+++ b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/uniform.rs.html
@@ -0,0 +1,3319 @@
+uniform.rs - source
// Copyright 2018-2020 Developers of the Rand project.
+// Copyright 2017 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A distribution uniformly sampling numbers within a given range.
+//!
+//! [`Uniform`] is the standard distribution to sample uniformly from a range;
+//! e.g. `Uniform::new_inclusive(1, 6)` can sample integers from 1 to 6, like a
+//! standard die. [`Rng::gen_range`] supports any type supported by
+//! [`Uniform`].
+//!
+//! This distribution is provided with support for several primitive types
+//! (all integer and floating-point types) as well as [`std::time::Duration`],
+//! and supports extension to user-defined types via a type-specific *back-end*
+//! implementation.
+//!
+//! The types [`UniformInt`], [`UniformFloat`] and [`UniformDuration`] are the
+//! back-ends supporting sampling from primitive integer and floating-point
+//! ranges as well as from [`std::time::Duration`]; these types do not normally
+//! need to be used directly (unless implementing a derived back-end).
+//!
+//! # Example usage
+//!
+//! ```
+//! use rand::{Rng, thread_rng};
+//! use rand::distributions::Uniform;
+//!
+//! let mut rng = thread_rng();
+//! let side = Uniform::new(-10.0, 10.0);
+//!
+//! // sample between 1 and 10 points
+//! for _ in 0..rng.gen_range(1..=10) {
+//!     // sample a point from the square spanning -10 to 10 in each dimension
+//!     let (x, y) = (rng.sample(side), rng.sample(side));
+//!     println!("Point: {}, {}", x, y);
+//! }
+//! ```
+//!
+//! # Extending `Uniform` to support a custom type
+//!
+//! To extend [`Uniform`] to support your own types, write a back-end which
+//! implements the [`UniformSampler`] trait, then implement the [`SampleUniform`]
+//! helper trait to "register" your back-end. See the `MyF32` example below.
+//!
+//! At a minimum, the back-end needs to store any parameters needed for sampling
+//! (e.g. the target range) and implement `new`, `new_inclusive` and `sample`.
+//! Those methods should include an assert to check the range is valid (i.e.
+//! `low < high`). The example below merely wraps another back-end.
+//!
+//! The `new`, `new_inclusive` and `sample_single` functions use arguments of
+//! type `SampleBorrow<X>` in order to support passing in values by reference or
+//! by value. In the implementation of these functions, you can choose to
+//! simply use the reference returned by [`SampleBorrow::borrow`], or you can choose
+//! to copy or clone the value, whatever is appropriate for your type.
+//!
+//! ```
+//! use rand::prelude::*;
+//! use rand::distributions::uniform::{Uniform, SampleUniform,
+//!         UniformSampler, UniformFloat, SampleBorrow};
+//!
+//! struct MyF32(f32);
+//!
+//! #[derive(Clone, Copy, Debug)]
+//! struct UniformMyF32(UniformFloat<f32>);
+//!
+//! impl UniformSampler for UniformMyF32 {
+//!     type X = MyF32;
+//!     fn new<B1, B2>(low: B1, high: B2) -> Self
+//!         where B1: SampleBorrow<Self::X> + Sized,
+//!               B2: SampleBorrow<Self::X> + Sized
+//!     {
+//!         UniformMyF32(UniformFloat::<f32>::new(low.borrow().0, high.borrow().0))
+//!     }
+//!     fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
+//!         where B1: SampleBorrow<Self::X> + Sized,
+//!               B2: SampleBorrow<Self::X> + Sized
+//!     {
+//!         UniformMyF32(UniformFloat::<f32>::new_inclusive(
+//!             low.borrow().0,
+//!             high.borrow().0,
+//!         ))
+//!     }
+//!     fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+//!         MyF32(self.0.sample(rng))
+//!     }
+//! }
+//!
+//! impl SampleUniform for MyF32 {
+//!     type Sampler = UniformMyF32;
+//! }
+//!
+//! let (low, high) = (MyF32(17.0f32), MyF32(22.0f32));
+//! let uniform = Uniform::new(low, high);
+//! let x = uniform.sample(&mut thread_rng());
+//! ```
+//!
+//! [`SampleUniform`]: crate::distributions::uniform::SampleUniform
+//! [`UniformSampler`]: crate::distributions::uniform::UniformSampler
+//! [`UniformInt`]: crate::distributions::uniform::UniformInt
+//! [`UniformFloat`]: crate::distributions::uniform::UniformFloat
+//! [`UniformDuration`]: crate::distributions::uniform::UniformDuration
+//! [`SampleBorrow::borrow`]: crate::distributions::uniform::SampleBorrow::borrow
+
+use core::time::Duration;
+use core::ops::{Range, RangeInclusive};
+
+use crate::distributions::float::IntoFloat;
+use crate::distributions::utils::{BoolAsSIMD, FloatAsSIMD, FloatSIMDUtils, WideningMultiply};
+use crate::distributions::Distribution;
+use crate::{Rng, RngCore};
+
+#[cfg(not(feature = "std"))]
+#[allow(unused_imports)] // rustc doesn't detect that this is actually used
+use crate::distributions::utils::Float;
+
+#[cfg(feature = "simd_support")] use packed_simd::*;
+
+#[cfg(feature = "serde1")]
+use serde::{Serialize, Deserialize};
+
+/// Sample values uniformly between two bounds.
+///
+/// [`Uniform::new`] and [`Uniform::new_inclusive`] construct a uniform
+/// distribution sampling from the given range; these functions may do extra
+/// work up front to make sampling of multiple values faster. If only one sample
+/// from the range is required, [`Rng::gen_range`] can be more efficient.
+///
+/// When sampling from a constant range, many calculations can happen at
+/// compile-time and all methods should be fast; for floating-point ranges and
+/// the full range of integer types this should have comparable performance to
+/// the `Standard` distribution.
+///
+/// Steps are taken to avoid bias which might be present in naive
+/// implementations; for example `rng.gen::<u8>() % 170` samples from the range
+/// `[0, 169]` but selects numbers below 85 twice as often as the other
+/// values. Further, the implementations here give more weight to the high-bits
+/// generated by the RNG than the low bits, since with some RNGs the low-bits
+/// are of lower quality than the high bits.
+///
+/// Implementations must sample in `[low, high)` range for
+/// `Uniform::new(low, high)`, i.e., excluding `high`. In particular, care must
+/// be taken to ensure that rounding never results in values `< low` or `>= high`.
+///
+/// # Example
+///
+/// ```
+/// use rand::distributions::{Distribution, Uniform};
+///
+/// let between = Uniform::from(10..10000);
+/// let mut rng = rand::thread_rng();
+/// let mut sum = 0;
+/// for _ in 0..1000 {
+///     sum += between.sample(&mut rng);
+/// }
+/// println!("{}", sum);
+/// ```
+///
+/// For a single sample, [`Rng::gen_range`] may be preferred:
+///
+/// ```
+/// use rand::Rng;
+///
+/// let mut rng = rand::thread_rng();
+/// println!("{}", rng.gen_range(0..10));
+/// ```
+///
+/// [`new`]: Uniform::new
+/// [`new_inclusive`]: Uniform::new_inclusive
+/// [`Rng::gen_range`]: Rng::gen_range
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "serde1", serde(bound(serialize = "X::Sampler: Serialize")))]
+#[cfg_attr(feature = "serde1", serde(bound(deserialize = "X::Sampler: Deserialize<'de>")))]
+pub struct Uniform<X: SampleUniform>(X::Sampler);
+
+impl<X: SampleUniform> Uniform<X> {
+    /// Create a new `Uniform` instance which samples uniformly from the half
+    /// open range `[low, high)` (excluding `high`). Panics if `low >= high`.
+    pub fn new<B1, B2>(low: B1, high: B2) -> Uniform<X>
+    where
+        B1: SampleBorrow<X> + Sized,
+        B2: SampleBorrow<X> + Sized,
+    {
+        Uniform(X::Sampler::new(low, high))
+    }
+
+    /// Create a new `Uniform` instance which samples uniformly from the closed
+    /// range `[low, high]` (inclusive). Panics if `low > high`.
+    pub fn new_inclusive<B1, B2>(low: B1, high: B2) -> Uniform<X>
+    where
+        B1: SampleBorrow<X> + Sized,
+        B2: SampleBorrow<X> + Sized,
+    {
+        Uniform(X::Sampler::new_inclusive(low, high))
+    }
+}
+
+impl<X: SampleUniform> Distribution<X> for Uniform<X> {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> X {
+        self.0.sample(rng)
+    }
+}
+
+/// Helper trait for creating objects using the correct implementation of
+/// [`UniformSampler`] for the sampling type.
+///
+/// See the [module documentation] on how to implement [`Uniform`] range
+/// sampling for a custom type.
+///
+/// [module documentation]: crate::distributions::uniform
+pub trait SampleUniform: Sized {
+    /// The `UniformSampler` implementation supporting type `X`.
+    type Sampler: UniformSampler<X = Self>;
+}
+
+/// Helper trait handling actual uniform sampling.
+///
+/// See the [module documentation] on how to implement [`Uniform`] range
+/// sampling for a custom type.
+///
+/// Implementation of [`sample_single`] is optional, and is only useful when
+/// the implementation can be faster than `Self::new(low, high).sample(rng)`.
+///
+/// [module documentation]: crate::distributions::uniform
+/// [`sample_single`]: UniformSampler::sample_single
+pub trait UniformSampler: Sized {
+    /// The type sampled by this implementation.
+    type X;
+
+    /// Construct self, with inclusive lower bound and exclusive upper bound
+    /// `[low, high)`.
+    ///
+    /// Usually users should not call this directly but instead use
+    /// `Uniform::new`, which asserts that `low < high` before calling this.
+    fn new<B1, B2>(low: B1, high: B2) -> Self
+    where
+        B1: SampleBorrow<Self::X> + Sized,
+        B2: SampleBorrow<Self::X> + Sized;
+
+    /// Construct self, with inclusive bounds `[low, high]`.
+    ///
+    /// Usually users should not call this directly but instead use
+    /// `Uniform::new_inclusive`, which asserts that `low <= high` before
+    /// calling this.
+    fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
+    where
+        B1: SampleBorrow<Self::X> + Sized,
+        B2: SampleBorrow<Self::X> + Sized;
+
+    /// Sample a value.
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X;
+
+    /// Sample a single value uniformly from a range with inclusive lower bound
+    /// and exclusive upper bound `[low, high)`.
+    ///
+    /// By default this is implemented using
+    /// `UniformSampler::new(low, high).sample(rng)`. However, for some types
+    /// more optimal implementations for single usage may be provided via this
+    /// method (which is the case for integers and floats).
+    /// Results may not be identical.
+    ///
+    /// Note that to use this method in a generic context, the type needs to be
+    /// retrieved via `SampleUniform::Sampler` as follows:
+    /// ```
+    /// use rand::{thread_rng, distributions::uniform::{SampleUniform, UniformSampler}};
+    /// # #[allow(unused)]
+    /// fn sample_from_range<T: SampleUniform>(lb: T, ub: T) -> T {
+    ///     let mut rng = thread_rng();
+    ///     <T as SampleUniform>::Sampler::sample_single(lb, ub, &mut rng)
+    /// }
+    /// ```
+    fn sample_single<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R) -> Self::X
+    where
+        B1: SampleBorrow<Self::X> + Sized,
+        B2: SampleBorrow<Self::X> + Sized,
+    {
+        let uniform: Self = UniformSampler::new(low, high);
+        uniform.sample(rng)
+    }
+
+    /// Sample a single value uniformly from a range with inclusive lower bound
+    /// and inclusive upper bound `[low, high]`.
+    ///
+    /// By default this is implemented using
+    /// `UniformSampler::new_inclusive(low, high).sample(rng)`. However, for
+    /// some types more optimal implementations for single usage may be provided
+    /// via this method.
+    /// Results may not be identical.
+    fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(low: B1, high: B2, rng: &mut R)
+        -> Self::X
+        where B1: SampleBorrow<Self::X> + Sized,
+              B2: SampleBorrow<Self::X> + Sized
+    {
+        let uniform: Self = UniformSampler::new_inclusive(low, high);
+        uniform.sample(rng)
+    }
+}
+
+impl<X: SampleUniform> From<Range<X>> for Uniform<X> {
+    fn from(r: ::core::ops::Range<X>) -> Uniform<X> {
+        Uniform::new(r.start, r.end)
+    }
+}
+
+impl<X: SampleUniform> From<RangeInclusive<X>> for Uniform<X> {
+    fn from(r: ::core::ops::RangeInclusive<X>) -> Uniform<X> {
+        Uniform::new_inclusive(r.start(), r.end())
+    }
+}
+
+
+/// Helper trait similar to [`Borrow`] but implemented
+/// only for SampleUniform and references to SampleUniform in
+/// order to resolve ambiguity issues.
+///
+/// [`Borrow`]: std::borrow::Borrow
+pub trait SampleBorrow<Borrowed> {
+    /// Immutably borrows from an owned value. See [`Borrow::borrow`]
+    ///
+    /// [`Borrow::borrow`]: std::borrow::Borrow::borrow
+    fn borrow(&self) -> &Borrowed;
+}
+impl<Borrowed> SampleBorrow<Borrowed> for Borrowed
+where Borrowed: SampleUniform
+{
+    #[inline(always)]
+    fn borrow(&self) -> &Borrowed {
+        self
+    }
+}
+impl<'a, Borrowed> SampleBorrow<Borrowed> for &'a Borrowed
+where Borrowed: SampleUniform
+{
+    #[inline(always)]
+    fn borrow(&self) -> &Borrowed {
+        *self
+    }
+}
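Since both owned values and references implement `SampleBorrow`, range bounds can be passed either way; a small sketch assuming the rand 0.8 API shown here:

```rust
use rand::distributions::{Distribution, Uniform};

fn main() {
    let (low, high) = (1u32, 7u32);
    let by_value = Uniform::new(low, high); // bounds passed by value
    let by_ref = Uniform::new(&low, &high); // bounds passed by reference

    let mut rng = rand::thread_rng();
    assert!((1..7).contains(&by_value.sample(&mut rng)));
    assert!((1..7).contains(&by_ref.sample(&mut rng)));
}
```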
+
+/// Range that supports generating a single sample efficiently.
+///
+/// Any type implementing this trait can be used to specify the sampled range
+/// for `Rng::gen_range`.
+pub trait SampleRange<T> {
+    /// Generate a sample from the given range.
+    fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> T;
+
+    /// Check whether the range is empty.
+    fn is_empty(&self) -> bool;
+}
+
+impl<T: SampleUniform + PartialOrd> SampleRange<T> for Range<T> {
+    #[inline]
+    fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> T {
+        T::Sampler::sample_single(self.start, self.end, rng)
+    }
+
+    #[inline]
+    fn is_empty(&self) -> bool {
+        !(self.start < self.end)
+    }
+}
+
+impl<T: SampleUniform + PartialOrd> SampleRange<T> for RangeInclusive<T> {
+    #[inline]
+    fn sample_single<R: RngCore + ?Sized>(self, rng: &mut R) -> T {
+        T::Sampler::sample_single_inclusive(self.start(), self.end(), rng)
+    }
+
+    #[inline]
+    fn is_empty(&self) -> bool {
+        !(self.start() <= self.end())
+    }
+}
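These two impls are what let `Rng::gen_range` accept both range forms; a small usage sketch assuming the rand 0.8 API:

```rust
use rand::Rng;

fn main() {
    let mut rng = rand::thread_rng();
    let a: i32 = rng.gen_range(0..10);  // SampleRange for Range<i32>
    let b: i32 = rng.gen_range(0..=9);  // SampleRange for RangeInclusive<i32>
    assert!((0..10).contains(&a));
    assert!((0..=9).contains(&b));
}
```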
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+// What follows are all back-ends.
+
+
+/// The back-end implementing [`UniformSampler`] for integer types.
+///
+/// Unless you are implementing [`UniformSampler`] for your own type, this type
+/// should not be used directly, use [`Uniform`] instead.
+///
+/// # Implementation notes
+///
+/// For simplicity, we use the same generic struct `UniformInt<X>` for all
+/// integer types `X`. This gives us only one field type, `X`; to store unsigned
+/// values of this size, we make use of the fact that these conversions are no-ops.
+///
+/// For a closed range, the number of possible numbers we should generate is
+/// `range = (high - low + 1)`. To avoid bias, we must ensure that the size of
+/// our sample space, `zone`, is a multiple of `range`; other values must be
+/// rejected (by replacing with a new random sample).
+///
+/// As a special case, we use `range = 0` to represent the full range of the
+/// result type (i.e. for `new_inclusive($ty::MIN, $ty::MAX)`).
+///
+/// The optimum `zone` is the largest product of `range` which fits in our
+/// (unsigned) target type. We calculate this by calculating how many numbers we
+/// must reject: `reject = (MAX + 1) % range = (MAX - range + 1) % range`. Any (large)
+/// product of `range` will suffice, thus in `sample_single` we multiply by a
+/// power of 2 via bit-shifting (faster but may cause more rejections).
+///
+/// The smallest integer type that PRNGs generate is `u32`. For 8- and 16-bit outputs we
+/// use `u32` for our `zone` and samples (because it's not slower and because
+/// it reduces the chance of having to reject a sample). In this case we cannot
+/// store `zone` in the target type since it is too large, however we know
+/// `ints_to_reject < range <= $unsigned::MAX`.
+///
+/// An alternative to using a modulus is widening multiply: After a widening
+/// multiply by `range`, the result is in the high word. Then comparing the low
+/// word against `zone` makes sure our distribution is uniform.
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+pub struct UniformInt<X> {
+    low: X,
+    range: X,
+    z: X, // either ints_to_reject or zone depending on implementation
+}
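The widening-multiply rejection scheme from the notes above can be sketched for a single `u32` range. This mirrors the logic of the macro-generated `sample` below, but it is an illustrative sketch rather than the crate's code; `sample_below` and `next_u32` are hypothetical names.

```rust
// Widening multiply: the high 32 bits of v * range are the candidate sample,
// the low 32 bits are compared against `zone` to reject the biased tail.
fn sample_below(mut next_u32: impl FnMut() -> u32, range: u32) -> u32 {
    assert!(range > 0);
    let ints_to_reject = (u32::MAX - range + 1) % range; // == 2^32 mod range
    let zone = u32::MAX - ints_to_reject;
    loop {
        let m = next_u32() as u64 * range as u64;
        let (hi, lo) = ((m >> 32) as u32, m as u32);
        if lo <= zone {
            return hi; // uniform in 0..range
        }
    }
}

fn main() {
    // Toy LCG, used only to exercise the sketch; not a quality RNG.
    let mut state = 1u32;
    let mut rng = || {
        state = state.wrapping_mul(747_796_405).wrapping_add(2_891_336_453);
        state
    };
    for _ in 0..100 {
        assert!(sample_below(&mut rng, 6) < 6);
    }
}
```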
+
+macro_rules! uniform_int_impl {
+    ($ty:ty, $unsigned:ident, $u_large:ident) => {
+        impl SampleUniform for $ty {
+            type Sampler = UniformInt<$ty>;
+        }
+
+        impl UniformSampler for UniformInt<$ty> {
+            // We play free and fast with unsigned vs signed here
+            // (when $ty is signed), but that's fine, since the
+            // contract of this macro is for $ty and $unsigned to be
+            // "bit-equal", so casting between them is a no-op.
+
+            type X = $ty;
+
+            #[inline] // if the range is constant, this helps LLVM to do the
+                      // calculations at compile-time.
+            fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+            where
+                B1: SampleBorrow<Self::X> + Sized,
+                B2: SampleBorrow<Self::X> + Sized,
+            {
+                let low = *low_b.borrow();
+                let high = *high_b.borrow();
+                assert!(low < high, "Uniform::new called with `low >= high`");
+                UniformSampler::new_inclusive(low, high - 1)
+            }
+
+            #[inline] // if the range is constant, this helps LLVM to do the
+                      // calculations at compile-time.
+            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+            where
+                B1: SampleBorrow<Self::X> + Sized,
+                B2: SampleBorrow<Self::X> + Sized,
+            {
+                let low = *low_b.borrow();
+                let high = *high_b.borrow();
+                assert!(
+                    low <= high,
+                    "Uniform::new_inclusive called with `low > high`"
+                );
+                let unsigned_max = ::core::$u_large::MAX;
+
+                let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned;
+                let ints_to_reject = if range > 0 {
+                    let range = $u_large::from(range);
+                    (unsigned_max - range + 1) % range
+                } else {
+                    0
+                };
+
+                UniformInt {
+                    low,
+                    // These are really $unsigned values, but store as $ty:
+                    range: range as $ty,
+                    z: ints_to_reject as $unsigned as $ty,
+                }
+            }
+
+            #[inline]
+            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+                let range = self.range as $unsigned as $u_large;
+                if range > 0 {
+                    let unsigned_max = ::core::$u_large::MAX;
+                    let zone = unsigned_max - (self.z as $unsigned as $u_large);
+                    loop {
+                        let v: $u_large = rng.gen();
+                        let (hi, lo) = v.wmul(range);
+                        if lo <= zone {
+                            return self.low.wrapping_add(hi as $ty);
+                        }
+                    }
+                } else {
+                    // Sample from the entire integer range.
+                    rng.gen()
+                }
+            }
+
+            #[inline]
+            fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
+            where
+                B1: SampleBorrow<Self::X> + Sized,
+                B2: SampleBorrow<Self::X> + Sized,
+            {
+                let low = *low_b.borrow();
+                let high = *high_b.borrow();
+                assert!(low < high, "UniformSampler::sample_single: low >= high");
+                Self::sample_single_inclusive(low, high - 1, rng)
+            }
+
+            #[inline]
+            fn sample_single_inclusive<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
+            where
+                B1: SampleBorrow<Self::X> + Sized,
+                B2: SampleBorrow<Self::X> + Sized,
+            {
+                let low = *low_b.borrow();
+                let high = *high_b.borrow();
+                assert!(low <= high, "UniformSampler::sample_single_inclusive: low > high");
+                let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned as $u_large;
+                // If the above resulted in wrap-around to 0, the range is $ty::MIN..=$ty::MAX,
+                // and any integer will do.
+                if range == 0 {
+                    return rng.gen();
+                }
+
+                let zone = if ::core::$unsigned::MAX <= ::core::u16::MAX as $unsigned {
+                    // Using a modulus is faster than the approximation for
+                    // i8 and i16. I suppose we trade the cost of one
+                    // modulus for near-perfect branch prediction.
+                    let unsigned_max: $u_large = ::core::$u_large::MAX;
+                    let ints_to_reject = (unsigned_max - range + 1) % range;
+                    unsigned_max - ints_to_reject
+                } else {
+                    // conservative but fast approximation. `- 1` is necessary to allow the
+                    // same comparison without bias.
+                    (range << range.leading_zeros()).wrapping_sub(1)
+                };
+
+                loop {
+                    let v: $u_large = rng.gen();
+                    let (hi, lo) = v.wmul(range);
+                    if lo <= zone {
+                        return low.wrapping_add(hi as $ty);
+                    }
+                }
+            }
+        }
+    };
+}
+
+uniform_int_impl! { i8, u8, u32 }
+uniform_int_impl! { i16, u16, u32 }
+uniform_int_impl! { i32, u32, u32 }
+uniform_int_impl! { i64, u64, u64 }
+uniform_int_impl! { i128, u128, u128 }
+uniform_int_impl! { isize, usize, usize }
+uniform_int_impl! { u8, u8, u32 }
+uniform_int_impl! { u16, u16, u32 }
+uniform_int_impl! { u32, u32, u32 }
+uniform_int_impl! { u64, u64, u64 }
+uniform_int_impl! { usize, usize, usize }
+uniform_int_impl! { u128, u128, u128 }
+
+#[cfg(feature = "simd_support")]
+macro_rules! uniform_simd_int_impl {
+    ($ty:ident, $unsigned:ident, $u_scalar:ident) => {
+        // The "pick the largest zone that can fit in an `u32`" optimization
+        // is less useful here. Multiple lanes complicate things, we don't
+        // know the PRNG's minimal output size, and casting to a larger vector
+        // is generally a bad idea for SIMD performance. The user can still
+        // implement it manually.
+
+        // TODO: look into `Uniform::<u32x4>::new(0u32, 100)` functionality
+        //       perhaps `impl SampleUniform for $u_scalar`?
+        impl SampleUniform for $ty {
+            type Sampler = UniformInt<$ty>;
+        }
+
+        impl UniformSampler for UniformInt<$ty> {
+            type X = $ty;
+
+            #[inline] // if the range is constant, this helps LLVM to do the
+                      // calculations at compile-time.
+            fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+                where B1: SampleBorrow<Self::X> + Sized,
+                      B2: SampleBorrow<Self::X> + Sized
+            {
+                let low = *low_b.borrow();
+                let high = *high_b.borrow();
+                assert!(low.lt(high).all(), "Uniform::new called with `low >= high`");
+                UniformSampler::new_inclusive(low, high - 1)
+            }
+
+            #[inline] // if the range is constant, this helps LLVM to do the
+                      // calculations at compile-time.
+            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+                where B1: SampleBorrow<Self::X> + Sized,
+                      B2: SampleBorrow<Self::X> + Sized
+            {
+                let low = *low_b.borrow();
+                let high = *high_b.borrow();
+                assert!(low.le(high).all(),
+                        "Uniform::new_inclusive called with `low > high`");
+                let unsigned_max = ::core::$u_scalar::MAX;
+
+                // NOTE: these may need to be replaced with explicitly
+                // wrapping operations if `packed_simd` changes
+                let range: $unsigned = ((high - low) + 1).cast();
+                // `% 0` will panic at runtime.
+                let not_full_range = range.gt($unsigned::splat(0));
+                // replacing 0 with `unsigned_max` allows a faster `select`
+                // with bitwise OR
+                let modulo = not_full_range.select(range, $unsigned::splat(unsigned_max));
+                // wrapping addition
+                let ints_to_reject = (unsigned_max - range + 1) % modulo;
+                // When `range` is 0, `lo` of `v.wmul(range)` will always be
+                // zero which means only one sample is needed.
+                let zone = unsigned_max - ints_to_reject;
+
+                UniformInt {
+                    low,
+                    // These are really $unsigned values, but store as $ty:
+                    range: range.cast(),
+                    z: zone.cast(),
+                }
+            }
+
+            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+                let range: $unsigned = self.range.cast();
+                let zone: $unsigned = self.z.cast();
+
+                // This might seem very slow, generating a whole new
+                // SIMD vector for every sample rejection. For most uses
+                // though, the chance of rejection is small and provides good
+                // general performance. With multiple lanes, that chance is
+                // multiplied. To mitigate this, we replace only the lanes of
+                // the vector which fail, iteratively reducing the chance of
+                // rejection. The replacement method does however add a little
+                // overhead. Benchmarking or calculating probabilities might
+                // reveal contexts where this replacement method is slower.
+                let mut v: $unsigned = rng.gen();
+                loop {
+                    let (hi, lo) = v.wmul(range);
+                    let mask = lo.le(zone);
+                    if mask.all() {
+                        let hi: $ty = hi.cast();
+                        // wrapping addition
+                        let result = self.low + hi;
+                        // `select` here compiles to a blend operation
+                        // When `range.eq(0).none()` the compare and blend
+                        // operations are avoided.
+                        let v: $ty = v.cast();
+                        return range.gt($unsigned::splat(0)).select(result, v);
+                    }
+                    // Replace only the failing lanes
+                    v = mask.select(v, rng.gen());
+                }
+            }
+        }
+    };
+
+    // bulk implementation
+    ($(($unsigned:ident, $signed:ident),)+ $u_scalar:ident) => {
+        $(
+            uniform_simd_int_impl!($unsigned, $unsigned, $u_scalar);
+            uniform_simd_int_impl!($signed, $unsigned, $u_scalar);
+        )+
+    };
+}
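+
+// Editor's note: the block below is an illustrative sketch only (not part of
+// the upstream rand sources). It spells out, for a plain scalar `u8`, the
+// rejection-zone arithmetic used by `new_inclusive` above; the helper name
+// `zone_u8` is hypothetical.
+#[cfg(test)]
+mod rejection_zone_sketch {
+    // For `range` possible outputs drawn from a `u8`, `256 % range` raw inputs
+    // must be rejected so the remaining inputs divide evenly among the outputs.
+    fn zone_u8(range: u16) -> u16 {
+        let ints_to_reject = (256 - range) % range; // equal to 256 % range
+        255 - ints_to_reject
+    }
+
+    #[test]
+    fn zone_for_a_range_of_10() {
+        // 256 % 10 == 6 inputs are rejected, so zone == 249 and 250 of the 256
+        // possible inputs survive: exactly 25 per output value.
+        assert_eq!(zone_u8(10), 249);
+    }
+}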
+
+#[cfg(feature = "simd_support")]
+uniform_simd_int_impl! {
+    (u64x2, i64x2),
+    (u64x4, i64x4),
+    (u64x8, i64x8),
+    u64
+}
+
+#[cfg(feature = "simd_support")]
+uniform_simd_int_impl! {
+    (u32x2, i32x2),
+    (u32x4, i32x4),
+    (u32x8, i32x8),
+    (u32x16, i32x16),
+    u32
+}
+
+#[cfg(feature = "simd_support")]
+uniform_simd_int_impl! {
+    (u16x2, i16x2),
+    (u16x4, i16x4),
+    (u16x8, i16x8),
+    (u16x16, i16x16),
+    (u16x32, i16x32),
+    u16
+}
+
+#[cfg(feature = "simd_support")]
+uniform_simd_int_impl! {
+    (u8x2, i8x2),
+    (u8x4, i8x4),
+    (u8x8, i8x8),
+    (u8x16, i8x16),
+    (u8x32, i8x32),
+    (u8x64, i8x64),
+    u8
+}
+
+impl SampleUniform for char {
+    type Sampler = UniformChar;
+}
+
+/// The back-end implementing [`UniformSampler`] for `char`.
+///
+/// Unless you are implementing [`UniformSampler`] for your own type, this type
+/// should not be used directly; use [`Uniform`] instead.
+///
+/// This differs from integer range sampling since the code points in the range
+/// `0xD800..=0xDFFF` are used for surrogate pairs in UCS and UTF-16, and
+/// consequently are not valid Unicode scalar values. We must therefore avoid
+/// sampling values in this range.
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+pub struct UniformChar {
+    sampler: UniformInt<u32>,
+}
+
+/// UTF-16 surrogate range start
+const CHAR_SURROGATE_START: u32 = 0xD800;
+/// UTF-16 surrogate range size
+const CHAR_SURROGATE_LEN: u32 = 0xE000 - CHAR_SURROGATE_START;
+
+/// Convert `char` to compressed `u32`
+fn char_to_comp_u32(c: char) -> u32 {
+    match c as u32 {
+        c if c >= CHAR_SURROGATE_START => c - CHAR_SURROGATE_LEN,
+        c => c,
+    }
+}
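+
+// Editor's note: an illustrative check (not part of the upstream rand sources)
+// of the compression above: code points at or above the surrogate block are
+// shifted down by the block's length, so the index space handed to
+// `UniformInt<u32>` is contiguous.
+#[cfg(test)]
+mod char_compression_sketch {
+    use super::*;
+
+    #[test]
+    fn surrogate_gap_is_closed() {
+        // Just below the gap: unchanged.
+        assert_eq!(char_to_comp_u32('\u{D7FF}'), 0xD7FF);
+        // Just above the gap: shifted down by 0x800, adjacent to 0xD7FF.
+        assert_eq!(char_to_comp_u32('\u{E000}'), 0xD800);
+    }
+}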
+
+impl UniformSampler for UniformChar {
+    type X = char;
+
+    #[inline] // if the range is constant, this helps LLVM to do the
+              // calculations at compile-time.
+    fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+    where
+        B1: SampleBorrow<Self::X> + Sized,
+        B2: SampleBorrow<Self::X> + Sized,
+    {
+        let low = char_to_comp_u32(*low_b.borrow());
+        let high = char_to_comp_u32(*high_b.borrow());
+        let sampler = UniformInt::<u32>::new(low, high);
+        UniformChar { sampler }
+    }
+
+    #[inline] // if the range is constant, this helps LLVM to do the
+              // calculations at compile-time.
+    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+    where
+        B1: SampleBorrow<Self::X> + Sized,
+        B2: SampleBorrow<Self::X> + Sized,
+    {
+        let low = char_to_comp_u32(*low_b.borrow());
+        let high = char_to_comp_u32(*high_b.borrow());
+        let sampler = UniformInt::<u32>::new_inclusive(low, high);
+        UniformChar { sampler }
+    }
+
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+        let mut x = self.sampler.sample(rng);
+        if x >= CHAR_SURROGATE_START {
+            x += CHAR_SURROGATE_LEN;
+        }
+        // SAFETY: x must not be in surrogate range or greater than char::MAX.
+        // This relies on range constructors which accept char arguments.
+        // Validity of input char values is assumed.
+        unsafe { core::char::from_u32_unchecked(x) }
+    }
+}
+
+/// The back-end implementing [`UniformSampler`] for floating-point types.
+///
+/// Unless you are implementing [`UniformSampler`] for your own type, this type
+/// should not be used directly; use [`Uniform`] instead.
+///
+/// # Implementation notes
+///
+/// Instead of generating a float in the `[0, 1)` range using [`Standard`], the
+/// `UniformFloat` implementation converts the output of the PRNG directly. This
+/// way one or two steps can be optimized out.
+///
+/// The floats are first converted to a value in the `[1, 2)` interval using a
+/// transmute-based method, and then mapped to the expected range with a
+/// multiply and addition. Values produced this way carry the equivalent of 23
+/// bits of random mantissa for an `f32`, and 52 bits for an `f64`.
+///
+/// [`new`]: UniformSampler::new
+/// [`new_inclusive`]: UniformSampler::new_inclusive
+/// [`Standard`]: crate::distributions::Standard
+#[derive(Clone, Copy, Debug, PartialEq)]
+#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+pub struct UniformFloat<X> {
+    low: X,
+    scale: X,
+}
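+
+// Editor's note: a self-contained illustration (not part of the upstream rand
+// sources) of the transmute-based `[1, 2)` trick described above, written
+// directly against `f32::from_bits` rather than the crate's
+// `into_float_with_exponent` helper.
+#[cfg(test)]
+mod float_bit_trick_sketch {
+    #[test]
+    fn random_bits_to_unit_interval() {
+        let raw: u32 = 0x9E37_79B9; // stand-in for `rng.gen::<u32>()`
+        // Keep 23 random mantissa bits and splice in the exponent of 1.0
+        // (0x3F80_0000); the result is uniformly spread over [1, 2).
+        let value1_2 = f32::from_bits((raw >> 9) | 0x3F80_0000);
+        assert!((1.0..2.0).contains(&value1_2));
+        // Subtracting 1.0 maps it to [0, 1), ready for `low + x * scale`.
+        let value0_1 = value1_2 - 1.0;
+        assert!((0.0..1.0).contains(&value0_1));
+    }
+}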
+
+macro_rules! uniform_float_impl {
+    ($ty:ty, $uty:ident, $f_scalar:ident, $u_scalar:ident, $bits_to_discard:expr) => {
+        impl SampleUniform for $ty {
+            type Sampler = UniformFloat<$ty>;
+        }
+
+        impl UniformSampler for UniformFloat<$ty> {
+            type X = $ty;
+
+            fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+            where
+                B1: SampleBorrow<Self::X> + Sized,
+                B2: SampleBorrow<Self::X> + Sized,
+            {
+                let low = *low_b.borrow();
+                let high = *high_b.borrow();
+                debug_assert!(
+                    low.all_finite(),
+                    "Uniform::new called with `low` non-finite."
+                );
+                debug_assert!(
+                    high.all_finite(),
+                    "Uniform::new called with `high` non-finite."
+                );
+                assert!(low.all_lt(high), "Uniform::new called with `low >= high`");
+                let max_rand = <$ty>::splat(
+                    (::core::$u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0,
+                );
+
+                let mut scale = high - low;
+                assert!(scale.all_finite(), "Uniform::new: range overflow");
+
+                loop {
+                    let mask = (scale * max_rand + low).ge_mask(high);
+                    if mask.none() {
+                        break;
+                    }
+                    scale = scale.decrease_masked(mask);
+                }
+
+                debug_assert!(<$ty>::splat(0.0).all_le(scale));
+
+                UniformFloat { low, scale }
+            }
+
+            fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+            where
+                B1: SampleBorrow<Self::X> + Sized,
+                B2: SampleBorrow<Self::X> + Sized,
+            {
+                let low = *low_b.borrow();
+                let high = *high_b.borrow();
+                debug_assert!(
+                    low.all_finite(),
+                    "Uniform::new_inclusive called with `low` non-finite."
+                );
+                debug_assert!(
+                    high.all_finite(),
+                    "Uniform::new_inclusive called with `high` non-finite."
+                );
+                assert!(
+                    low.all_le(high),
+                    "Uniform::new_inclusive called with `low > high`"
+                );
+                let max_rand = <$ty>::splat(
+                    (::core::$u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0,
+                );
+
+                let mut scale = (high - low) / max_rand;
+                assert!(scale.all_finite(), "Uniform::new_inclusive: range overflow");
+
+                loop {
+                    let mask = (scale * max_rand + low).gt_mask(high);
+                    if mask.none() {
+                        break;
+                    }
+                    scale = scale.decrease_masked(mask);
+                }
+
+                debug_assert!(<$ty>::splat(0.0).all_le(scale));
+
+                UniformFloat { low, scale }
+            }
+
+            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+                // Generate a value in the range [1, 2)
+                let value1_2 = (rng.gen::<$uty>() >> $bits_to_discard).into_float_with_exponent(0);
+
+                // Get a value in the range [0, 1) in order to avoid
+                // overflowing into infinity when multiplying with scale
+                let value0_1 = value1_2 - 1.0;
+
+                // We don't use `f64::mul_add`, because it is not available with
+                // `no_std`. Furthermore, it is slower for some targets (but
+                // faster for others). However, the order of multiplication and
+                // addition is important, because on some platforms (e.g. ARM)
+                // it will be optimized to a single (non-FMA) instruction.
+                value0_1 * self.scale + self.low
+            }
+
+            #[inline]
+            fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
+            where
+                B1: SampleBorrow<Self::X> + Sized,
+                B2: SampleBorrow<Self::X> + Sized,
+            {
+                let low = *low_b.borrow();
+                let high = *high_b.borrow();
+                debug_assert!(
+                    low.all_finite(),
+                    "UniformSampler::sample_single called with `low` non-finite."
+                );
+                debug_assert!(
+                    high.all_finite(),
+                    "UniformSampler::sample_single called with `high` non-finite."
+                );
+                assert!(
+                    low.all_lt(high),
+                    "UniformSampler::sample_single: low >= high"
+                );
+                let mut scale = high - low;
+                assert!(scale.all_finite(), "UniformSampler::sample_single: range overflow");
+
+                loop {
+                    // Generate a value in the range [1, 2)
+                    let value1_2 =
+                        (rng.gen::<$uty>() >> $bits_to_discard).into_float_with_exponent(0);
+
+                    // Get a value in the range [0, 1) in order to avoid
+                    // overflowing into infinity when multiplying with scale
+                    let value0_1 = value1_2 - 1.0;
+
+                    // Doing multiply before addition allows some architectures
+                    // to use a single instruction.
+                    let res = value0_1 * scale + low;
+
+                    debug_assert!(low.all_le(res) || !scale.all_finite());
+                    if res.all_lt(high) {
+                        return res;
+                    }
+
+                    // This handles a number of edge cases.
+                    // * `low` or `high` is NaN. In this case `scale` and
+                    //   `res` are going to end up as NaN.
+                    // * `low` is negative infinity and `high` is finite.
+                    //   `scale` is going to be infinite and `res` will be
+                    //   NaN.
+                    // * `high` is positive infinity and `low` is finite.
+                    //   `scale` is going to be infinite and `res` will
+                    //   be infinite or NaN (if value0_1 is 0).
+                    // * `low` is negative infinity and `high` is positive
+                    //   infinity. `scale` will be infinite and `res` will
+                    //   be NaN.
+                    // * `low` and `high` are finite, but `high - low`
+                    //   overflows to infinite. `scale` will be infinite
+                    //   and `res` will be infinite or NaN (if value0_1 is 0).
+                    // So if `high` or `low` are non-finite, we are guaranteed
+                    // to fail the `res < high` check above and end up here.
+                    //
+                    // While we technically should check for non-finite `low`
+                    // and `high` before entering the loop, by doing the checks
+                    // here instead, we allow the common case to avoid these
+                    // checks. But we are still guaranteed that if `low` or
+                    // `high` are non-finite we'll end up here and can do the
+                    // appropriate checks.
+                    //
+                    // Likewise `high - low` overflowing to infinity is also
+                    // rare, so handle it here after the common case.
+                    let mask = !scale.finite_mask();
+                    if mask.any() {
+                        assert!(
+                            low.all_finite() && high.all_finite(),
+                            "Uniform::sample_single: low and high must be finite"
+                        );
+                        scale = scale.decrease_masked(mask);
+                    }
+                }
+            }
+        }
+    };
+}
+
+uniform_float_impl! { f32, u32, f32, u32, 32 - 23 }
+uniform_float_impl! { f64, u64, f64, u64, 64 - 52 }
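+
+// Editor's note: an illustrative check (not part of the upstream rand sources)
+// of `max_rand` for `f64` as computed in `new`/`new_inclusive` above: the
+// largest value the `[0, 1)` conversion can yield is 1 - 2^-52, one step below
+// 1.0 at that scale.
+#[cfg(test)]
+mod max_rand_sketch {
+    #[test]
+    fn f64_max_rand_is_just_below_one() {
+        // Mirrors `(u64::MAX >> 12).into_float_with_exponent(0) - 1.0` using
+        // plain `f64::from_bits`: the exponent of 1.0 with a full mantissa.
+        let bits = (::core::u64::MAX >> 12) | (1023u64 << 52);
+        let max_rand = f64::from_bits(bits) - 1.0;
+        assert_eq!(max_rand, 1.0 - ::core::f64::EPSILON);
+        assert!(max_rand < 1.0);
+    }
+}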
+
+#[cfg(feature = "simd_support")]
+uniform_float_impl! { f32x2, u32x2, f32, u32, 32 - 23 }
+#[cfg(feature = "simd_support")]
+uniform_float_impl! { f32x4, u32x4, f32, u32, 32 - 23 }
+#[cfg(feature = "simd_support")]
+uniform_float_impl! { f32x8, u32x8, f32, u32, 32 - 23 }
+#[cfg(feature = "simd_support")]
+uniform_float_impl! { f32x16, u32x16, f32, u32, 32 - 23 }
+
+#[cfg(feature = "simd_support")]
+uniform_float_impl! { f64x2, u64x2, f64, u64, 64 - 52 }
+#[cfg(feature = "simd_support")]
+uniform_float_impl! { f64x4, u64x4, f64, u64, 64 - 52 }
+#[cfg(feature = "simd_support")]
+uniform_float_impl! { f64x8, u64x8, f64, u64, 64 - 52 }
+
+
+/// The back-end implementing [`UniformSampler`] for `Duration`.
+///
+/// Unless you are implementing [`UniformSampler`] for your own types, this type
+/// should not be used directly; use [`Uniform`] instead.
+#[derive(Clone, Copy, Debug)]
+#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+pub struct UniformDuration {
+    mode: UniformDurationMode,
+    offset: u32,
+}
+
+#[derive(Debug, Copy, Clone)]
+#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+enum UniformDurationMode {
+    Small {
+        secs: u64,
+        nanos: Uniform<u32>,
+    },
+    Medium {
+        nanos: Uniform<u64>,
+    },
+    Large {
+        max_secs: u64,
+        max_nanos: u32,
+        secs: Uniform<u64>,
+    },
+}
+
+impl SampleUniform for Duration {
+    type Sampler = UniformDuration;
+}
+
+impl UniformSampler for UniformDuration {
+    type X = Duration;
+
+    #[inline]
+    fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
+    where
+        B1: SampleBorrow<Self::X> + Sized,
+        B2: SampleBorrow<Self::X> + Sized,
+    {
+        let low = *low_b.borrow();
+        let high = *high_b.borrow();
+        assert!(low < high, "Uniform::new called with `low >= high`");
+        UniformDuration::new_inclusive(low, high - Duration::new(0, 1))
+    }
+
+    #[inline]
+    fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
+    where
+        B1: SampleBorrow<Self::X> + Sized,
+        B2: SampleBorrow<Self::X> + Sized,
+    {
+        let low = *low_b.borrow();
+        let high = *high_b.borrow();
+        assert!(
+            low <= high,
+            "Uniform::new_inclusive called with `low > high`"
+        );
+
+        let low_s = low.as_secs();
+        let low_n = low.subsec_nanos();
+        let mut high_s = high.as_secs();
+        let mut high_n = high.subsec_nanos();
+
+        if high_n < low_n {
+            high_s -= 1;
+            high_n += 1_000_000_000;
+        }
+
+        let mode = if low_s == high_s {
+            UniformDurationMode::Small {
+                secs: low_s,
+                nanos: Uniform::new_inclusive(low_n, high_n),
+            }
+        } else {
+            let max = high_s
+                .checked_mul(1_000_000_000)
+                .and_then(|n| n.checked_add(u64::from(high_n)));
+
+            if let Some(higher_bound) = max {
+                let lower_bound = low_s * 1_000_000_000 + u64::from(low_n);
+                UniformDurationMode::Medium {
+                    nanos: Uniform::new_inclusive(lower_bound, higher_bound),
+                }
+            } else {
+                // An offset is applied to simplify generation of nanoseconds
+                let max_nanos = high_n - low_n;
+                UniformDurationMode::Large {
+                    max_secs: high_s,
+                    max_nanos,
+                    secs: Uniform::new_inclusive(low_s, high_s),
+                }
+            }
+        };
+        UniformDuration {
+            mode,
+            offset: low_n,
+        }
+    }
+
+    #[inline]
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Duration {
+        match self.mode {
+            UniformDurationMode::Small { secs, nanos } => {
+                let n = nanos.sample(rng);
+                Duration::new(secs, n)
+            }
+            UniformDurationMode::Medium { nanos } => {
+                let nanos = nanos.sample(rng);
+                Duration::new(nanos / 1_000_000_000, (nanos % 1_000_000_000) as u32)
+            }
+            UniformDurationMode::Large {
+                max_secs,
+                max_nanos,
+                secs,
+            } => {
+                // constant folding means this is at least as fast as `Rng::sample(Range)`
+                let nano_range = Uniform::new(0, 1_000_000_000);
+                loop {
+                    let s = secs.sample(rng);
+                    let n = nano_range.sample(rng);
+                    if !(s == max_secs && n > max_nanos) {
+                        let sum = n + self.offset;
+                        break Duration::new(s, sum);
+                    }
+                }
+            }
+        }
+    }
+}
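+
+// Editor's note: a minimal usage sketch (not part of the upstream rand
+// sources), relying only on items already used by the tests below
+// (`crate::test::rng`, `Uniform::new`, `Distribution::sample`).
+#[cfg(test)]
+mod uniform_duration_sketch {
+    use super::*;
+
+    #[test]
+    fn samples_stay_in_range() {
+        let mut rng = crate::test::rng(900);
+        let between = Uniform::new(Duration::from_millis(10), Duration::from_millis(20));
+        for _ in 0..1_000 {
+            let d = between.sample(&mut rng);
+            assert!(Duration::from_millis(10) <= d && d < Duration::from_millis(20));
+        }
+    }
+}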
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::rngs::mock::StepRng;
+
+    #[test]
+    #[cfg(feature = "serde1")]
+    fn test_serialization_uniform_duration() {
+        let distr = UniformDuration::new(Duration::from_secs(10), Duration::from_secs(60));
+        let de_distr: UniformDuration = bincode::deserialize(&bincode::serialize(&distr).unwrap()).unwrap();
+        assert_eq!(
+            distr.offset, de_distr.offset
+        );
+        match (distr.mode, de_distr.mode) {
+            (UniformDurationMode::Small {secs: a_secs, nanos: a_nanos}, UniformDurationMode::Small {secs, nanos}) => {
+                assert_eq!(a_secs, secs);
+
+                assert_eq!(a_nanos.0.low, nanos.0.low);
+                assert_eq!(a_nanos.0.range, nanos.0.range);
+                assert_eq!(a_nanos.0.z, nanos.0.z);
+            }
+            (UniformDurationMode::Medium {nanos: a_nanos} , UniformDurationMode::Medium {nanos}) => {
+                assert_eq!(a_nanos.0.low, nanos.0.low);
+                assert_eq!(a_nanos.0.range, nanos.0.range);
+                assert_eq!(a_nanos.0.z, nanos.0.z);
+            }
+            (UniformDurationMode::Large {max_secs:a_max_secs, max_nanos:a_max_nanos, secs:a_secs}, UniformDurationMode::Large {max_secs, max_nanos, secs} ) => {
+                assert_eq!(a_max_secs, max_secs);
+                assert_eq!(a_max_nanos, max_nanos);
+
+                assert_eq!(a_secs.0.low, secs.0.low);
+                assert_eq!(a_secs.0.range, secs.0.range);
+                assert_eq!(a_secs.0.z, secs.0.z);
+            }
+            _ => panic!("`UniformDurationMode` was not serialized/deserialized correctly")
+        }
+    }
+    
+    #[test]
+    #[cfg(feature = "serde1")]
+    fn test_uniform_serialization() {
+        let unit_box: Uniform<i32>  = Uniform::new(-1, 1);
+        let de_unit_box: Uniform<i32> = bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap();
+
+        assert_eq!(unit_box.0.low, de_unit_box.0.low);
+        assert_eq!(unit_box.0.range, de_unit_box.0.range);
+        assert_eq!(unit_box.0.z, de_unit_box.0.z);
+
+        let unit_box: Uniform<f32> = Uniform::new(-1., 1.);
+        let de_unit_box: Uniform<f32> = bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap();
+
+        assert_eq!(unit_box.0.low, de_unit_box.0.low);
+        assert_eq!(unit_box.0.scale, de_unit_box.0.scale);
+    }
+
+    #[should_panic]
+    #[test]
+    fn test_uniform_bad_limits_equal_int() {
+        Uniform::new(10, 10);
+    }
+
+    #[test]
+    fn test_uniform_good_limits_equal_int() {
+        let mut rng = crate::test::rng(804);
+        let dist = Uniform::new_inclusive(10, 10);
+        for _ in 0..20 {
+            assert_eq!(rng.sample(dist), 10);
+        }
+    }
+
+    #[should_panic]
+    #[test]
+    fn test_uniform_bad_limits_flipped_int() {
+        Uniform::new(10, 5);
+    }
+
+    #[test]
+    #[cfg_attr(miri, ignore)] // Miri is too slow
+    fn test_integers() {
+        use core::{i128, u128};
+        use core::{i16, i32, i64, i8, isize};
+        use core::{u16, u32, u64, u8, usize};
+
+        let mut rng = crate::test::rng(251);
+        macro_rules! t {
+            ($ty:ident, $v:expr, $le:expr, $lt:expr) => {{
+                for &(low, high) in $v.iter() {
+                    let my_uniform = Uniform::new(low, high);
+                    for _ in 0..1000 {
+                        let v: $ty = rng.sample(my_uniform);
+                        assert!($le(low, v) && $lt(v, high));
+                    }
+
+                    let my_uniform = Uniform::new_inclusive(low, high);
+                    for _ in 0..1000 {
+                        let v: $ty = rng.sample(my_uniform);
+                        assert!($le(low, v) && $le(v, high));
+                    }
+
+                    let my_uniform = Uniform::new(&low, high);
+                    for _ in 0..1000 {
+                        let v: $ty = rng.sample(my_uniform);
+                        assert!($le(low, v) && $lt(v, high));
+                    }
+
+                    let my_uniform = Uniform::new_inclusive(&low, &high);
+                    for _ in 0..1000 {
+                        let v: $ty = rng.sample(my_uniform);
+                        assert!($le(low, v) && $le(v, high));
+                    }
+
+                    for _ in 0..1000 {
+                        let v = <$ty as SampleUniform>::Sampler::sample_single(low, high, &mut rng);
+                        assert!($le(low, v) && $lt(v, high));
+                    }
+
+                    for _ in 0..1000 {
+                        let v = <$ty as SampleUniform>::Sampler::sample_single_inclusive(low, high, &mut rng);
+                        assert!($le(low, v) && $le(v, high));
+                    }
+                }
+            }};
+
+            // scalar bulk
+            ($($ty:ident),*) => {{
+                $(t!(
+                    $ty,
+                    [(0, 10), (10, 127), ($ty::MIN, $ty::MAX)],
+                    |x, y| x <= y,
+                    |x, y| x < y
+                );)*
+            }};
+
+            // simd bulk
+            ($($ty:ident),* => $scalar:ident) => {{
+                $(t!(
+                    $ty,
+                    [
+                        ($ty::splat(0), $ty::splat(10)),
+                        ($ty::splat(10), $ty::splat(127)),
+                        ($ty::splat($scalar::MIN), $ty::splat($scalar::MAX)),
+                    ],
+                    |x: $ty, y| x.le(y).all(),
+                    |x: $ty, y| x.lt(y).all()
+                );)*
+            }};
+        }
+        t!(i8, i16, i32, i64, isize, u8, u16, u32, u64, usize, i128, u128);
+
+        #[cfg(feature = "simd_support")]
+        {
+            t!(u8x2, u8x4, u8x8, u8x16, u8x32, u8x64 => u8);
+            t!(i8x2, i8x4, i8x8, i8x16, i8x32, i8x64 => i8);
+            t!(u16x2, u16x4, u16x8, u16x16, u16x32 => u16);
+            t!(i16x2, i16x4, i16x8, i16x16, i16x32 => i16);
+            t!(u32x2, u32x4, u32x8, u32x16 => u32);
+            t!(i32x2, i32x4, i32x8, i32x16 => i32);
+            t!(u64x2, u64x4, u64x8 => u64);
+            t!(i64x2, i64x4, i64x8 => i64);
+        }
+    }
+
+    #[test]
+    #[cfg_attr(miri, ignore)] // Miri is too slow
+    fn test_char() {
+        let mut rng = crate::test::rng(891);
+        let mut max = core::char::from_u32(0).unwrap();
+        for _ in 0..100 {
+            let c = rng.gen_range('A'..='Z');
+            assert!(('A'..='Z').contains(&c));
+            max = max.max(c);
+        }
+        assert_eq!(max, 'Z');
+        let d = Uniform::new(
+            core::char::from_u32(0xD7F0).unwrap(),
+            core::char::from_u32(0xE010).unwrap(),
+        );
+        for _ in 0..100 {
+            let c = d.sample(&mut rng);
+            assert!((c as u32) < 0xD800 || (c as u32) > 0xDFFF);
+        }
+    }
+
+    #[test]
+    #[cfg_attr(miri, ignore)] // Miri is too slow
+    fn test_floats() {
+        let mut rng = crate::test::rng(252);
+        let mut zero_rng = StepRng::new(0, 0);
+        let mut max_rng = StepRng::new(0xffff_ffff_ffff_ffff, 0);
+        macro_rules! t {
+            ($ty:ty, $f_scalar:ident, $bits_shifted:expr) => {{
+                let v: &[($f_scalar, $f_scalar)] = &[
+                    (0.0, 100.0),
+                    (-1e35, -1e25),
+                    (1e-35, 1e-25),
+                    (-1e35, 1e35),
+                    (<$f_scalar>::from_bits(0), <$f_scalar>::from_bits(3)),
+                    (-<$f_scalar>::from_bits(10), -<$f_scalar>::from_bits(1)),
+                    (-<$f_scalar>::from_bits(5), 0.0),
+                    (-<$f_scalar>::from_bits(7), -0.0),
+                    (0.1 * ::core::$f_scalar::MAX, ::core::$f_scalar::MAX),
+                    (-::core::$f_scalar::MAX * 0.2, ::core::$f_scalar::MAX * 0.7),
+                ];
+                for &(low_scalar, high_scalar) in v.iter() {
+                    for lane in 0..<$ty>::lanes() {
+                        let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
+                        let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
+                        let my_uniform = Uniform::new(low, high);
+                        let my_incl_uniform = Uniform::new_inclusive(low, high);
+                        for _ in 0..100 {
+                            let v = rng.sample(my_uniform).extract(lane);
+                            assert!(low_scalar <= v && v < high_scalar);
+                            let v = rng.sample(my_incl_uniform).extract(lane);
+                            assert!(low_scalar <= v && v <= high_scalar);
+                            let v = <$ty as SampleUniform>::Sampler
+                                ::sample_single(low, high, &mut rng).extract(lane);
+                            assert!(low_scalar <= v && v < high_scalar);
+                        }
+
+                        assert_eq!(
+                            rng.sample(Uniform::new_inclusive(low, low)).extract(lane),
+                            low_scalar
+                        );
+
+                        assert_eq!(zero_rng.sample(my_uniform).extract(lane), low_scalar);
+                        assert_eq!(zero_rng.sample(my_incl_uniform).extract(lane), low_scalar);
+                        assert_eq!(<$ty as SampleUniform>::Sampler
+                            ::sample_single(low, high, &mut zero_rng)
+                            .extract(lane), low_scalar);
+                        assert!(max_rng.sample(my_uniform).extract(lane) < high_scalar);
+                        assert!(max_rng.sample(my_incl_uniform).extract(lane) <= high_scalar);
+
+                        // Don't run this test for really tiny differences between high and low
+                        // since for those rounding might result in selecting high for a very
+                        // long time.
+                        if (high_scalar - low_scalar) > 0.0001 {
+                            let mut lowering_max_rng = StepRng::new(
+                                0xffff_ffff_ffff_ffff,
+                                (-1i64 << $bits_shifted) as u64,
+                            );
+                            assert!(
+                                <$ty as SampleUniform>::Sampler
+                                    ::sample_single(low, high, &mut lowering_max_rng)
+                                    .extract(lane) < high_scalar
+                            );
+                        }
+                    }
+                }
+
+                assert_eq!(
+                    rng.sample(Uniform::new_inclusive(
+                        ::core::$f_scalar::MAX,
+                        ::core::$f_scalar::MAX
+                    )),
+                    ::core::$f_scalar::MAX
+                );
+                assert_eq!(
+                    rng.sample(Uniform::new_inclusive(
+                        -::core::$f_scalar::MAX,
+                        -::core::$f_scalar::MAX
+                    )),
+                    -::core::$f_scalar::MAX
+                );
+            }};
+        }
+
+        t!(f32, f32, 32 - 23);
+        t!(f64, f64, 64 - 52);
+        #[cfg(feature = "simd_support")]
+        {
+            t!(f32x2, f32, 32 - 23);
+            t!(f32x4, f32, 32 - 23);
+            t!(f32x8, f32, 32 - 23);
+            t!(f32x16, f32, 32 - 23);
+            t!(f64x2, f64, 64 - 52);
+            t!(f64x4, f64, 64 - 52);
+            t!(f64x8, f64, 64 - 52);
+        }
+    }
+
+    #[test]
+    #[should_panic]
+    fn test_float_overflow() {
+        let _ = Uniform::from(::core::f64::MIN..::core::f64::MAX);
+    }
+
+    #[test]
+    #[should_panic]
+    fn test_float_overflow_single() {
+        let mut rng = crate::test::rng(252);
+        rng.gen_range(::core::f64::MIN..::core::f64::MAX);
+    }
+
+    #[test]
+    #[cfg(all(
+        feature = "std",
+        not(target_arch = "wasm32"),
+        not(target_arch = "asmjs")
+    ))]
+    fn test_float_assertions() {
+        use super::SampleUniform;
+        use std::panic::catch_unwind;
+        fn range<T: SampleUniform>(low: T, high: T) {
+            let mut rng = crate::test::rng(253);
+            T::Sampler::sample_single(low, high, &mut rng);
+        }
+
+        macro_rules! t {
+            ($ty:ident, $f_scalar:ident) => {{
+                let v: &[($f_scalar, $f_scalar)] = &[
+                    (::std::$f_scalar::NAN, 0.0),
+                    (1.0, ::std::$f_scalar::NAN),
+                    (::std::$f_scalar::NAN, ::std::$f_scalar::NAN),
+                    (1.0, 0.5),
+                    (::std::$f_scalar::MAX, -::std::$f_scalar::MAX),
+                    (::std::$f_scalar::INFINITY, ::std::$f_scalar::INFINITY),
+                    (
+                        ::std::$f_scalar::NEG_INFINITY,
+                        ::std::$f_scalar::NEG_INFINITY,
+                    ),
+                    (::std::$f_scalar::NEG_INFINITY, 5.0),
+                    (5.0, ::std::$f_scalar::INFINITY),
+                    (::std::$f_scalar::NAN, ::std::$f_scalar::INFINITY),
+                    (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::NAN),
+                    (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::INFINITY),
+                ];
+                for &(low_scalar, high_scalar) in v.iter() {
+                    for lane in 0..<$ty>::lanes() {
+                        let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar);
+                        let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar);
+                        assert!(catch_unwind(|| range(low, high)).is_err());
+                        assert!(catch_unwind(|| Uniform::new(low, high)).is_err());
+                        assert!(catch_unwind(|| Uniform::new_inclusive(low, high)).is_err());
+                        assert!(catch_unwind(|| range(low, low)).is_err());
+                        assert!(catch_unwind(|| Uniform::new(low, low)).is_err());
+                    }
+                }
+            }};
+        }
+
+        t!(f32, f32);
+        t!(f64, f64);
+        #[cfg(feature = "simd_support")]
+        {
+            t!(f32x2, f32);
+            t!(f32x4, f32);
+            t!(f32x8, f32);
+            t!(f32x16, f32);
+            t!(f64x2, f64);
+            t!(f64x4, f64);
+            t!(f64x8, f64);
+        }
+    }
+
+
+    #[test]
+    #[cfg_attr(miri, ignore)] // Miri is too slow
+    fn test_durations() {
+        let mut rng = crate::test::rng(253);
+
+        let v = &[
+            (Duration::new(10, 50000), Duration::new(100, 1234)),
+            (Duration::new(0, 100), Duration::new(1, 50)),
+            (
+                Duration::new(0, 0),
+                Duration::new(u64::max_value(), 999_999_999),
+            ),
+        ];
+        for &(low, high) in v.iter() {
+            let my_uniform = Uniform::new(low, high);
+            for _ in 0..1000 {
+                let v = rng.sample(my_uniform);
+                assert!(low <= v && v < high);
+            }
+        }
+    }
+
+    #[test]
+    fn test_custom_uniform() {
+        use crate::distributions::uniform::{
+            SampleBorrow, SampleUniform, UniformFloat, UniformSampler,
+        };
+        #[derive(Clone, Copy, PartialEq, PartialOrd)]
+        struct MyF32 {
+            x: f32,
+        }
+        #[derive(Clone, Copy, Debug)]
+        struct UniformMyF32(UniformFloat<f32>);
+        impl UniformSampler for UniformMyF32 {
+            type X = MyF32;
+
+            fn new<B1, B2>(low: B1, high: B2) -> Self
+            where
+                B1: SampleBorrow<Self::X> + Sized,
+                B2: SampleBorrow<Self::X> + Sized,
+            {
+                UniformMyF32(UniformFloat::<f32>::new(low.borrow().x, high.borrow().x))
+            }
+
+            fn new_inclusive<B1, B2>(low: B1, high: B2) -> Self
+            where
+                B1: SampleBorrow<Self::X> + Sized,
+                B2: SampleBorrow<Self::X> + Sized,
+            {
+                UniformSampler::new(low, high)
+            }
+
+            fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+                MyF32 {
+                    x: self.0.sample(rng),
+                }
+            }
+        }
+        impl SampleUniform for MyF32 {
+            type Sampler = UniformMyF32;
+        }
+
+        let (low, high) = (MyF32 { x: 17.0f32 }, MyF32 { x: 22.0f32 });
+        let uniform = Uniform::new(low, high);
+        let mut rng = crate::test::rng(804);
+        for _ in 0..100 {
+            let x: MyF32 = rng.sample(uniform);
+            assert!(low <= x && x < high);
+        }
+    }
+
+    #[test]
+    fn test_uniform_from_std_range() {
+        let r = Uniform::from(2u32..7);
+        assert_eq!(r.0.low, 2);
+        assert_eq!(r.0.range, 5);
+        let r = Uniform::from(2.0f64..7.0);
+        assert_eq!(r.0.low, 2.0);
+        assert_eq!(r.0.scale, 5.0);
+    }
+
+    #[test]
+    fn test_uniform_from_std_range_inclusive() {
+        let r = Uniform::from(2u32..=6);
+        assert_eq!(r.0.low, 2);
+        assert_eq!(r.0.range, 5);
+        let r = Uniform::from(2.0f64..=7.0);
+        assert_eq!(r.0.low, 2.0);
+        assert!(r.0.scale > 5.0);
+        assert!(r.0.scale < 5.0 + 1e-14);
+    }
+
+    #[test]
+    fn value_stability() {
+        fn test_samples<T: SampleUniform + Copy + core::fmt::Debug + PartialEq>(
+            lb: T, ub: T, expected_single: &[T], expected_multiple: &[T],
+        ) where Uniform<T>: Distribution<T> {
+            let mut rng = crate::test::rng(897);
+            let mut buf = [lb; 3];
+
+            for x in &mut buf {
+                *x = T::Sampler::sample_single(lb, ub, &mut rng);
+            }
+            assert_eq!(&buf, expected_single);
+
+            let distr = Uniform::new(lb, ub);
+            for x in &mut buf {
+                *x = rng.sample(&distr);
+            }
+            assert_eq!(&buf, expected_multiple);
+        }
+
+        // We test on a subset of types; possibly we should do more.
+        // TODO: SIMD types
+
+        test_samples(11u8, 219, &[17, 66, 214], &[181, 93, 165]);
+        test_samples(11u32, 219, &[17, 66, 214], &[181, 93, 165]);
+
+        test_samples(0f32, 1e-2f32, &[0.0003070104, 0.0026630748, 0.00979833], &[
+            0.008194133,
+            0.00398172,
+            0.007428536,
+        ]);
+        test_samples(
+            -1e10f64,
+            1e10f64,
+            &[-4673848682.871551, 6388267422.932352, 4857075081.198343],
+            &[1173375212.1808167, 1917642852.109581, 2365076174.3153973],
+        );
+
+        test_samples(
+            Duration::new(2, 0),
+            Duration::new(4, 0),
+            &[
+                Duration::new(2, 532615131),
+                Duration::new(3, 638826742),
+                Duration::new(3, 485707508),
+            ],
+            &[
+                Duration::new(3, 117337521),
+                Duration::new(3, 191764285),
+                Duration::new(3, 236507617),
+            ],
+        );
+    }
+
+    #[test]
+    fn uniform_distributions_can_be_compared() {
+        assert_eq!(Uniform::new(1.0, 2.0), Uniform::new(1.0, 2.0));
+
+        // To cover UniformInt
+        assert_eq!(Uniform::new(1 as u32, 2 as u32), Uniform::new(1 as u32, 2 as u32));
+    }
+}
+
+
\ No newline at end of file diff --git a/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/utils.rs.html b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/utils.rs.html new file mode 100644 index 0000000..118f6fa --- /dev/null +++ b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/utils.rs.html @@ -0,0 +1,861 @@ +utils.rs - source
// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Math helper functions
+
+#[cfg(feature = "simd_support")] use packed_simd::*;
+
+
+pub(crate) trait WideningMultiply<RHS = Self> {
+    type Output;
+
+    fn wmul(self, x: RHS) -> Self::Output;
+}
+
+macro_rules! wmul_impl {
+    ($ty:ty, $wide:ty, $shift:expr) => {
+        impl WideningMultiply for $ty {
+            type Output = ($ty, $ty);
+
+            #[inline(always)]
+            fn wmul(self, x: $ty) -> Self::Output {
+                let tmp = (self as $wide) * (x as $wide);
+                ((tmp >> $shift) as $ty, tmp as $ty)
+            }
+        }
+    };
+
+    // simd bulk implementation
+    ($(($ty:ident, $wide:ident),)+, $shift:expr) => {
+        $(
+            impl WideningMultiply for $ty {
+                type Output = ($ty, $ty);
+
+                #[inline(always)]
+                fn wmul(self, x: $ty) -> Self::Output {
+                    // For supported vectors, this should compile to a couple
+                    // supported multiply & swizzle instructions (no actual
+                    // casting).
+                    // TODO: optimize
+                    let y: $wide = self.cast();
+                    let x: $wide = x.cast();
+                    let tmp = y * x;
+                    let hi: $ty = (tmp >> $shift).cast();
+                    let lo: $ty = tmp.cast();
+                    (hi, lo)
+                }
+            }
+        )+
+    };
+}
+wmul_impl! { u8, u16, 8 }
+wmul_impl! { u16, u32, 16 }
+wmul_impl! { u32, u64, 32 }
+wmul_impl! { u64, u128, 64 }
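+
+// Editor's note: an illustrative worked example (not part of the upstream rand
+// sources) of the scalar widening multiply defined above.
+#[cfg(test)]
+mod wmul_sketch {
+    use super::WideningMultiply;
+
+    #[test]
+    fn u8_widening_multiply() {
+        // 200 * 100 = 20_000 = 78 * 256 + 32, so the (high, low) halves are (78, 32).
+        assert_eq!(200u8.wmul(100), (78, 32));
+    }
+}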
+
+// This code is a translation of the __mulddi3 function in LLVM's
+// compiler-rt. It is an optimised variant of the common method
+// `(a + b) * (c + d) = ac + ad + bc + bd`.
+//
+// For some reason LLVM can optimise the C version very well, but
+// keeps shuffling registers in this Rust translation.
+macro_rules! wmul_impl_large {
+    ($ty:ty, $half:expr) => {
+        impl WideningMultiply for $ty {
+            type Output = ($ty, $ty);
+
+            #[inline(always)]
+            fn wmul(self, b: $ty) -> Self::Output {
+                const LOWER_MASK: $ty = !0 >> $half;
+                let mut low = (self & LOWER_MASK).wrapping_mul(b & LOWER_MASK);
+                let mut t = low >> $half;
+                low &= LOWER_MASK;
+                t += (self >> $half).wrapping_mul(b & LOWER_MASK);
+                low += (t & LOWER_MASK) << $half;
+                let mut high = t >> $half;
+                t = low >> $half;
+                low &= LOWER_MASK;
+                t += (b >> $half).wrapping_mul(self & LOWER_MASK);
+                low += (t & LOWER_MASK) << $half;
+                high += t >> $half;
+                high += (self >> $half).wrapping_mul(b >> $half);
+
+                (high, low)
+            }
+        }
+    };
+
+    // simd bulk implementation
+    (($($ty:ty,)+) $scalar:ty, $half:expr) => {
+        $(
+            impl WideningMultiply for $ty {
+                type Output = ($ty, $ty);
+
+                #[inline(always)]
+                fn wmul(self, b: $ty) -> Self::Output {
+                    // needs wrapping multiplication
+                    const LOWER_MASK: $scalar = !0 >> $half;
+                    let mut low = (self & LOWER_MASK) * (b & LOWER_MASK);
+                    let mut t = low >> $half;
+                    low &= LOWER_MASK;
+                    t += (self >> $half) * (b & LOWER_MASK);
+                    low += (t & LOWER_MASK) << $half;
+                    let mut high = t >> $half;
+                    t = low >> $half;
+                    low &= LOWER_MASK;
+                    t += (b >> $half) * (self & LOWER_MASK);
+                    low += (t & LOWER_MASK) << $half;
+                    high += t >> $half;
+                    high += (self >> $half) * (b >> $half);
+
+                    (high, low)
+                }
+            }
+        )+
+    };
+}
+wmul_impl_large! { u128, 64 }
+
+macro_rules! wmul_impl_usize {
+    ($ty:ty) => {
+        impl WideningMultiply for usize {
+            type Output = (usize, usize);
+
+            #[inline(always)]
+            fn wmul(self, x: usize) -> Self::Output {
+                let (high, low) = (self as $ty).wmul(x as $ty);
+                (high as usize, low as usize)
+            }
+        }
+    };
+}
+#[cfg(target_pointer_width = "16")]
+wmul_impl_usize! { u16 }
+#[cfg(target_pointer_width = "32")]
+wmul_impl_usize! { u32 }
+#[cfg(target_pointer_width = "64")]
+wmul_impl_usize! { u64 }
+
+#[cfg(feature = "simd_support")]
+mod simd_wmul {
+    use super::*;
+    #[cfg(target_arch = "x86")] use core::arch::x86::*;
+    #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*;
+
+    wmul_impl! {
+        (u8x2, u16x2),
+        (u8x4, u16x4),
+        (u8x8, u16x8),
+        (u8x16, u16x16),
+        (u8x32, u16x32),,
+        8
+    }
+
+    wmul_impl! { (u16x2, u32x2),, 16 }
+    wmul_impl! { (u16x4, u32x4),, 16 }
+    #[cfg(not(target_feature = "sse2"))]
+    wmul_impl! { (u16x8, u32x8),, 16 }
+    #[cfg(not(target_feature = "avx2"))]
+    wmul_impl! { (u16x16, u32x16),, 16 }
+
+    // 16-bit lane widths allow use of the x86 `mulhi` instructions, which
+    // means `wmul` can be implemented with only two instructions.
+    #[allow(unused_macros)]
+    macro_rules! wmul_impl_16 {
+        ($ty:ident, $intrinsic:ident, $mulhi:ident, $mullo:ident) => {
+            impl WideningMultiply for $ty {
+                type Output = ($ty, $ty);
+
+                #[inline(always)]
+                fn wmul(self, x: $ty) -> Self::Output {
+                    let b = $intrinsic::from_bits(x);
+                    let a = $intrinsic::from_bits(self);
+                    let hi = $ty::from_bits(unsafe { $mulhi(a, b) });
+                    let lo = $ty::from_bits(unsafe { $mullo(a, b) });
+                    (hi, lo)
+                }
+            }
+        };
+    }
+
+    #[cfg(target_feature = "sse2")]
+    wmul_impl_16! { u16x8, __m128i, _mm_mulhi_epu16, _mm_mullo_epi16 }
+    #[cfg(target_feature = "avx2")]
+    wmul_impl_16! { u16x16, __m256i, _mm256_mulhi_epu16, _mm256_mullo_epi16 }
+    // FIXME: there are no `__m512i` types in stdsimd yet, so `wmul::<u16x32>`
+    // cannot use the same implementation.
+
+    wmul_impl! {
+        (u32x2, u64x2),
+        (u32x4, u64x4),
+        (u32x8, u64x8),,
+        32
+    }
+
+    // TODO: optimize, this seems to seriously slow things down
+    wmul_impl_large! { (u8x64,) u8, 4 }
+    wmul_impl_large! { (u16x32,) u16, 8 }
+    wmul_impl_large! { (u32x16,) u32, 16 }
+    wmul_impl_large! { (u64x2, u64x4, u64x8,) u64, 32 }
+}
+
+/// Helper trait when dealing with scalar and SIMD floating point types.
+pub(crate) trait FloatSIMDUtils {
+    // `PartialOrd` for vectors compares lexicographically. We want to compare all
+    // the individual SIMD lanes instead, and get the combined result over all
+    // lanes. This is possible using something like `a.lt(b).all()`, but we
+    // implement it as a trait so we can write the same code for `f32` and `f64`.
+    // Only the comparison functions we need are implemented.
+    fn all_lt(self, other: Self) -> bool;
+    fn all_le(self, other: Self) -> bool;
+    fn all_finite(self) -> bool;
+
+    type Mask;
+    fn finite_mask(self) -> Self::Mask;
+    fn gt_mask(self, other: Self) -> Self::Mask;
+    fn ge_mask(self, other: Self) -> Self::Mask;
+
+    // Decrease all lanes where the mask is `true` to the next lower value
+    // representable by the floating-point type. At least one of the lanes
+    // must be set.
+    fn decrease_masked(self, mask: Self::Mask) -> Self;
+
+    // Convert from int value. Conversion is done while retaining the numerical
+    // value, not by retaining the binary representation.
+    type UInt;
+    fn cast_from_int(i: Self::UInt) -> Self;
+}
+
+/// Implement functions available in std builds but missing from core primitives
+#[cfg(not(std))]
+// False positive: We are following `std` here.
+#[allow(clippy::wrong_self_convention)]
+pub(crate) trait Float: Sized {
+    fn is_nan(self) -> bool;
+    fn is_infinite(self) -> bool;
+    fn is_finite(self) -> bool;
+}
+
+/// Implement functions on f32/f64 to give them APIs similar to SIMD types
+pub(crate) trait FloatAsSIMD: Sized {
+    #[inline(always)]
+    fn lanes() -> usize {
+        1
+    }
+    #[inline(always)]
+    fn splat(scalar: Self) -> Self {
+        scalar
+    }
+    #[inline(always)]
+    fn extract(self, index: usize) -> Self {
+        debug_assert_eq!(index, 0);
+        self
+    }
+    #[inline(always)]
+    fn replace(self, index: usize, new_value: Self) -> Self {
+        debug_assert_eq!(index, 0);
+        new_value
+    }
+}
+
+pub(crate) trait BoolAsSIMD: Sized {
+    fn any(self) -> bool;
+    fn all(self) -> bool;
+    fn none(self) -> bool;
+}
+
+impl BoolAsSIMD for bool {
+    #[inline(always)]
+    fn any(self) -> bool {
+        self
+    }
+
+    #[inline(always)]
+    fn all(self) -> bool {
+        self
+    }
+
+    #[inline(always)]
+    fn none(self) -> bool {
+        !self
+    }
+}
+
+macro_rules! scalar_float_impl {
+    ($ty:ident, $uty:ident) => {
+        #[cfg(not(std))]
+        impl Float for $ty {
+            #[inline]
+            fn is_nan(self) -> bool {
+                self != self
+            }
+
+            #[inline]
+            fn is_infinite(self) -> bool {
+                self == ::core::$ty::INFINITY || self == ::core::$ty::NEG_INFINITY
+            }
+
+            #[inline]
+            fn is_finite(self) -> bool {
+                !(self.is_nan() || self.is_infinite())
+            }
+        }
+
+        impl FloatSIMDUtils for $ty {
+            type Mask = bool;
+            type UInt = $uty;
+
+            #[inline(always)]
+            fn all_lt(self, other: Self) -> bool {
+                self < other
+            }
+
+            #[inline(always)]
+            fn all_le(self, other: Self) -> bool {
+                self <= other
+            }
+
+            #[inline(always)]
+            fn all_finite(self) -> bool {
+                self.is_finite()
+            }
+
+            #[inline(always)]
+            fn finite_mask(self) -> Self::Mask {
+                self.is_finite()
+            }
+
+            #[inline(always)]
+            fn gt_mask(self, other: Self) -> Self::Mask {
+                self > other
+            }
+
+            #[inline(always)]
+            fn ge_mask(self, other: Self) -> Self::Mask {
+                self >= other
+            }
+
+            #[inline(always)]
+            fn decrease_masked(self, mask: Self::Mask) -> Self {
+                debug_assert!(mask, "At least one lane must be set");
+                <$ty>::from_bits(self.to_bits() - 1)
+            }
+
+            #[inline]
+            fn cast_from_int(i: Self::UInt) -> Self {
+                i as $ty
+            }
+        }
+
+        impl FloatAsSIMD for $ty {}
+    };
+}
+
+scalar_float_impl!(f32, u32);
+scalar_float_impl!(f64, u64);
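+
+// Editor's note: an illustrative check (not part of the upstream rand sources)
+// of the bit trick behind `decrease_masked` for scalars: subtracting one from
+// a positive, finite float's bit pattern yields the next representable value
+// below it.
+#[cfg(test)]
+mod decrease_masked_sketch {
+    use super::FloatSIMDUtils;
+
+    #[test]
+    fn steps_down_one_ulp() {
+        let x = 1.0f64;
+        let below = x.decrease_masked(true);
+        assert!(below < x);
+        // Exactly one representable value apart.
+        assert_eq!(below.to_bits() + 1, x.to_bits());
+    }
+}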
+
+
+#[cfg(feature = "simd_support")]
+macro_rules! simd_impl {
+    ($ty:ident, $f_scalar:ident, $mty:ident, $uty:ident) => {
+        impl FloatSIMDUtils for $ty {
+            type Mask = $mty;
+            type UInt = $uty;
+
+            #[inline(always)]
+            fn all_lt(self, other: Self) -> bool {
+                self.lt(other).all()
+            }
+
+            #[inline(always)]
+            fn all_le(self, other: Self) -> bool {
+                self.le(other).all()
+            }
+
+            #[inline(always)]
+            fn all_finite(self) -> bool {
+                self.finite_mask().all()
+            }
+
+            #[inline(always)]
+            fn finite_mask(self) -> Self::Mask {
+                // This can possibly be done faster by checking bit patterns
+                let neg_inf = $ty::splat(::core::$f_scalar::NEG_INFINITY);
+                let pos_inf = $ty::splat(::core::$f_scalar::INFINITY);
+                self.gt(neg_inf) & self.lt(pos_inf)
+            }
+
+            #[inline(always)]
+            fn gt_mask(self, other: Self) -> Self::Mask {
+                self.gt(other)
+            }
+
+            #[inline(always)]
+            fn ge_mask(self, other: Self) -> Self::Mask {
+                self.ge(other)
+            }
+
+            #[inline(always)]
+            fn decrease_masked(self, mask: Self::Mask) -> Self {
+                // Casting a mask into ints will produce all bits set for
+                // true, and 0 for false. Adding that to the binary
+                // representation of a float means subtracting one from
+                // the binary representation, resulting in the next lower
+                // value representable by $ty. This works even when the
+                // current value is infinity.
+                debug_assert!(mask.any(), "At least one lane must be set");
+                <$ty>::from_bits(<$uty>::from_bits(self) + <$uty>::from_bits(mask))
+            }
+
+            #[inline]
+            fn cast_from_int(i: Self::UInt) -> Self {
+                i.cast()
+            }
+        }
+    };
+}
+
+#[cfg(feature="simd_support")] simd_impl! { f32x2, f32, m32x2, u32x2 }
+#[cfg(feature="simd_support")] simd_impl! { f32x4, f32, m32x4, u32x4 }
+#[cfg(feature="simd_support")] simd_impl! { f32x8, f32, m32x8, u32x8 }
+#[cfg(feature="simd_support")] simd_impl! { f32x16, f32, m32x16, u32x16 }
+#[cfg(feature="simd_support")] simd_impl! { f64x2, f64, m64x2, u64x2 }
+#[cfg(feature="simd_support")] simd_impl! { f64x4, f64, m64x4, u64x4 }
+#[cfg(feature="simd_support")] simd_impl! { f64x8, f64, m64x8, u64x8 }
+
+
\ No newline at end of file diff --git a/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/weighted.rs.html b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/weighted.rs.html new file mode 100644 index 0000000..7ae8d0d --- /dev/null +++ b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/weighted.rs.html @@ -0,0 +1,97 @@ +weighted.rs - source
// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Weighted index sampling
+//!
+//! This module is deprecated. Use [`crate::distributions::WeightedIndex`] and
+//! [`crate::distributions::WeightedError`] instead.
+
+pub use super::{WeightedIndex, WeightedError};
+
+#[allow(missing_docs)]
+#[deprecated(since = "0.8.0", note = "moved to rand_distr crate")]
+pub mod alias_method {
+    // This module exists to provide a deprecation warning which minimises
+    // compile errors, but still fails to compile if ever used.
+    use core::marker::PhantomData;
+    use alloc::vec::Vec;
+    use super::WeightedError;
+
+    #[derive(Debug)]
+    pub struct WeightedIndex<W: Weight> {
+        _phantom: PhantomData<W>,
+    }
+    impl<W: Weight> WeightedIndex<W> {
+        pub fn new(_weights: Vec<W>) -> Result<Self, WeightedError> {
+            Err(WeightedError::NoItem)
+        }
+    }
+
+    pub trait Weight {}
+    macro_rules! impl_weight {
+        () => {};
+        ($T:ident, $($more:ident,)*) => {
+            impl Weight for $T {}
+            impl_weight!($($more,)*);
+        };
+    }
+    impl_weight!(f64, f32,);
+    impl_weight!(u8, u16, u32, u64, usize,);
+    impl_weight!(i8, i16, i32, i64, isize,);
+    impl_weight!(u128, i128,);
+}
+
+
\ No newline at end of file diff --git a/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/weighted_index.rs.html b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/weighted_index.rs.html new file mode 100644 index 0000000..7439a5b --- /dev/null +++ b/rust/theBook/chapter-2-guessing-game/guessing_game/target/doc/src/rand/distributions/weighted_index.rs.html @@ -0,0 +1,919 @@ +weighted_index.rs - source
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Weighted index sampling
+
+use crate::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
+use crate::distributions::Distribution;
+use crate::Rng;
+use core::cmp::PartialOrd;
+use core::fmt;
+
+// Note that this whole module is only imported if feature="alloc" is enabled.
+use alloc::vec::Vec;
+
+#[cfg(feature = "serde1")]
+use serde::{Serialize, Deserialize};
+
+/// A distribution using weighted sampling of discrete items
+///
+/// Sampling a `WeightedIndex` distribution returns the index of a randomly
+/// selected element from the iterator used when the `WeightedIndex` was
+/// created. The chance of a given element being picked is proportional to the
+/// value of the element. The weights can use any type `X` for which an
+/// implementation of [`Uniform<X>`] exists.
+///
+/// # Performance
+///
+/// Time complexity of sampling from `WeightedIndex` is `O(log N)` where
+/// `N` is the number of weights. As an alternative,
+/// [`rand_distr::weighted_alias`](https://docs.rs/rand_distr/*/rand_distr/weighted_alias/index.html)
+/// supports `O(1)` sampling, but with much higher initialisation cost.
+///
+/// A `WeightedIndex<X>` contains a `Vec<X>` and a [`Uniform<X>`] and so its
+/// size is the sum of the size of those objects, possibly plus some alignment.
+///
+/// Creating a `WeightedIndex<X>` will allocate enough space to hold `N - 1`
+/// weights of type `X`, where `N` is the number of weights. However, since
+/// `Vec` doesn't guarantee a particular growth strategy, additional memory
+/// might be allocated but not used. Since the `WeightedIndex` object also
+/// contains a [`Uniform<X>`] instance, this might cause additional
+/// allocations, though for primitive types, [`Uniform<X>`] doesn't allocate
+/// any memory.
+///
+/// Sampling from `WeightedIndex` will result in a single call to
+/// `Uniform<X>::sample` (method of the [`Distribution`] trait), which typically
+/// will request a single value from the underlying [`RngCore`], though the
+/// exact number depends on the implementation of `Uniform<X>::sample`.
+///
+/// # Example
+///
+/// ```
+/// use rand::prelude::*;
+/// use rand::distributions::WeightedIndex;
+///
+/// let choices = ['a', 'b', 'c'];
+/// let weights = [2,   1,   1];
+/// let dist = WeightedIndex::new(&weights).unwrap();
+/// let mut rng = thread_rng();
+/// for _ in 0..100 {
+///     // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c'
+///     println!("{}", choices[dist.sample(&mut rng)]);
+/// }
+///
+/// let items = [('a', 0), ('b', 3), ('c', 7)];
+/// let dist2 = WeightedIndex::new(items.iter().map(|item| item.1)).unwrap();
+/// for _ in 0..100 {
+///     // 0% chance to print 'a', 30% chance to print 'b', 70% chance to print 'c'
+///     println!("{}", items[dist2.sample(&mut rng)].0);
+/// }
+/// ```
+///
+/// [`Uniform<X>`]: crate::distributions::Uniform
+/// [`RngCore`]: crate::RngCore
+#[derive(Debug, Clone, PartialEq)]
+#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
+pub struct WeightedIndex<X: SampleUniform + PartialOrd> {
+    cumulative_weights: Vec<X>,
+    total_weight: X,
+    weight_distribution: X::Sampler,
+}
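As the doc comment above describes, `WeightedIndex` stores running totals of the weights and samples in `O(log N)` by binary-searching them. The following is a rough standalone sketch of that idea, outside of rand; the helper name and concrete values are illustrative.

```
// Illustrative only: map a uniform draw to an index via cumulative weights.
// For weights [2, 1, 1], the stored prefix sums (excluding the total) are
// [2, 3] and the total weight is 4, matching the `N - 1` storage noted above.
fn pick_index(cumulative: &[u32], total: u32, draw: u32) -> usize {
    assert!(draw < total);
    // The first prefix sum strictly greater than `draw` selects the index.
    cumulative
        .binary_search_by(|w| {
            if *w <= draw {
                core::cmp::Ordering::Less
            } else {
                core::cmp::Ordering::Greater
            }
        })
        .unwrap_err()
}

fn main() {
    let cumulative = [2u32, 3]; // weights [2, 1, 1]
    let total = 4;
    assert_eq!(pick_index(&cumulative, total, 0), 0); // draws 0..2 -> index 0
    assert_eq!(pick_index(&cumulative, total, 2), 1); // draw 2     -> index 1
    assert_eq!(pick_index(&cumulative, total, 3), 2); // draw 3     -> index 2
}
```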
+
+impl<X: SampleUniform + PartialOrd> WeightedIndex<X> {
+    /// Creates a new `WeightedIndex` [`Distribution`] using the values
+    /// in `weights`. The weights can use any type `X` for which an
+    /// implementation of [`Uniform<X>`] exists.
+    ///
+    /// Returns an error if the iterator is empty, if any weight is `< 0`, or
+    /// if its total value is 0.
+    ///
+    /// [`Uniform<X>`]: crate::distributions::uniform::Uniform
+    pub fn new<I>(weights: I) -> Result<WeightedIndex<X>, WeightedError>
+    where
+        I: IntoIterator,
+        I::Item: SampleBorrow<X>,
+        X: for<'a> ::core::ops::AddAssign<&'a X> + Clone + Default,
+    {
+        let mut iter = weights.into_iter();
+        let mut total_weight: X = iter.next().ok_or(WeightedError::NoItem)?.borrow().clone();
+
+        let zero = <X as Default>::default();
+        if !(total_weight >= zero) {
+            return Err(WeightedError::InvalidWeight);
+        }
+
+        let mut weights = Vec::<X>::with_capacity(iter.size_hint().0);
+        for w in iter {
+            // Note that `!(w >= x)` is not equivalent to `w < x` for partially
+            // ordered types due to NaNs which are equal to nothing.
+            if !(w.borrow() >= &zero) {
+                return Err(WeightedError::InvalidWeight);
+            }
+            weights.push(total_weight.clone());
+            total_weight += w.borrow();
+        }
+
+        if total_weight == zero {
+            return Err(WeightedError::AllWeightsZero);
+        }
+        let distr = X::Sampler::new(zero, total_weight.clone());
+
+        Ok(WeightedIndex {
+            cumulative_weights: weights,
+            total_weight,
+            weight_distribution: distr,
+        })
+    }
+
+    /// Update a subset of weights, without changing the number of weights.
+    ///
+    /// `new_weights` must be sorted by the index.
+    ///
+    /// Using this method instead of `new` might be more efficient if only a small number of
+    /// weights is modified. No allocations are performed, unless the weight type `X` uses
+    /// allocation internally.
+    ///
+    /// In case of error, `self` is not modified.
+    pub fn update_weights(&mut self, new_weights: &[(usize, &X)]) -> Result<(), WeightedError>
+    where X: for<'a> ::core::ops::AddAssign<&'a X>
+            + for<'a> ::core::ops::SubAssign<&'a X>
+            + Clone
+            + Default {
+        if new_weights.is_empty() {
+            return Ok(());
+        }
+
+        let zero = <X as Default>::default();
+
+        let mut total_weight = self.total_weight.clone();
+
+        // Check for errors first, so we don't modify `self` in case something
+        // goes wrong.
+        let mut prev_i = None;
+        for &(i, w) in new_weights {
+            if let Some(old_i) = prev_i {
+                if old_i >= i {
+                    return Err(WeightedError::InvalidWeight);
+                }
+            }
+            if !(*w >= zero) {
+                return Err(WeightedError::InvalidWeight);
+            }
+            if i > self.cumulative_weights.len() {
+                return Err(WeightedError::TooMany);
+            }
+
+            let mut old_w = if i < self.cumulative_weights.len() {
+                self.cumulative_weights[i].clone()
+            } else {
+                self.total_weight.clone()
+            };
+            if i > 0 {
+                old_w -= &self.cumulative_weights[i - 1];
+            }
+
+            total_weight -= &old_w;
+            total_weight += w;
+            prev_i = Some(i);
+        }
+        if total_weight <= zero {
+            return Err(WeightedError::AllWeightsZero);
+        }
+
+        // Update the weights. Because we checked all the preconditions in the
+        // previous loop, this should never panic.
+        let mut iter = new_weights.iter();
+
+        let mut prev_weight = zero.clone();
+        let mut next_new_weight = iter.next();
+        let &(first_new_index, _) = next_new_weight.unwrap();
+        let mut cumulative_weight = if first_new_index > 0 {
+            self.cumulative_weights[first_new_index - 1].clone()
+        } else {
+            zero.clone()
+        };
+        for i in first_new_index..self.cumulative_weights.len() {
+            match next_new_weight {
+                Some(&(j, w)) if i == j => {
+                    cumulative_weight += w;
+                    next_new_weight = iter.next();
+                }
+                _ => {
+                    let mut tmp = self.cumulative_weights[i].clone();
+                    tmp -= &prev_weight; // We know this is positive.
+                    cumulative_weight += &tmp;
+                }
+            }
+            prev_weight = cumulative_weight.clone();
+            core::mem::swap(&mut prev_weight, &mut self.cumulative_weights[i]);
+        }
+
+        self.total_weight = total_weight;
+        self.weight_distribution = X::Sampler::new(zero, self.total_weight.clone());
+
+        Ok(())
+    }
+}
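A small usage sketch for `update_weights`, assuming rand 0.8's public API as shown in this file; the concrete weights and indices are arbitrary.

```
use rand::distributions::{Distribution, WeightedIndex};
use rand::thread_rng;

fn main() {
    // Start with three weights, then raise the weight at index 2.
    let mut dist = WeightedIndex::new(&[2u32, 1, 1]).unwrap();
    dist.update_weights(&[(2, &5u32)]).unwrap();

    let mut rng = thread_rng();
    // Index 2 now carries weight 5 out of a total of 8.
    let counts = (0..1_000).fold([0u32; 3], |mut acc, _| {
        acc[dist.sample(&mut rng)] += 1;
        acc
    });
    println!("observed counts: {:?}", counts);
}
```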
+
+impl<X> Distribution<usize> for WeightedIndex<X>
+where X: SampleUniform + PartialOrd
+{
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
+        use ::core::cmp::Ordering;
+        let chosen_weight = self.weight_distribution.sample(rng);
+        // Find the first item which has a weight *higher* than the chosen weight.
+        self.cumulative_weights
+            .binary_search_by(|w| {
+                if *w <= chosen_weight {
+                    Ordering::Less
+                } else {
+                    Ordering::Greater
+                }
+            })
+            .unwrap_err()
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[cfg(feature = "serde1")]
+    #[test]
+    fn test_weightedindex_serde1() {
+        let weighted_index = WeightedIndex::new(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).unwrap();
+
+        let ser_weighted_index = bincode::serialize(&weighted_index).unwrap();
+        let de_weighted_index: WeightedIndex<i32> =
+            bincode::deserialize(&ser_weighted_index).unwrap();
+
+        assert_eq!(
+            de_weighted_index.cumulative_weights,
+            weighted_index.cumulative_weights
+        );
+        assert_eq!(de_weighted_index.total_weight, weighted_index.total_weight);
+    }
+
+    #[test]
+    fn test_accepting_nan() {
+        assert_eq!(
+            WeightedIndex::new(&[core::f32::NAN, 0.5]).unwrap_err(),
+            WeightedError::InvalidWeight,
+        );
+        assert_eq!(
+            WeightedIndex::new(&[core::f32::NAN]).unwrap_err(),
+            WeightedError::InvalidWeight,
+        );
+        assert_eq!(
+            WeightedIndex::new(&[0.5, core::f32::NAN]).unwrap_err(),
+            WeightedError::InvalidWeight,
+        );
+
+        assert_eq!(
+            WeightedIndex::new(&[0.5, 7.0])
+                .unwrap()
+                .update_weights(&[(0, &core::f32::NAN)])
+                .unwrap_err(),
+            WeightedError::InvalidWeight,
+        )
+    }
+
+
+    #[test]
+    #[cfg_attr(miri, ignore)] // Miri is too slow
+    fn test_weightedindex() {
+        let mut r = crate::test::rng(700);
+        const N_REPS: u32 = 5000;
+        let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7];
+        let total_weight = weights.iter().sum::<u32>() as f32;
+
+        let verify = |result: [i32; 14]| {
+            for (i, count) in result.iter().enumerate() {
+                let exp = (weights[i] * N_REPS) as f32 / total_weight;
+                let mut err = (*count as f32 - exp).abs();
+                if err != 0.0 {
+                    err /= exp;
+                }
+                assert!(err <= 0.25);
+            }
+        };
+
+        // WeightedIndex from vec
+        let mut chosen = [0i32; 14];
+        let distr = WeightedIndex::new(weights.to_vec()).unwrap();
+        for _ in 0..N_REPS {
+            chosen[distr.sample(&mut r)] += 1;
+        }
+        verify(chosen);
+
+        // WeightedIndex from slice
+        chosen = [0i32; 14];
+        let distr = WeightedIndex::new(&weights[..]).unwrap();
+        for _ in 0..N_REPS {
+            chosen[distr.sample(&mut r)] += 1;
+        }
+        verify(chosen);
+
+        // WeightedIndex from iterator
+        chosen = [0i32; 14];
+        let distr = WeightedIndex::new(weights.iter()).unwrap();
+        for _ in 0..N_REPS {
+            chosen[distr.sample(&mut r)] += 1;
+        }
+        verify(chosen);
+
+        for _ in 0..5 {
+            assert_eq!(WeightedIndex::new(&[0, 1]).unwrap().sample(&mut r), 1);
+            assert_eq!(WeightedIndex::new(&[1, 0]).unwrap().sample(&mut r), 0);
+            assert_eq!(
+                WeightedIndex::new(&[0, 0, 0, 0, 10, 0])
+                    .unwrap()
+                    .sample(&mut r),
+                4
+            );
+        }
+
+        assert_eq!(
+            WeightedIndex::new(&[10][0..0]).unwrap_err(),
+            WeightedError::NoItem
+        );
+        assert_eq!(
+            WeightedIndex::new(&[0]).unwrap_err(),
+            WeightedError::AllWeightsZero
+        );
+        assert_eq!(
+            WeightedIndex::new(&[10, 20, -1, 30]).unwrap_err(),
+            WeightedError::InvalidWeight
+        );
+        assert_eq!(
+            WeightedIndex::new(&[-10, 20, 1, 30]).unwrap_err(),
+            WeightedError::InvalidWeight
+        );
+        assert_eq!(
+            WeightedIndex::new(&[-10]).unwrap_err(),
+            WeightedError::InvalidWeight
+        );
+    }
+
+    #[test]
+    fn test_update_weights() {
+        let data = [
+            (
+                &[10u32, 2, 3, 4][..],
+                &[(1, &100), (2, &4)][..], // positive change
+                &[10, 100, 4, 4][..],
+            ),
+            (
+                &[1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7][..],
+                &[(2, &1), (5, &1), (13, &100)][..], // negative change and last element
+                &[1u32, 2, 1, 0, 5, 1, 7, 1, 2, 3, 4, 5, 6, 100][..],
+            ),
+        ];
+
+        for (weights, update, expected_weights) in data.iter() {
+            let total_weight = weights.iter().sum::<u32>();
+            let mut distr = WeightedIndex::new(weights.to_vec()).unwrap();
+            assert_eq!(distr.total_weight, total_weight);
+
+            distr.update_weights(update).unwrap();
+            let expected_total_weight = expected_weights.iter().sum::<u32>();
+            let expected_distr = WeightedIndex::new(expected_weights.to_vec()).unwrap();
+            assert_eq!(distr.total_weight, expected_total_weight);
+            assert_eq!(distr.total_weight, expected_distr.total_weight);
+            assert_eq!(distr.cumulative_weights, expected_distr.cumulative_weights);
+        }
+    }
+
+    #[test]
+    fn value_stability() {
+        fn test_samples<X: SampleUniform + PartialOrd, I>(
+            weights: I, buf: &mut [usize], expected: &[usize],
+        ) where
+            I: IntoIterator,
+            I::Item: SampleBorrow<X>,
+            X: for<'a> ::core::ops::AddAssign<&'a X> + Clone + Default,
+        {
+            assert_eq!(buf.len(), expected.len());
+            let distr = WeightedIndex::new(weights).unwrap();
+            let mut rng = crate::test::rng(701);
+            for r in buf.iter_mut() {
+                *r = rng.sample(&distr);
+            }
+            assert_eq!(buf, expected);
+        }
+
+        let mut buf = [0; 10];
+        test_samples(&[1i32, 1, 1, 1, 1, 1, 1, 1, 1], &mut buf, &[
+            0, 6, 2, 6, 3, 4, 7, 8, 2, 5,
+        ]);
+        test_samples(&[0.7f32, 0.1, 0.1, 0.1], &mut buf, &[
+            0, 0, 0, 1, 0, 0, 2, 3, 0, 0,
+        ]);
+        test_samples(&[1.0f64, 0.999, 0.998, 0.997], &mut buf, &[
+            2, 2, 1, 3, 2, 1, 3, 3, 2, 1,
+        ]);
+    }
+
+    #[test]
+    fn weighted_index_distributions_can_be_compared() {
+        assert_eq!(WeightedIndex::new(&[1, 2]), WeightedIndex::new(&[1, 2]));
+    }
+}
+
+/// Error type returned from `WeightedIndex::new`.
+#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum WeightedError {
+    /// The provided weight collection contains no items.
+    NoItem,
+
+    /// A weight is either less than zero, greater than the supported maximum,
+    /// NaN, or otherwise invalid.
+    InvalidWeight,
+
+    /// All items in the provided weight collection are zero.
+    AllWeightsZero,
+
+    /// Too many weights are provided (length greater than `u32::MAX`).
+    TooMany,
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for WeightedError {}
+
+impl fmt::Display for WeightedError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.write_str(match *self {
+            WeightedError::NoItem => "No weights provided in distribution",
+            WeightedError::InvalidWeight => "A weight is invalid in distribution",
+            WeightedError::AllWeightsZero => "All weights are zero in distribution",
+            WeightedError::TooMany => "Too many weights (hit u32::MAX) in distribution",
+        })
+    }
+}
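Because `WeightedIndex::new` reports failures through these variants, callers can match on them directly. A brief sketch, again assuming the API shown here; the inputs are arbitrary.

```
use rand::distributions::{WeightedError, WeightedIndex};

fn main() {
    // An empty weight list maps to `WeightedError::NoItem`.
    match WeightedIndex::<u32>::new(Vec::<u32>::new()) {
        Err(WeightedError::NoItem) => println!("no weights were supplied"),
        Err(e) => println!("other error: {}", e),
        Ok(_) => unreachable!(),
    }

    // A negative weight maps to `WeightedError::InvalidWeight`.
    assert_eq!(
        WeightedIndex::new(&[1i32, -1]).unwrap_err(),
        WeightedError::InvalidWeight
    );
}
```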
+
+
\ No newline at end of file -- cgit v1.2.3-70-g09d2