Updated script that can be controlled by a Node.js web app

This commit is contained in:
mac OS
2024-11-25 12:24:18 +07:00
parent c440eda1f4
commit 8b0ab2bd3a
8662 changed files with 1803808 additions and 34 deletions

View File

@@ -0,0 +1,71 @@
**This software is dual-licensed under The University of Illinois/NCSA
Open Source License (NCSA) and The 3-Clause BSD License**
# NCSA Open Source License
**Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
Developed by: Kevin Sheppard (<kevin.sheppard@economics.ox.ac.uk>,
<kevin.k.sheppard@gmail.com>)
[http://www.kevinsheppard.com](http://www.kevinsheppard.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal with
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimers.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimers in the documentation and/or
other materials provided with the distribution.
Neither the names of Kevin Sheppard, nor the names of any contributors may be
used to endorse or promote products derived from this Software without specific
prior written permission.
**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
THE SOFTWARE.**
# 3-Clause BSD License
**Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
**THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.**
# Components
Many parts of this module have been derived from original sources,
often the algorithm's designer. Component licenses are located with
the component code.

View File

@@ -0,0 +1,14 @@
cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
cdef extern from "numpy/random/bitgen.h":
struct bitgen:
void *state
uint64_t (*next_uint64)(void *st) nogil
uint32_t (*next_uint32)(void *st) nogil
double (*next_double)(void *st) nogil
uint64_t (*next_raw)(void *st) nogil
ctypedef bitgen bitgen_t
from numpy.random.bit_generator cimport BitGenerator, SeedSequence

View File

@@ -0,0 +1,215 @@
"""
========================
Random Number Generation
========================
Use ``default_rng()`` to create a `Generator` and call its methods.
=============== =========================================================
Generator
--------------- ---------------------------------------------------------
Generator Class implementing all of the random number distributions
default_rng Default constructor for ``Generator``
=============== =========================================================
============================================= ===
BitGenerator Streams that work with Generator
--------------------------------------------- ---
MT19937
PCG64
PCG64DXSM
Philox
SFC64
============================================= ===
============================================= ===
Getting entropy to initialize a BitGenerator
--------------------------------------------- ---
SeedSequence
============================================= ===
Legacy
------
For backwards compatibility with versions of numpy before 1.17, the
various aliases to the global `RandomState` methods are left alone and do not
use the new `Generator` API.
==================== =========================================================
Utility functions
-------------------- ---------------------------------------------------------
random Uniformly distributed floats over ``[0, 1)``
bytes Uniformly distributed random bytes.
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
choice Random sample from 1-D array.
==================== =========================================================
==================== =========================================================
Compatibility
functions - removed
in the new API
-------------------- ---------------------------------------------------------
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
random_integers Uniformly distributed integers in a given range.
(deprecated, use ``integers(..., endpoint=True)`` instead)
random_sample Alias for `random_sample`
randint Uniformly distributed integers in a given range
seed Seed the legacy random number generator.
==================== =========================================================
==================== =========================================================
Univariate
distributions
-------------------- ---------------------------------------------------------
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
exponential Exponential distribution.
f F (Fisher-Snedecor) distribution.
gamma Gamma distribution.
geometric Geometric distribution.
gumbel Gumbel distribution.
hypergeometric Hypergeometric distribution.
laplace Laplace distribution.
logistic Logistic distribution.
lognormal Log-normal distribution.
logseries Logarithmic series distribution.
negative_binomial Negative binomial distribution.
noncentral_chisquare Non-central chi-square distribution.
noncentral_f Non-central F distribution.
normal Normal / Gaussian distribution.
pareto Pareto distribution.
poisson Poisson distribution.
power Power distribution.
rayleigh Rayleigh distribution.
triangular Triangular distribution.
uniform Uniform distribution.
vonmises Von Mises circular distribution.
wald Wald (inverse Gaussian) distribution.
weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
==================== ==========================================================
Multivariate
distributions
-------------------- ----------------------------------------------------------
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
==================== ==========================================================
==================== =========================================================
Standard
distributions
-------------------- ---------------------------------------------------------
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
standard_normal Standard normal distribution.
standard_t Standard Student's t-distribution.
==================== =========================================================
==================== =========================================================
Internal functions
-------------------- ---------------------------------------------------------
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
"""
__all__ = [
'beta',
'binomial',
'bytes',
'chisquare',
'choice',
'dirichlet',
'exponential',
'f',
'gamma',
'geometric',
'get_state',
'gumbel',
'hypergeometric',
'laplace',
'logistic',
'lognormal',
'logseries',
'multinomial',
'multivariate_normal',
'negative_binomial',
'noncentral_chisquare',
'noncentral_f',
'normal',
'pareto',
'permutation',
'poisson',
'power',
'rand',
'randint',
'randn',
'random',
'random_integers',
'random_sample',
'ranf',
'rayleigh',
'sample',
'seed',
'set_state',
'shuffle',
'standard_cauchy',
'standard_exponential',
'standard_gamma',
'standard_normal',
'standard_t',
'triangular',
'uniform',
'vonmises',
'wald',
'weibull',
'zipf',
]
# add these for module-freeze analysis (like PyInstaller)
from . import _pickle
from . import _common
from . import _bounded_integers
from ._generator import Generator, default_rng
from .bit_generator import SeedSequence, BitGenerator
from ._mt19937 import MT19937
from ._pcg64 import PCG64, PCG64DXSM
from ._philox import Philox
from ._sfc64 import SFC64
from .mtrand import *
__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng',
'BitGenerator']
def __RandomState_ctor():
"""Return a RandomState instance.
This function exists solely to assist (un)pickling.
Note that the state of the RandomState returned here is irrelevant, as this
function's entire purpose is to return a newly allocated RandomState whose
state a pickle can then set. Consequently, the RandomState returned by this
function is a freshly allocated instance seeded with 0.
See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
"""
return RandomState(seed=0)
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
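As a quick orientation to the Generator / default_rng API summarized in the module docstring above, here is a minimal usage sketch (illustrative only, not part of this file; the seed value is arbitrary):

import numpy as np

rng = np.random.default_rng(12345)              # Generator backed by the default BitGenerator (PCG64)
print(rng.random(3))                            # three floats uniform on [0, 1)
print(rng.integers(0, 10, size=5))              # five integers drawn from [0, 10)
print(rng.normal(loc=0.0, scale=1.0, size=2))   # two draws from a normal distribution
print(rng.choice(['a', 'b', 'c'], size=2, replace=False))  # sample without replacement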

View File

@@ -0,0 +1,71 @@
from numpy._pytesttester import PytestTester
from numpy.random._generator import Generator as Generator
from numpy.random._generator import default_rng as default_rng
from numpy.random._mt19937 import MT19937 as MT19937
from numpy.random._pcg64 import (
PCG64 as PCG64,
PCG64DXSM as PCG64DXSM,
)
from numpy.random._philox import Philox as Philox
from numpy.random._sfc64 import SFC64 as SFC64
from numpy.random.bit_generator import BitGenerator as BitGenerator
from numpy.random.bit_generator import SeedSequence as SeedSequence
from numpy.random.mtrand import (
RandomState as RandomState,
beta as beta,
binomial as binomial,
bytes as bytes,
chisquare as chisquare,
choice as choice,
dirichlet as dirichlet,
exponential as exponential,
f as f,
gamma as gamma,
geometric as geometric,
get_bit_generator as get_bit_generator,
get_state as get_state,
gumbel as gumbel,
hypergeometric as hypergeometric,
laplace as laplace,
logistic as logistic,
lognormal as lognormal,
logseries as logseries,
multinomial as multinomial,
multivariate_normal as multivariate_normal,
negative_binomial as negative_binomial,
noncentral_chisquare as noncentral_chisquare,
noncentral_f as noncentral_f,
normal as normal,
pareto as pareto,
permutation as permutation,
poisson as poisson,
power as power,
rand as rand,
randint as randint,
randn as randn,
random as random,
random_integers as random_integers,
random_sample as random_sample,
ranf as ranf,
rayleigh as rayleigh,
sample as sample,
seed as seed,
set_bit_generator as set_bit_generator,
set_state as set_state,
shuffle as shuffle,
standard_cauchy as standard_cauchy,
standard_exponential as standard_exponential,
standard_gamma as standard_gamma,
standard_normal as standard_normal,
standard_t as standard_t,
triangular as triangular,
uniform as uniform,
vonmises as vonmises,
wald as wald,
weibull as weibull,
zipf as zipf,
)
__all__: list[str]
test: PytestTester

View File

@@ -0,0 +1,29 @@
from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
int8_t, int16_t, int32_t, int64_t, intptr_t)
import numpy as np
cimport numpy as np
ctypedef np.npy_bool bool_t
from numpy.random cimport bitgen_t
cdef inline uint64_t _gen_mask(uint64_t max_val) noexcept nogil:
"""Mask generator for use in bounded random numbers"""
# Smallest bit mask >= max
cdef uint64_t mask = max_val
mask |= mask >> 1
mask |= mask >> 2
mask |= mask >> 4
mask |= mask >> 8
mask |= mask >> 16
mask |= mask >> 32
return mask
cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
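The bit-smearing trick used by `_gen_mask` above can be restated in pure Python; this sketch is illustrative only and is not part of the .pxd:

def gen_mask(max_val: int) -> int:
    # Propagate the highest set bit downward so the result is the smallest
    # all-ones bit mask that is >= max_val.
    mask = max_val
    for shift in (1, 2, 4, 8, 16, 32):
        mask |= mask >> shift
    return mask

assert gen_mask(1000) == 1023        # 0b1111111111
assert gen_mask(2**32) == 2**33 - 1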

View File

@@ -0,0 +1,107 @@
#cython: language_level=3
from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t
import numpy as np
cimport numpy as np
from numpy.random cimport bitgen_t
cdef double POISSON_LAM_MAX
cdef double LEGACY_POISSON_LAM_MAX
cdef uint64_t MAXSIZE
cdef enum ConstraintType:
CONS_NONE
CONS_NON_NEGATIVE
CONS_POSITIVE
CONS_POSITIVE_NOT_NAN
CONS_BOUNDED_0_1
CONS_BOUNDED_GT_0_1
CONS_BOUNDED_LT_0_1
CONS_GT_1
CONS_GTE_1
CONS_POISSON
LEGACY_CONS_POISSON
LEGACY_CONS_NON_NEGATIVE_INBOUNDS_LONG
ctypedef ConstraintType constraint_type
cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method)
cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output)
cdef object prepare_cffi(bitgen_t *bitgen)
cdef object prepare_ctypes(bitgen_t *bitgen)
cdef int check_constraint(double val, object name, constraint_type cons) except -1
cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1
cdef extern from "include/aligned_malloc.h":
cdef void *PyArray_realloc_aligned(void *p, size_t n)
cdef void *PyArray_malloc_aligned(size_t n)
cdef void *PyArray_calloc_aligned(size_t n, size_t s)
cdef void PyArray_free_aligned(void *p)
ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) noexcept nogil
ctypedef double (*random_double_0)(void *state) noexcept nogil
ctypedef double (*random_double_1)(void *state, double a) noexcept nogil
ctypedef double (*random_double_2)(void *state, double a, double b) noexcept nogil
ctypedef double (*random_double_3)(void *state, double a, double b, double c) noexcept nogil
ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) noexcept nogil
ctypedef float (*random_float_0)(bitgen_t *state) noexcept nogil
ctypedef float (*random_float_1)(bitgen_t *state, float a) noexcept nogil
ctypedef int64_t (*random_uint_0)(void *state) noexcept nogil
ctypedef int64_t (*random_uint_d)(void *state, double a) noexcept nogil
ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) noexcept nogil
ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) noexcept nogil
ctypedef int64_t (*random_uint_i)(void *state, int64_t a) noexcept nogil
ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) noexcept nogil
ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) noexcept nogil
ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) noexcept nogil
ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) noexcept nogil
ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) noexcept nogil
cdef double kahan_sum(double *darr, np.npy_intp n) noexcept
cdef inline double uint64_to_double(uint64_t rnd) noexcept nogil:
return (rnd >> 11) * (1.0 / 9007199254740992.0)
cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out)
cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out)
cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out)
cdef object wrap_int(object val, object bits)
cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size)
cdef validate_output_shape(iter_shape, np.ndarray output)
cdef object cont(void *func, void *state, object size, object lock, int narg,
object a, object a_name, constraint_type a_constraint,
object b, object b_name, constraint_type b_constraint,
object c, object c_name, constraint_type c_constraint,
object out)
cdef object disc(void *func, void *state, object size, object lock,
int narg_double, int narg_int64,
object a, object a_name, constraint_type a_constraint,
object b, object b_name, constraint_type b_constraint,
object c, object c_name, constraint_type c_constraint)
cdef object cont_f(void *func, bitgen_t *state, object size, object lock,
object a, object a_name, constraint_type a_constraint,
object out)
cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
np.ndarray a_arr, object a_name, constraint_type a_constraint,
np.ndarray b_arr, object b_name, constraint_type b_constraint,
np.ndarray c_arr, object c_name, constraint_type c_constraint)
cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
np.ndarray a_arr, object a_name, constraint_type a_constraint,
np.ndarray b_arr, object b_name, constraint_type b_constraint,
np.ndarray c_arr, object c_name, constraint_type c_constraint)
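For reference, the `uint64_to_double` conversion declared above maps a 64-bit integer to a double in [0, 1) by keeping the top 53 bits; a pure-Python restatement (illustrative only):

def uint64_to_double(rnd: int) -> float:
    # 9007199254740992 == 2**53, so the result lies in [0, 1)
    return (rnd >> 11) * (1.0 / 9007199254740992.0)

assert uint64_to_double(0) == 0.0
assert uint64_to_double(2**64 - 1) < 1.0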

View File

@@ -0,0 +1,40 @@
"""
Use cffi to access any of the underlying C functions from distributions.h
"""
import os
import numpy as np
import cffi
from .parse import parse_distributions_h
ffi = cffi.FFI()
inc_dir = os.path.join(np.get_include(), 'numpy')
# Basic numpy types
ffi.cdef('''
typedef intptr_t npy_intp;
typedef unsigned char npy_bool;
''')
parse_distributions_h(ffi, inc_dir)
lib = ffi.dlopen(np.random._generator.__file__)
# Compare the distributions.h random_standard_normal_fill to
# Generator.standard_normal
bit_gen = np.random.PCG64()
rng = np.random.Generator(bit_gen)
state = bit_gen.state
interface = rng.bit_generator.cffi
n = 100
vals_cffi = ffi.new('double[%d]' % n)
lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi)
# reset the state
bit_gen.state = state
vals = rng.standard_normal(n)
for i in range(n):
assert vals[i] == vals_cffi[i]

View File

@@ -0,0 +1,54 @@
import os
def parse_distributions_h(ffi, inc_dir):
"""
Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef
Read the function declarations without the "#define ..." macros that will
be filled in when loading the library.
"""
with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid:
s = []
for line in fid:
# massage the include file
if line.strip().startswith('#'):
continue
s.append(line)
ffi.cdef('\n'.join(s))
with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid:
s = []
in_skip = 0
ignoring = False
for line in fid:
# check for and remove extern "C" guards
if ignoring:
if line.strip().startswith('#endif'):
ignoring = False
continue
if line.strip().startswith('#ifdef __cplusplus'):
ignoring = True
# massage the include file
if line.strip().startswith('#'):
continue
# skip any inlined function definition
# which starts with 'static inline xxx(...) {'
# and ends with a closing '}'
if line.strip().startswith('static inline'):
in_skip += line.count('{')
continue
elif in_skip > 0:
in_skip += line.count('{')
in_skip -= line.count('}')
continue
# replace defines with their value or remove them
line = line.replace('DECLDIR', '')
line = line.replace('RAND_INT_TYPE', 'int64_t')
s.append(line)
ffi.cdef('\n'.join(s))

View File

@@ -0,0 +1,78 @@
#!/usr/bin/env python3
#cython: language_level=3
from libc.stdint cimport uint32_t
from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
import numpy as np
cimport numpy as np
cimport cython
from numpy.random cimport bitgen_t
from numpy.random import PCG64
np.import_array()
@cython.boundscheck(False)
@cython.wraparound(False)
def uniform_mean(Py_ssize_t n):
cdef Py_ssize_t i
cdef bitgen_t *rng
cdef const char *capsule_name = "BitGenerator"
cdef double[::1] random_values
cdef np.ndarray randoms
x = PCG64()
capsule = x.capsule
if not PyCapsule_IsValid(capsule, capsule_name):
raise ValueError("Invalid pointer to anon_func_state")
rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
random_values = np.empty(n)
# Best practice is to acquire the lock whenever generating random values.
# This prevents other threads from modifying the state. Acquiring the lock
# is only necessary if the GIL is also released, as in this example.
with x.lock, nogil:
for i in range(n):
random_values[i] = rng.next_double(rng.state)
randoms = np.asarray(random_values)
return randoms.mean()
# This function is declared nogil so it can be used without the GIL below
cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil:
cdef uint32_t mask, delta, val
mask = delta = ub - lb
mask |= mask >> 1
mask |= mask >> 2
mask |= mask >> 4
mask |= mask >> 8
mask |= mask >> 16
val = rng.next_uint32(rng.state) & mask
while val > delta:
val = rng.next_uint32(rng.state) & mask
return lb + val
@cython.boundscheck(False)
@cython.wraparound(False)
def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n):
cdef Py_ssize_t i
cdef bitgen_t *rng
cdef uint32_t[::1] out
cdef const char *capsule_name = "BitGenerator"
x = PCG64()
out = np.empty(n, dtype=np.uint32)
capsule = x.capsule
if not PyCapsule_IsValid(capsule, capsule_name):
raise ValueError("Invalid pointer to anon_func_state")
rng = <bitgen_t *>PyCapsule_GetPointer(capsule, capsule_name)
with x.lock, nogil:
for i in range(n):
out[i] = bounded_uint(lb, ub, rng)
return np.asarray(out)
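Assuming this example is compiled into a module named `extending` (as configured in the meson.build shown later in this commit), usage would look roughly like the following hypothetical script:

import extending

print(extending.uniform_mean(10_000))      # mean of uniform draws on [0, 1), roughly 0.5
print(extending.bounded_uints(10, 20, 5))  # five uint32 values in the closed range [10, 20]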

View File

@@ -0,0 +1,117 @@
#!/usr/bin/env python3
#cython: language_level=3
"""
This file shows how to use a BitGenerator to create a distribution.
"""
import numpy as np
cimport numpy as np
cimport cython
from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
from libc.stdint cimport uint16_t, uint64_t
from numpy.random cimport bitgen_t
from numpy.random import PCG64
from numpy.random.c_distributions cimport (
random_standard_uniform_fill, random_standard_uniform_fill_f)
@cython.boundscheck(False)
@cython.wraparound(False)
def uniforms(Py_ssize_t n):
"""
Create an array of `n` uniformly distributed doubles.
A 'real' distribution would want to process the values into
some non-uniform distribution
"""
cdef Py_ssize_t i
cdef bitgen_t *rng
cdef const char *capsule_name = "BitGenerator"
cdef double[::1] random_values
x = PCG64()
capsule = x.capsule
# Optional check that the capsule is from a BitGenerator
if not PyCapsule_IsValid(capsule, capsule_name):
raise ValueError("Invalid pointer to anon_func_state")
# Cast the pointer
rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
random_values = np.empty(n, dtype='float64')
with x.lock, nogil:
for i in range(n):
# Call the function
random_values[i] = rng.next_double(rng.state)
randoms = np.asarray(random_values)
return randoms
# cython example 2
@cython.boundscheck(False)
@cython.wraparound(False)
def uint10_uniforms(Py_ssize_t n):
"""Uniform 10 bit integers stored as 16-bit unsigned integers"""
cdef Py_ssize_t i
cdef bitgen_t *rng
cdef const char *capsule_name = "BitGenerator"
cdef uint16_t[::1] random_values
cdef int bits_remaining
cdef int width = 10
cdef uint64_t buff, mask = 0x3FF
x = PCG64()
capsule = x.capsule
if not PyCapsule_IsValid(capsule, capsule_name):
raise ValueError("Invalid pointer to anon_func_state")
rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
random_values = np.empty(n, dtype='uint16')
# Best practice is to release GIL and acquire the lock
bits_remaining = 0
with x.lock, nogil:
for i in range(n):
if bits_remaining < width:
buff = rng.next_uint64(rng.state)
random_values[i] = buff & mask
buff >>= width
randoms = np.asarray(random_values)
return randoms
# cython example 3
def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64):
"""
Create an array of `n` uniformly distributed doubles via a "fill" function.
A 'real' distribution would want to process the values into
some non-uniform distribution
Parameters
----------
bit_generator: BitGenerator instance
n: int
Output vector length
dtype: {str, dtype}, optional
Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The
default dtype value is 'd'
"""
cdef Py_ssize_t i
cdef bitgen_t *rng
cdef const char *capsule_name = "BitGenerator"
cdef np.ndarray randoms
capsule = bit_generator.capsule
# Optional check that the capsule is from a BitGenerator
if not PyCapsule_IsValid(capsule, capsule_name):
raise ValueError("Invalid pointer to anon_func_state")
# Cast the pointer
rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
_dtype = np.dtype(dtype)
randoms = np.empty(n, dtype=_dtype)
if _dtype == np.float32:
with bit_generator.lock:
random_standard_uniform_fill_f(rng, n, <float*>np.PyArray_DATA(randoms))
elif _dtype == np.float64:
with bit_generator.lock:
random_standard_uniform_fill(rng, n, <double*>np.PyArray_DATA(randoms))
else:
raise TypeError('Unsupported dtype %r for random' % _dtype)
return randoms
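Assuming this example is compiled into a module named `extending_distributions` (again per the meson.build later in this commit), a hypothetical usage sketch:

import numpy as np
from numpy.random import PCG64
import extending_distributions

vals64 = extending_distributions.uniforms_ex(PCG64(), 1000, dtype=np.float64)
vals32 = extending_distributions.uniforms_ex(PCG64(), 1000, dtype='f')
assert vals64.dtype == np.float64 and vals32.dtype == np.float32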

View File

@@ -0,0 +1,53 @@
project('random-build-examples', 'c', 'cpp', 'cython')
py_mod = import('python')
py3 = py_mod.find_installation(pure: false)
cc = meson.get_compiler('c')
cy = meson.get_compiler('cython')
# Keep synced with pyproject.toml
if not cy.version().version_compare('>=3.0.6')
error('tests requires Cython >= 3.0.6')
endif
base_cython_args = []
if cy.version().version_compare('>=3.1.0')
base_cython_args += ['-Xfreethreading_compatible=True']
endif
_numpy_abs = run_command(py3, ['-c',
'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'],
check: true).stdout().strip()
npymath_path = _numpy_abs / '_core' / 'lib'
npy_include_path = _numpy_abs / '_core' / 'include'
npyrandom_path = _numpy_abs / 'random' / 'lib'
npymath_lib = cc.find_library('npymath', dirs: npymath_path)
npyrandom_lib = cc.find_library('npyrandom', dirs: npyrandom_path)
py3.extension_module(
'extending_distributions',
'extending_distributions.pyx',
install: false,
include_directories: [npy_include_path],
dependencies: [npyrandom_lib, npymath_lib],
cython_args: base_cython_args,
)
py3.extension_module(
'extending',
'extending.pyx',
install: false,
include_directories: [npy_include_path],
dependencies: [npyrandom_lib, npymath_lib],
cython_args: base_cython_args,
)
py3.extension_module(
'extending_cpp',
'extending_distributions.pyx',
install: false,
override_options : ['cython_language=cpp'],
cython_args: base_cython_args + ['--module-name', 'extending_cpp'],
include_directories: [npy_include_path],
dependencies: [npyrandom_lib, npymath_lib],
)

View File

@@ -0,0 +1,84 @@
import numpy as np
import numba as nb
from numpy.random import PCG64
from timeit import timeit
bit_gen = PCG64()
next_d = bit_gen.cffi.next_double
state_addr = bit_gen.cffi.state_address
def normals(n, state):
out = np.empty(n)
for i in range((n + 1) // 2):
x1 = 2.0 * next_d(state) - 1.0
x2 = 2.0 * next_d(state) - 1.0
r2 = x1 * x1 + x2 * x2
while r2 >= 1.0 or r2 == 0.0:
x1 = 2.0 * next_d(state) - 1.0
x2 = 2.0 * next_d(state) - 1.0
r2 = x1 * x1 + x2 * x2
f = np.sqrt(-2.0 * np.log(r2) / r2)
out[2 * i] = f * x1
if 2 * i + 1 < n:
out[2 * i + 1] = f * x2
return out
# Compile using Numba
normalsj = nb.jit(normals, nopython=True)
# Must use state address not state with numba
n = 10000
def numbacall():
return normalsj(n, state_addr)
rg = np.random.Generator(PCG64())
def numpycall():
return rg.normal(size=n)
# Check that the functions work
r1 = numbacall()
r2 = numpycall()
assert r1.shape == (n,)
assert r1.shape == r2.shape
t1 = timeit(numbacall, number=1000)
print(f'{t1:.2f} secs for {n} PCG64 (Numba/PCG64) gaussian randoms')
t2 = timeit(numpycall, number=1000)
print(f'{t2:.2f} secs for {n} PCG64 (NumPy/PCG64) gaussian randoms')
# example 2
next_u32 = bit_gen.ctypes.next_uint32
ctypes_state = bit_gen.ctypes.state
@nb.jit(nopython=True)
def bounded_uint(lb, ub, state):
mask = delta = ub - lb
mask |= mask >> 1
mask |= mask >> 2
mask |= mask >> 4
mask |= mask >> 8
mask |= mask >> 16
val = next_u32(state) & mask
while val > delta:
val = next_u32(state) & mask
return lb + val
print(bounded_uint(323, 2394691, ctypes_state.value))
@nb.jit(nopython=True)
def bounded_uints(lb, ub, n, state):
out = np.empty(n, dtype=np.uint32)
for i in range(n):
out[i] = bounded_uint(lb, ub, state)
bounded_uints(323, 2394691, 10000000, ctypes_state.value)

View File

@@ -0,0 +1,67 @@
r"""
Building the required library in this example requires a source distribution
of NumPy or clone of the NumPy git repository since distributions.c is not
included in binary distributions.
On *nix, execute in numpy/random/src/distributions
export PYTHON_VERSION=3.8 # Python version
export PYTHON_INCLUDE=#path to Python's include folder, usually \
${PYTHON_HOME}/include/python${PYTHON_VERSION}m
export NUMPY_INCLUDE=#path to numpy's include folder, usually \
${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/_core/include
gcc -shared -o libdistributions.so -fPIC distributions.c \
-I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE}
mv libdistributions.so ../../_examples/numba/
On Windows
rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example
set PYTHON_HOME=c:\Anaconda
set PYTHON_VERSION=38
cl.exe /LD .\distributions.c -DDLL_EXPORT \
-I%PYTHON_HOME%\lib\site-packages\numpy\_core\include \
-I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib
move distributions.dll ../../_examples/numba/
"""
import os
import numba as nb
import numpy as np
from cffi import FFI
from numpy.random import PCG64
ffi = FFI()
if os.path.exists('./distributions.dll'):
lib = ffi.dlopen('./distributions.dll')
elif os.path.exists('./libdistributions.so'):
lib = ffi.dlopen('./libdistributions.so')
else:
raise RuntimeError('Required DLL/so file was not found.')
ffi.cdef("""
double random_standard_normal(void *bitgen_state);
""")
x = PCG64()
xffi = x.cffi
bit_generator = xffi.bit_generator
random_standard_normal = lib.random_standard_normal
def normals(n, bit_generator):
out = np.empty(n)
for i in range(n):
out[i] = random_standard_normal(bit_generator)
return out
normalsj = nb.jit(normals, nopython=True)
# Numba requires a memory address for void *
# Can also get address from x.ctypes.bit_generator.value
bit_generator_address = int(ffi.cast('uintptr_t', bit_generator))
norm = normalsj(1000, bit_generator_address)
print(norm[:12])

View File

@@ -0,0 +1,784 @@
from collections.abc import Callable
from typing import Any, overload, TypeVar, Literal
import numpy as np
from numpy import (
dtype,
float32,
float64,
int8,
int16,
int32,
int64,
int_,
uint,
uint8,
uint16,
uint32,
uint64,
)
from numpy.random import BitGenerator, SeedSequence
from numpy._typing import (
ArrayLike,
NDArray,
_ArrayLikeFloat_co,
_ArrayLikeInt_co,
_DoubleCodes,
_DTypeLikeBool,
_DTypeLikeInt,
_DTypeLikeUInt,
_Float32Codes,
_Float64Codes,
_FloatLike_co,
_Int8Codes,
_Int16Codes,
_Int32Codes,
_Int64Codes,
_IntCodes,
_ShapeLike,
_SingleCodes,
_SupportsDType,
_UInt8Codes,
_UInt16Codes,
_UInt32Codes,
_UInt64Codes,
_UIntCodes,
)
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
_DTypeLikeFloat32 = (
dtype[float32]
| _SupportsDType[dtype[float32]]
| type[float32]
| _Float32Codes
| _SingleCodes
)
_DTypeLikeFloat64 = (
dtype[float64]
| _SupportsDType[dtype[float64]]
| type[float]
| type[float64]
| _Float64Codes
| _DoubleCodes
)
class Generator:
def __init__(self, bit_generator: BitGenerator) -> None: ...
def __repr__(self) -> str: ...
def __str__(self) -> str: ...
def __getstate__(self) -> None: ...
def __setstate__(self, state: dict[str, Any] | None) -> None: ...
def __reduce__(self) -> tuple[
Callable[[BitGenerator], Generator],
tuple[BitGenerator],
None]: ...
@property
def bit_generator(self) -> BitGenerator: ...
def spawn(self, n_children: int) -> list[Generator]: ...
def bytes(self, length: int) -> bytes: ...
@overload
def standard_normal( # type: ignore[misc]
self,
size: None = ...,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
out: None = ...,
) -> float: ...
@overload
def standard_normal( # type: ignore[misc]
self,
size: _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def standard_normal( # type: ignore[misc]
self,
*,
out: NDArray[float64] = ...,
) -> NDArray[float64]: ...
@overload
def standard_normal( # type: ignore[misc]
self,
size: _ShapeLike = ...,
dtype: _DTypeLikeFloat32 = ...,
out: None | NDArray[float32] = ...,
) -> NDArray[float32]: ...
@overload
def standard_normal( # type: ignore[misc]
self,
size: _ShapeLike = ...,
dtype: _DTypeLikeFloat64 = ...,
out: None | NDArray[float64] = ...,
) -> NDArray[float64]: ...
@overload
def permutation(self, x: int, axis: int = ...) -> NDArray[int64]: ...
@overload
def permutation(self, x: ArrayLike, axis: int = ...) -> NDArray[Any]: ...
@overload
def standard_exponential( # type: ignore[misc]
self,
size: None = ...,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
method: Literal["zig", "inv"] = ...,
out: None = ...,
) -> float: ...
@overload
def standard_exponential(
self,
size: _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def standard_exponential(
self,
*,
out: NDArray[float64] = ...,
) -> NDArray[float64]: ...
@overload
def standard_exponential(
self,
size: _ShapeLike = ...,
*,
method: Literal["zig", "inv"] = ...,
out: None | NDArray[float64] = ...,
) -> NDArray[float64]: ...
@overload
def standard_exponential(
self,
size: _ShapeLike = ...,
dtype: _DTypeLikeFloat32 = ...,
method: Literal["zig", "inv"] = ...,
out: None | NDArray[float32] = ...,
) -> NDArray[float32]: ...
@overload
def standard_exponential(
self,
size: _ShapeLike = ...,
dtype: _DTypeLikeFloat64 = ...,
method: Literal["zig", "inv"] = ...,
out: None | NDArray[float64] = ...,
) -> NDArray[float64]: ...
@overload
def random( # type: ignore[misc]
self,
size: None = ...,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
out: None = ...,
) -> float: ...
@overload
def random(
self,
*,
out: NDArray[float64] = ...,
) -> NDArray[float64]: ...
@overload
def random(
self,
size: _ShapeLike = ...,
*,
out: None | NDArray[float64] = ...,
) -> NDArray[float64]: ...
@overload
def random(
self,
size: _ShapeLike = ...,
dtype: _DTypeLikeFloat32 = ...,
out: None | NDArray[float32] = ...,
) -> NDArray[float32]: ...
@overload
def random(
self,
size: _ShapeLike = ...,
dtype: _DTypeLikeFloat64 = ...,
out: None | NDArray[float64] = ...,
) -> NDArray[float64]: ...
@overload
def beta(
self,
a: _FloatLike_co,
b: _FloatLike_co,
size: None = ...,
) -> float: ... # type: ignore[misc]
@overload
def beta(
self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def exponential(
self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
) -> int: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: type[bool] = ...,
endpoint: bool = ...,
) -> bool: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: type[np.bool] = ...,
endpoint: bool = ...,
) -> np.bool: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: type[int] = ...,
endpoint: bool = ...,
) -> int: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
endpoint: bool = ...,
) -> uint8: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
endpoint: bool = ...,
) -> uint16: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
endpoint: bool = ...,
) -> uint32: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
endpoint: bool = ...,
) -> uint: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
endpoint: bool = ...,
) -> uint64: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
endpoint: bool = ...,
) -> int8: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
endpoint: bool = ...,
) -> int16: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
endpoint: bool = ...,
) -> int32: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
endpoint: bool = ...,
) -> int_: ...
@overload
def integers( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
endpoint: bool = ...,
) -> int64: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[int64]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: _DTypeLikeBool = ...,
endpoint: bool = ...,
) -> NDArray[np.bool]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
endpoint: bool = ...,
) -> NDArray[int8]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
endpoint: bool = ...,
) -> NDArray[int16]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
endpoint: bool = ...,
) -> NDArray[int32]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
endpoint: bool = ...,
) -> NDArray[int64]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
endpoint: bool = ...,
) -> NDArray[uint8]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
endpoint: bool = ...,
) -> NDArray[uint16]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
endpoint: bool = ...,
) -> NDArray[uint32]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
endpoint: bool = ...,
) -> NDArray[uint64]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
endpoint: bool = ...,
) -> NDArray[int_]: ...
@overload
def integers( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
endpoint: bool = ...,
) -> NDArray[uint]: ...
# TODO: Use a TypeVar _T here to get away from Any output? Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any]
@overload
def choice(
self,
a: int,
size: None = ...,
replace: bool = ...,
p: None | _ArrayLikeFloat_co = ...,
axis: int = ...,
shuffle: bool = ...,
) -> int: ...
@overload
def choice(
self,
a: int,
size: _ShapeLike = ...,
replace: bool = ...,
p: None | _ArrayLikeFloat_co = ...,
axis: int = ...,
shuffle: bool = ...,
) -> NDArray[int64]: ...
@overload
def choice(
self,
a: ArrayLike,
size: None = ...,
replace: bool = ...,
p: None | _ArrayLikeFloat_co = ...,
axis: int = ...,
shuffle: bool = ...,
) -> Any: ...
@overload
def choice(
self,
a: ArrayLike,
size: _ShapeLike = ...,
replace: bool = ...,
p: None | _ArrayLikeFloat_co = ...,
axis: int = ...,
shuffle: bool = ...,
) -> NDArray[Any]: ...
@overload
def uniform(
self,
low: _FloatLike_co = ...,
high: _FloatLike_co = ...,
size: None = ...,
) -> float: ... # type: ignore[misc]
@overload
def uniform(
self,
low: _ArrayLikeFloat_co = ...,
high: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def normal(
self,
loc: _FloatLike_co = ...,
scale: _FloatLike_co = ...,
size: None = ...,
) -> float: ... # type: ignore[misc]
@overload
def normal(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def standard_gamma( # type: ignore[misc]
self,
shape: _FloatLike_co,
size: None = ...,
dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
out: None = ...,
) -> float: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
*,
out: NDArray[float64] = ...,
) -> NDArray[float64]: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
dtype: _DTypeLikeFloat32 = ...,
out: None | NDArray[float32] = ...,
) -> NDArray[float32]: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
dtype: _DTypeLikeFloat64 = ...,
out: None | NDArray[float64] = ...,
) -> NDArray[float64]: ...
@overload
def gamma(self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def gamma(
self,
shape: _ArrayLikeFloat_co,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def f(
self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def noncentral_f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def noncentral_f(
self,
dfnum: _ArrayLikeFloat_co,
dfden: _ArrayLikeFloat_co,
nonc: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def chisquare(
self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def noncentral_chisquare(self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def noncentral_chisquare(
self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def standard_t(
self, df: _ArrayLikeFloat_co, size: None = ...
) -> NDArray[float64]: ...
@overload
def standard_t(
self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def vonmises(self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def vonmises(
self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def pareto(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def weibull(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def power(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ...
@overload
def laplace(
self,
loc: _FloatLike_co = ...,
scale: _FloatLike_co = ...,
size: None = ...,
) -> float: ... # type: ignore[misc]
@overload
def laplace(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def gumbel(
self,
loc: _FloatLike_co = ...,
scale: _FloatLike_co = ...,
size: None = ...,
) -> float: ... # type: ignore[misc]
@overload
def gumbel(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def logistic(
self,
loc: _FloatLike_co = ...,
scale: _FloatLike_co = ...,
size: None = ...,
) -> float: ... # type: ignore[misc]
@overload
def logistic(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def lognormal(
self,
mean: _FloatLike_co = ...,
sigma: _FloatLike_co = ...,
size: None = ...,
) -> float: ... # type: ignore[misc]
@overload
def lognormal(
self,
mean: _ArrayLikeFloat_co = ...,
sigma: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def rayleigh(
self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def wald(self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def wald(
self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def triangular(
self,
left: _FloatLike_co,
mode: _FloatLike_co,
right: _FloatLike_co,
size: None = ...,
) -> float: ... # type: ignore[misc]
@overload
def triangular(
self,
left: _ArrayLikeFloat_co,
mode: _ArrayLikeFloat_co,
right: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def binomial(
self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[int64]: ...
@overload
def negative_binomial(self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def negative_binomial(
self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[int64]: ...
@overload
def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc]
@overload
def poisson(
self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> NDArray[int64]: ...
@overload
def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def zipf(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[int64]: ...
@overload
def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def geometric(
self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[int64]: ...
@overload
def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def hypergeometric(
self,
ngood: _ArrayLikeInt_co,
nbad: _ArrayLikeInt_co,
nsample: _ArrayLikeInt_co,
size: None | _ShapeLike = ...,
) -> NDArray[int64]: ...
@overload
def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def logseries(
self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[int64]: ...
def multivariate_normal(
self,
mean: _ArrayLikeFloat_co,
cov: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
check_valid: Literal["warn", "raise", "ignore"] = ...,
tol: float = ...,
*,
method: Literal["svd", "eigh", "cholesky"] = ...,
) -> NDArray[float64]: ...
def multinomial(
self, n: _ArrayLikeInt_co,
pvals: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...
) -> NDArray[int64]: ...
def multivariate_hypergeometric(
self,
colors: _ArrayLikeInt_co,
nsample: int,
size: None | _ShapeLike = ...,
method: Literal["marginals", "count"] = ...,
) -> NDArray[int64]: ...
def dirichlet(
self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
def permuted(
self, x: ArrayLike, *, axis: None | int = ..., out: None | NDArray[Any] = ...
) -> NDArray[Any]: ...
def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ...
def default_rng(
seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator = ...
) -> Generator: ...
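The overload pattern in these stubs encodes the runtime behavior that `size=None` returns a scalar while a shape returns an array, and that `dtype` selects float32 vs float64 output; a short hypothetical illustration:

import numpy as np

rng = np.random.default_rng()
x = rng.normal()                              # float (size=None overload)
y = rng.normal(size=(2, 3))                   # NDArray[float64]
z = rng.standard_normal(4, dtype=np.float32)  # NDArray[float32] overload
assert isinstance(x, float) and y.shape == (2, 3) and z.dtype == np.float32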

View File

@@ -0,0 +1,23 @@
from typing import TypedDict
from numpy import uint32
from numpy.typing import NDArray
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy._typing import _ArrayLikeInt_co
class _MT19937Internal(TypedDict):
key: NDArray[uint32]
pos: int
class _MT19937State(TypedDict):
bit_generator: str
state: _MT19937Internal
class MT19937(BitGenerator):
def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ...
def jumped(self, jumps: int = ...) -> MT19937: ...
@property
def state(self) -> _MT19937State: ...
@state.setter
def state(self, value: _MT19937State) -> None: ...

View File

@@ -0,0 +1,42 @@
from typing import TypedDict
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy._typing import _ArrayLikeInt_co
class _PCG64Internal(TypedDict):
state: int
inc: int
class _PCG64State(TypedDict):
bit_generator: str
state: _PCG64Internal
has_uint32: int
uinteger: int
class PCG64(BitGenerator):
def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
def jumped(self, jumps: int = ...) -> PCG64: ...
@property
def state(
self,
) -> _PCG64State: ...
@state.setter
def state(
self,
value: _PCG64State,
) -> None: ...
def advance(self, delta: int) -> PCG64: ...
class PCG64DXSM(BitGenerator):
def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
def jumped(self, jumps: int = ...) -> PCG64DXSM: ...
@property
def state(
self,
) -> _PCG64State: ...
@state.setter
def state(
self,
value: _PCG64State,
) -> None: ...
def advance(self, delta: int) -> PCG64DXSM: ...
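A brief hypothetical illustration of the `jumped`/`advance` API declared above (seed values arbitrary):

from numpy.random import PCG64, Generator

bg = PCG64(1234)
child = bg.jumped(1)               # new PCG64 moved ahead by one fixed, very large jump
same = PCG64(1234).advance(2**32)  # advance() moves the state forward and returns the bit generator
rng = Generator(child)             # independent stream for, e.g., a worker process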

View File

@@ -0,0 +1,37 @@
from typing import TypedDict
from numpy import uint64
from numpy.typing import NDArray
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy._typing import _ArrayLikeInt_co
class _PhiloxInternal(TypedDict):
counter: NDArray[uint64]
key: NDArray[uint64]
class _PhiloxState(TypedDict):
bit_generator: str
state: _PhiloxInternal
buffer: NDArray[uint64]
buffer_pos: int
has_uint32: int
uinteger: int
class Philox(BitGenerator):
def __init__(
self,
seed: None | _ArrayLikeInt_co | SeedSequence = ...,
counter: None | _ArrayLikeInt_co = ...,
key: None | _ArrayLikeInt_co = ...,
) -> None: ...
@property
def state(
self,
) -> _PhiloxState: ...
@state.setter
def state(
self,
value: _PhiloxState,
) -> None: ...
def jumped(self, jumps: int = ...) -> Philox: ...
def advance(self, delta: int) -> Philox: ...

View File

@@ -0,0 +1,89 @@
from .bit_generator import BitGenerator
from .mtrand import RandomState
from ._philox import Philox
from ._pcg64 import PCG64, PCG64DXSM
from ._sfc64 import SFC64
from ._generator import Generator
from ._mt19937 import MT19937
BitGenerators = {'MT19937': MT19937,
'PCG64': PCG64,
'PCG64DXSM': PCG64DXSM,
'Philox': Philox,
'SFC64': SFC64,
}
def __bit_generator_ctor(bit_generator: str | type[BitGenerator] = 'MT19937'):
"""
Pickling helper function that returns a bit generator object
Parameters
----------
bit_generator : type[BitGenerator] or str
BitGenerator class or string containing the name of the BitGenerator
Returns
-------
BitGenerator
BitGenerator instance
"""
if isinstance(bit_generator, type):
bit_gen_class = bit_generator
elif bit_generator in BitGenerators:
bit_gen_class = BitGenerators[bit_generator]
else:
raise ValueError(
str(bit_generator) + ' is not a known BitGenerator module.'
)
return bit_gen_class()
def __generator_ctor(bit_generator_name="MT19937",
bit_generator_ctor=__bit_generator_ctor):
"""
Pickling helper function that returns a Generator object
Parameters
----------
bit_generator_name : str or BitGenerator
String containing the core BitGenerator's name or a
BitGenerator instance
bit_generator_ctor : callable, optional
Callable function that takes bit_generator_name as its only argument
and returns an instantized bit generator.
Returns
-------
rg : Generator
Generator using the named core BitGenerator
"""
if isinstance(bit_generator_name, BitGenerator):
return Generator(bit_generator_name)
# Legacy path that uses a bit generator name and ctor
return Generator(bit_generator_ctor(bit_generator_name))
def __randomstate_ctor(bit_generator_name="MT19937",
bit_generator_ctor=__bit_generator_ctor):
"""
Pickling helper function that returns a legacy RandomState-like object
Parameters
----------
bit_generator_name : str
String containing the core BitGenerator's name
bit_generator_ctor : callable, optional
Callable function that takes bit_generator_name as its only argument
and returns an instantized bit generator.
Returns
-------
rs : RandomState
Legacy RandomState using the named core BitGenerator
"""
if isinstance(bit_generator_name, BitGenerator):
return RandomState(bit_generator_name)
return RandomState(bit_generator_ctor(bit_generator_name))
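These helpers exist so that pickled Generator / RandomState objects can be reconstructed; a minimal hypothetical round-trip:

import pickle
from numpy.random import Generator, PCG64

rng = Generator(PCG64(42))
rng2 = pickle.loads(pickle.dumps(rng))   # rebuilt via ctor helpers like the ones above
assert rng.bit_generator.state == rng2.bit_generator.state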

View File

@@ -0,0 +1,26 @@
from typing import TypedDict
from numpy import uint64
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy._typing import NDArray, _ArrayLikeInt_co
class _SFC64Internal(TypedDict):
state: NDArray[uint64]
class _SFC64State(TypedDict):
bit_generator: str
state: _SFC64Internal
has_uint32: int
uinteger: int
class SFC64(BitGenerator):
def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
@property
def state(
self,
) -> _SFC64State: ...
@state.setter
def state(
self,
value: _SFC64State,
) -> None: ...
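
A small sketch, not part of the stub: SFC64 declares no jump/advance methods, but the state TypedDict above can be saved and restored to rewind a stream.

from numpy.random import Generator, SFC64

bg = SFC64(7)
saved = bg.state                  # _SFC64State dict, including the uint64 state array
x = Generator(bg).random(2)
bg.state = saved                  # restore the earlier state
y = Generator(bg).random(2)
print((x == y).all())             # True: identical draws after the rewind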

View File

@ -0,0 +1,35 @@
cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
cdef extern from "numpy/random/bitgen.h":
struct bitgen:
void *state
uint64_t (*next_uint64)(void *st) nogil
uint32_t (*next_uint32)(void *st) nogil
double (*next_double)(void *st) nogil
uint64_t (*next_raw)(void *st) nogil
ctypedef bitgen bitgen_t
cdef class BitGenerator():
cdef readonly object _seed_seq
cdef readonly object lock
cdef bitgen_t _bitgen
cdef readonly object _ctypes
cdef readonly object _cffi
cdef readonly object capsule
cdef class SeedSequence():
cdef readonly object entropy
cdef readonly tuple spawn_key
cdef readonly Py_ssize_t pool_size
cdef readonly object pool
cdef readonly uint32_t n_children_spawned
cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
np.ndarray[np.npy_uint32, ndim=1] entropy_array)
cdef get_assembled_entropy(self)
cdef class SeedlessSequence():
pass
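
A hedged Python-side sketch of how the bitgen_t declared here is reached without writing Cython: BitGenerator instances expose it through a capsule and through ctypes/cffi wrappers. The exact ctypes call pattern below is an assumption based on the documented extending interface.

from numpy.random import PCG64

bg = PCG64(1)
print(bg.capsule)                      # PyCapsule wrapping the underlying bitgen_t
iface = bg.ctypes                      # state pointer plus next_uint64/next_uint32/next_double
print(iface.next_double(iface.state))  # one double in [0, 1) pulled from the raw stream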

View File

@ -0,0 +1,124 @@
import abc
from threading import Lock
from collections.abc import Callable, Mapping, Sequence
from typing import (
Any,
NamedTuple,
TypedDict,
TypeVar,
overload,
Literal,
)
from numpy import dtype, uint32, uint64
from numpy._typing import (
NDArray,
_ArrayLikeInt_co,
_ShapeLike,
_SupportsDType,
_UInt32Codes,
_UInt64Codes,
)
_T = TypeVar("_T")
_DTypeLikeUint32 = (
dtype[uint32]
| _SupportsDType[dtype[uint32]]
| type[uint32]
| _UInt32Codes
)
_DTypeLikeUint64 = (
dtype[uint64]
| _SupportsDType[dtype[uint64]]
| type[uint64]
| _UInt64Codes
)
class _SeedSeqState(TypedDict):
entropy: None | int | Sequence[int]
spawn_key: tuple[int, ...]
pool_size: int
n_children_spawned: int
class _Interface(NamedTuple):
state_address: Any
state: Any
next_uint64: Any
next_uint32: Any
next_double: Any
bit_generator: Any
class ISeedSequence(abc.ABC):
@abc.abstractmethod
def generate_state(
self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
) -> NDArray[uint32 | uint64]: ...
class ISpawnableSeedSequence(ISeedSequence):
@abc.abstractmethod
def spawn(self: _T, n_children: int) -> list[_T]: ...
class SeedlessSeedSequence(ISpawnableSeedSequence):
def generate_state(
self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
) -> NDArray[uint32 | uint64]: ...
def spawn(self: _T, n_children: int) -> list[_T]: ...
class SeedSequence(ISpawnableSeedSequence):
entropy: None | int | Sequence[int]
spawn_key: tuple[int, ...]
pool_size: int
n_children_spawned: int
pool: NDArray[uint32]
def __init__(
self,
entropy: None | int | Sequence[int] | _ArrayLikeInt_co = ...,
*,
spawn_key: Sequence[int] = ...,
pool_size: int = ...,
n_children_spawned: int = ...,
) -> None: ...
def __repr__(self) -> str: ...
@property
def state(
self,
) -> _SeedSeqState: ...
def generate_state(
self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
) -> NDArray[uint32 | uint64]: ...
def spawn(self, n_children: int) -> list[SeedSequence]: ...
class BitGenerator(abc.ABC):
lock: Lock
def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
def __getstate__(self) -> tuple[dict[str, Any], ISeedSequence]: ...
def __setstate__(
self, state_seed_seq: dict[str, Any] | tuple[dict[str, Any], ISeedSequence]
) -> None: ...
def __reduce__(
self,
) -> tuple[
Callable[[str], BitGenerator],
tuple[str],
tuple[dict[str, Any], ISeedSequence]
]: ...
@abc.abstractmethod
@property
def state(self) -> Mapping[str, Any]: ...
@state.setter
def state(self, value: Mapping[str, Any]) -> None: ...
@property
def seed_seq(self) -> ISeedSequence: ...
def spawn(self, n_children: int) -> list[BitGenerator]: ...
@overload
def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int: ... # type: ignore[misc]
@overload
def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> NDArray[uint64]: ... # type: ignore[misc]
@overload
def random_raw(self, size: None | _ShapeLike = ..., output: Literal[False] = ...) -> None: ... # type: ignore[misc]
def _benchmark(self, cnt: int, method: str = ...) -> None: ...
@property
def ctypes(self) -> _Interface: ...
@property
def cffi(self) -> _Interface: ...
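
A brief sketch of the SeedSequence/BitGenerator surface declared above: spawning child sequences for independent streams and pulling raw words with random_raw.

from numpy.random import PCG64, SeedSequence

ss = SeedSequence(42)
children = ss.spawn(3)                    # independent child sequences
streams = [PCG64(child) for child in children]

bg = streams[0]
print(bg.seed_seq.spawn_key)              # (0,) for the first child
print(bg.random_raw())                    # a single raw uint64
print(bg.random_raw(4))                   # an array of four raw uint64 values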

View File

@ -0,0 +1,120 @@
#!python
#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
from numpy cimport npy_intp
from libc.stdint cimport (uint64_t, int32_t, int64_t)
from numpy.random cimport bitgen_t
cdef extern from "numpy/random/distributions.h":
struct s_binomial_t:
int has_binomial
double psave
int64_t nsave
double r
double q
double fm
int64_t m
double p1
double xm
double xl
double xr
double c
double laml
double lamr
double p2
double p3
double p4
ctypedef s_binomial_t binomial_t
float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
double random_standard_uniform(bitgen_t *bitgen_state) nogil
void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out) nogil
void random_standard_uniform_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
double random_standard_exponential(bitgen_t *bitgen_state) nogil
float random_standard_exponential_f(bitgen_t *bitgen_state) nogil
void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
double random_standard_normal(bitgen_t* bitgen_state) nogil
float random_standard_normal_f(bitgen_t *bitgen_state) nogil
void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) nogil
void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out) nogil
double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil
float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out) nogil
float random_standard_normal_f(bitgen_t* bitgen_state) nogil
float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
int64_t random_positive_int64(bitgen_t *bitgen_state) nogil
int32_t random_positive_int32(bitgen_t *bitgen_state) nogil
int64_t random_positive_int(bitgen_t *bitgen_state) nogil
uint64_t random_uint(bitgen_t *bitgen_state) nogil
double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil
double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil
float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil
double random_exponential(bitgen_t *bitgen_state, double scale) nogil
double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
double random_beta(bitgen_t *bitgen_state, double a, double b) nogil
double random_chisquare(bitgen_t *bitgen_state, double df) nogil
double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil
double random_standard_cauchy(bitgen_t *bitgen_state) nogil
double random_pareto(bitgen_t *bitgen_state, double a) nogil
double random_weibull(bitgen_t *bitgen_state, double a) nogil
double random_power(bitgen_t *bitgen_state, double a) nogil
double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil
double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
double random_standard_t(bitgen_t *bitgen_state, double df) nogil
double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
double nonc) nogil
double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
double dfden, double nonc) nogil
double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil
double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
double random_triangular(bitgen_t *bitgen_state, double left, double mode,
double right) nogil
int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil
int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil
int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil
int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil
int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil
int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil
int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil
int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil
int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad,
int64_t sample) nogil
uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
# Generate random uint64 numbers in closed interval [off, off + rng].
uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
uint64_t off, uint64_t rng,
uint64_t mask, bint use_masked) nogil
void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix,
double *pix, npy_intp d, binomial_t *binomial) nogil
int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
int64_t total,
size_t num_colors, int64_t *colors,
int64_t nsample,
size_t num_variates, int64_t *variates) nogil
void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
int64_t total,
size_t num_colors, int64_t *colors,
int64_t nsample,
size_t num_variates, int64_t *variates) nogil
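
These C declarations back the corresponding Generator methods; a rough Python-level correspondence follows (the name mapping is inferred from the method names, not a statement about the exact C call paths).

import numpy as np

rng = np.random.default_rng(0)
print(rng.standard_normal())          # random_standard_normal
print(rng.standard_gamma(3.0))        # random_standard_gamma
print(rng.binomial(10, 0.3))          # random_binomial
print(rng.hypergeometric(10, 5, 6))   # random_hypergeometric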

View File

@ -0,0 +1,681 @@
import builtins
from collections.abc import Callable
from typing import Any, overload, Literal
import numpy as np
from numpy import (
dtype,
float32,
float64,
int8,
int16,
int32,
int64,
int_,
long,
uint8,
uint16,
uint32,
uint64,
uint,
ulong,
)
from numpy.random.bit_generator import BitGenerator
from numpy._typing import (
ArrayLike,
NDArray,
_ArrayLikeFloat_co,
_ArrayLikeInt_co,
_DoubleCodes,
_DTypeLikeBool,
_DTypeLikeInt,
_DTypeLikeUInt,
_Float32Codes,
_Float64Codes,
_Int8Codes,
_Int16Codes,
_Int32Codes,
_Int64Codes,
_IntCodes,
_LongCodes,
_ShapeLike,
_SingleCodes,
_SupportsDType,
_UInt8Codes,
_UInt16Codes,
_UInt32Codes,
_UInt64Codes,
_UIntCodes,
_ULongCodes,
)
_DTypeLikeFloat32 = (
dtype[float32]
| _SupportsDType[dtype[float32]]
| type[float32]
| _Float32Codes
| _SingleCodes
)
_DTypeLikeFloat64 = (
dtype[float64]
| _SupportsDType[dtype[float64]]
| type[float]
| type[float64]
| _Float64Codes
| _DoubleCodes
)
class RandomState:
_bit_generator: BitGenerator
def __init__(self, seed: None | _ArrayLikeInt_co | BitGenerator = ...) -> None: ...
def __repr__(self) -> str: ...
def __str__(self) -> str: ...
def __getstate__(self) -> dict[str, Any]: ...
def __setstate__(self, state: dict[str, Any]) -> None: ...
def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ...
def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ...
@overload
def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ...
@overload
def get_state(
self, legacy: Literal[True] = ...
) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ...
def set_state(
self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]
) -> None: ...
@overload
def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ...
@overload
def random(self, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def random(self, size: _ShapeLike) -> NDArray[float64]: ...
@overload
def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def beta(
self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def exponential(
self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ...
@overload
def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc]
@overload
    # Generates long values, but stores them in a 64-bit int:
def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
) -> int: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: type[bool] = ...,
) -> bool: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: type[np.bool] = ...,
) -> np.bool: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: type[int] = ...,
) -> int: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
) -> uint8: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
) -> uint16: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
) -> uint32: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
) -> uint: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ...,
) -> ulong: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
) -> uint64: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
) -> int8: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
) -> int16: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
) -> int32: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
) -> int_: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ...,
) -> long: ...
@overload
def randint( # type: ignore[misc]
self,
low: int,
high: None | int = ...,
size: None = ...,
dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
) -> int64: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[long]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: _DTypeLikeBool = ...,
) -> NDArray[np.bool]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
) -> NDArray[int8]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
) -> NDArray[int16]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
) -> NDArray[int32]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
) -> NDArray[int64]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
) -> NDArray[uint8]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
) -> NDArray[uint16]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
) -> NDArray[uint32]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
) -> NDArray[uint64]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ...,
) -> NDArray[long]: ...
@overload
def randint( # type: ignore[misc]
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ...,
) -> NDArray[ulong]: ...
def bytes(self, length: int) -> builtins.bytes: ...
@overload
def choice(
self,
a: int,
size: None = ...,
replace: bool = ...,
p: None | _ArrayLikeFloat_co = ...,
) -> int: ...
@overload
def choice(
self,
a: int,
size: _ShapeLike = ...,
replace: bool = ...,
p: None | _ArrayLikeFloat_co = ...,
) -> NDArray[long]: ...
@overload
def choice(
self,
a: ArrayLike,
size: None = ...,
replace: bool = ...,
p: None | _ArrayLikeFloat_co = ...,
) -> Any: ...
@overload
def choice(
self,
a: ArrayLike,
size: _ShapeLike = ...,
replace: bool = ...,
p: None | _ArrayLikeFloat_co = ...,
) -> NDArray[Any]: ...
@overload
def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def uniform(
self,
low: _ArrayLikeFloat_co = ...,
high: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def rand(self) -> float: ...
@overload
def rand(self, *args: int) -> NDArray[float64]: ...
@overload
def randn(self) -> float: ...
@overload
def randn(self, *args: int) -> NDArray[float64]: ...
@overload
def random_integers(self, low: int, high: None | int = ..., size: None = ...) -> int: ... # type: ignore[misc]
@overload
def random_integers(
self,
low: _ArrayLikeInt_co,
high: None | _ArrayLikeInt_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[long]: ...
@overload
def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def standard_normal( # type: ignore[misc]
self, size: _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def normal(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def standard_gamma( # type: ignore[misc]
self,
shape: float,
size: None = ...,
) -> float: ...
@overload
def standard_gamma(
self,
shape: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def gamma(
self,
shape: _ArrayLikeFloat_co,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def f(
self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def noncentral_f(
self,
dfnum: _ArrayLikeFloat_co,
dfden: _ArrayLikeFloat_co,
nonc: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def chisquare(
self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def noncentral_chisquare(
self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def standard_t(
self, df: _ArrayLikeFloat_co, size: None = ...
) -> NDArray[float64]: ...
@overload
def standard_t(
self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def vonmises(
self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def pareto(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def weibull(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def power(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ...
@overload
def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def laplace(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def gumbel(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def logistic(
self,
loc: _ArrayLikeFloat_co = ...,
scale: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def lognormal(
self,
mean: _ArrayLikeFloat_co = ...,
sigma: _ArrayLikeFloat_co = ...,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
@overload
def rayleigh(
self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def wald(
self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
@overload
def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc]
@overload
def triangular(
self,
left: _ArrayLikeFloat_co,
mode: _ArrayLikeFloat_co,
right: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
) -> NDArray[float64]: ...
@overload
def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def binomial(
self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[long]: ...
@overload
def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def negative_binomial(
self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[long]: ...
@overload
def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc]
@overload
def poisson(
self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
) -> NDArray[long]: ...
@overload
def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def zipf(
self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[long]: ...
@overload
def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def geometric(
self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[long]: ...
@overload
def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def hypergeometric(
self,
ngood: _ArrayLikeInt_co,
nbad: _ArrayLikeInt_co,
nsample: _ArrayLikeInt_co,
size: None | _ShapeLike = ...,
) -> NDArray[long]: ...
@overload
def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
@overload
def logseries(
self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[long]: ...
def multivariate_normal(
self,
mean: _ArrayLikeFloat_co,
cov: _ArrayLikeFloat_co,
size: None | _ShapeLike = ...,
check_valid: Literal["warn", "raise", "ignore"] = ...,
tol: float = ...,
) -> NDArray[float64]: ...
def multinomial(
self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[long]: ...
def dirichlet(
self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
) -> NDArray[float64]: ...
def shuffle(self, x: ArrayLike) -> None: ...
@overload
def permutation(self, x: int) -> NDArray[long]: ...
@overload
def permutation(self, x: ArrayLike) -> NDArray[Any]: ...
_rand: RandomState
beta = _rand.beta
binomial = _rand.binomial
bytes = _rand.bytes
chisquare = _rand.chisquare
choice = _rand.choice
dirichlet = _rand.dirichlet
exponential = _rand.exponential
f = _rand.f
gamma = _rand.gamma
get_state = _rand.get_state
geometric = _rand.geometric
gumbel = _rand.gumbel
hypergeometric = _rand.hypergeometric
laplace = _rand.laplace
logistic = _rand.logistic
lognormal = _rand.lognormal
logseries = _rand.logseries
multinomial = _rand.multinomial
multivariate_normal = _rand.multivariate_normal
negative_binomial = _rand.negative_binomial
noncentral_chisquare = _rand.noncentral_chisquare
noncentral_f = _rand.noncentral_f
normal = _rand.normal
pareto = _rand.pareto
permutation = _rand.permutation
poisson = _rand.poisson
power = _rand.power
rand = _rand.rand
randint = _rand.randint
randn = _rand.randn
random = _rand.random
random_integers = _rand.random_integers
random_sample = _rand.random_sample
rayleigh = _rand.rayleigh
seed = _rand.seed
set_state = _rand.set_state
shuffle = _rand.shuffle
standard_cauchy = _rand.standard_cauchy
standard_exponential = _rand.standard_exponential
standard_gamma = _rand.standard_gamma
standard_normal = _rand.standard_normal
standard_t = _rand.standard_t
triangular = _rand.triangular
uniform = _rand.uniform
vonmises = _rand.vonmises
wald = _rand.wald
weibull = _rand.weibull
zipf = _rand.zipf
# Two legacy aliases that are trivial wrappers around random_sample
sample = _rand.random_sample
ranf = _rand.random_sample
def set_bit_generator(bitgen: BitGenerator) -> None:
...
def get_bit_generator() -> BitGenerator:
...
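
A sketch of how the legacy surface above behaves: the module-level functions are bound methods of the hidden _rand RandomState, so they share one global stream, while a separately constructed RandomState with the same seed reproduces it.

import numpy as np

np.random.seed(12345)
a = np.random.random(3)             # alias for random_sample on the global RandomState

rs = np.random.RandomState(12345)   # an independent RandomState, same legacy seeding
b = rs.random_sample(3)
print(np.allclose(a, b))            # True: identical MT19937 stream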

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,561 @@
import os
from os.path import join
import sys
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
assert_raises)
import pytest
from numpy.random import (
Generator, MT19937, PCG64, PCG64DXSM, Philox, RandomState, SeedSequence,
SFC64, default_rng
)
from numpy.random._common import interface
try:
import cffi # noqa: F401
MISSING_CFFI = False
except ImportError:
MISSING_CFFI = True
try:
import ctypes # noqa: F401
MISSING_CTYPES = False
except ImportError:
    MISSING_CTYPES = True  # ctypes is genuinely unavailable; skip the ctypes interface tests
if sys.flags.optimize > 1:
# no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
# cffi cannot succeed
MISSING_CFFI = True
pwd = os.path.dirname(os.path.abspath(__file__))
def assert_state_equal(actual, target):
for key in actual:
if isinstance(actual[key], dict):
assert_state_equal(actual[key], target[key])
elif isinstance(actual[key], np.ndarray):
assert_array_equal(actual[key], target[key])
else:
assert actual[key] == target[key]
def uint32_to_float32(u):
return ((u >> np.uint32(8)) * (1.0 / 2**24)).astype(np.float32)
def uniform32_from_uint64(x):
x = np.uint64(x)
upper = np.array(x >> np.uint64(32), dtype=np.uint32)
lower = np.uint64(0xffffffff)
lower = np.array(x & lower, dtype=np.uint32)
joined = np.column_stack([lower, upper]).ravel()
return uint32_to_float32(joined)
def uniform32_from_uint53(x):
x = np.uint64(x) >> np.uint64(16)
x = np.uint32(x & np.uint64(0xffffffff))
return uint32_to_float32(x)
def uniform32_from_uint32(x):
return uint32_to_float32(x)
def uniform32_from_uint(x, bits):
if bits == 64:
return uniform32_from_uint64(x)
elif bits == 53:
return uniform32_from_uint53(x)
elif bits == 32:
return uniform32_from_uint32(x)
else:
raise NotImplementedError
def uniform_from_uint(x, bits):
if bits in (64, 63, 53):
return uniform_from_uint64(x)
elif bits == 32:
return uniform_from_uint32(x)
def uniform_from_uint64(x):
return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0)
def uniform_from_uint32(x):
out = np.empty(len(x) // 2)
for i in range(0, len(x), 2):
a = x[i] >> 5
b = x[i + 1] >> 6
out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0
return out
def uniform_from_dsfmt(x):
return x.view(np.double) - 1.0
def gauss_from_uint(x, n, bits):
if bits in (64, 63):
doubles = uniform_from_uint64(x)
elif bits == 32:
doubles = uniform_from_uint32(x)
else: # bits == 'dsfmt'
doubles = uniform_from_dsfmt(x)
gauss = []
loc = 0
x1 = x2 = 0.0
while len(gauss) < n:
r2 = 2
while r2 >= 1.0 or r2 == 0.0:
x1 = 2.0 * doubles[loc] - 1.0
x2 = 2.0 * doubles[loc + 1] - 1.0
r2 = x1 * x1 + x2 * x2
loc += 2
f = np.sqrt(-2.0 * np.log(r2) / r2)
gauss.append(f * x2)
gauss.append(f * x1)
return gauss[:n]
def test_seedsequence():
from numpy.random.bit_generator import (ISeedSequence,
ISpawnableSeedSequence,
SeedlessSeedSequence)
s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6)
s1.spawn(10)
s2 = SeedSequence(**s1.state)
assert_equal(s1.state, s2.state)
assert_equal(s1.n_children_spawned, s2.n_children_spawned)
# The interfaces cannot be instantiated themselves.
assert_raises(TypeError, ISeedSequence)
assert_raises(TypeError, ISpawnableSeedSequence)
dummy = SeedlessSeedSequence()
assert_raises(NotImplementedError, dummy.generate_state, 10)
assert len(dummy.spawn(10)) == 10
def test_generator_spawning():
""" Test spawning new generators and bit_generators directly.
"""
rng = np.random.default_rng()
seq = rng.bit_generator.seed_seq
new_ss = seq.spawn(5)
expected_keys = [seq.spawn_key + (i,) for i in range(5)]
assert [c.spawn_key for c in new_ss] == expected_keys
new_bgs = rng.bit_generator.spawn(5)
expected_keys = [seq.spawn_key + (i,) for i in range(5, 10)]
assert [bg.seed_seq.spawn_key for bg in new_bgs] == expected_keys
new_rngs = rng.spawn(5)
expected_keys = [seq.spawn_key + (i,) for i in range(10, 15)]
found_keys = [rng.bit_generator.seed_seq.spawn_key for rng in new_rngs]
assert found_keys == expected_keys
# Sanity check that streams are actually different:
assert new_rngs[0].uniform() != new_rngs[1].uniform()
def test_non_spawnable():
from numpy.random.bit_generator import ISeedSequence
class FakeSeedSequence:
def generate_state(self, n_words, dtype=np.uint32):
return np.zeros(n_words, dtype=dtype)
ISeedSequence.register(FakeSeedSequence)
rng = np.random.default_rng(FakeSeedSequence())
with pytest.raises(TypeError, match="The underlying SeedSequence"):
rng.spawn(5)
with pytest.raises(TypeError, match="The underlying SeedSequence"):
rng.bit_generator.spawn(5)
class Base:
dtype = np.uint64
data2 = data1 = {}
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.bits = 64
cls.dtype = np.uint64
cls.seed_error_type = TypeError
cls.invalid_init_types = []
cls.invalid_init_values = []
@classmethod
def _read_csv(cls, filename):
with open(filename) as csv:
seed = csv.readline()
seed = seed.split(',')
seed = [int(s.strip(), 0) for s in seed[1:]]
data = []
for line in csv:
data.append(int(line.split(',')[-1].strip(), 0))
return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)}
def test_raw(self):
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw(1000)
assert_equal(uints, self.data1['data'])
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw()
assert_equal(uints, self.data1['data'][0])
bit_generator = self.bit_generator(*self.data2['seed'])
uints = bit_generator.random_raw(1000)
assert_equal(uints, self.data2['data'])
def test_random_raw(self):
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw(output=False)
assert uints is None
uints = bit_generator.random_raw(1000, output=False)
assert uints is None
def test_gauss_inv(self):
n = 25
rs = RandomState(self.bit_generator(*self.data1['seed']))
gauss = rs.standard_normal(n)
assert_allclose(gauss,
gauss_from_uint(self.data1['data'], n, self.bits))
rs = RandomState(self.bit_generator(*self.data2['seed']))
gauss = rs.standard_normal(25)
assert_allclose(gauss,
gauss_from_uint(self.data2['data'], n, self.bits))
def test_uniform_double(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
vals = uniform_from_uint(self.data1['data'], self.bits)
uniforms = rs.random(len(vals))
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float64)
rs = Generator(self.bit_generator(*self.data2['seed']))
vals = uniform_from_uint(self.data2['data'], self.bits)
uniforms = rs.random(len(vals))
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float64)
def test_uniform_float(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
vals = uniform32_from_uint(self.data1['data'], self.bits)
uniforms = rs.random(len(vals), dtype=np.float32)
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float32)
rs = Generator(self.bit_generator(*self.data2['seed']))
vals = uniform32_from_uint(self.data2['data'], self.bits)
uniforms = rs.random(len(vals), dtype=np.float32)
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float32)
def test_repr(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
assert 'Generator' in repr(rs)
assert f'{id(rs):#x}'.upper().replace('X', 'x') in repr(rs)
def test_str(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
assert 'Generator' in str(rs)
assert str(self.bit_generator.__name__) in str(rs)
assert f'{id(rs):#x}'.upper().replace('X', 'x') not in str(rs)
def test_pickle(self):
import pickle
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
bitgen_pkl = pickle.dumps(bit_generator)
reloaded = pickle.loads(bitgen_pkl)
reloaded_state = reloaded.state
assert_array_equal(Generator(bit_generator).standard_normal(1000),
Generator(reloaded).standard_normal(1000))
assert bit_generator is not reloaded
assert_state_equal(reloaded_state, state)
ss = SeedSequence(100)
aa = pickle.loads(pickle.dumps(ss))
assert_equal(ss.state, aa.state)
def test_pickle_preserves_seed_sequence(self):
# GH 26234
# Add explicit test that bit generators preserve seed sequences
import pickle
bit_generator = self.bit_generator(*self.data1['seed'])
ss = bit_generator.seed_seq
bg_plk = pickle.loads(pickle.dumps(bit_generator))
ss_plk = bg_plk.seed_seq
assert_equal(ss.state, ss_plk.state)
assert_equal(ss.pool, ss_plk.pool)
bit_generator.seed_seq.spawn(10)
bg_plk = pickle.loads(pickle.dumps(bit_generator))
ss_plk = bg_plk.seed_seq
assert_equal(ss.state, ss_plk.state)
assert_equal(ss.n_children_spawned, ss_plk.n_children_spawned)
def test_invalid_state_type(self):
bit_generator = self.bit_generator(*self.data1['seed'])
with pytest.raises(TypeError):
bit_generator.state = {'1'}
def test_invalid_state_value(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
state['bit_generator'] = 'otherBitGenerator'
with pytest.raises(ValueError):
bit_generator.state = state
def test_invalid_init_type(self):
bit_generator = self.bit_generator
for st in self.invalid_init_types:
with pytest.raises(TypeError):
bit_generator(*st)
def test_invalid_init_values(self):
bit_generator = self.bit_generator
for st in self.invalid_init_values:
with pytest.raises((ValueError, OverflowError)):
bit_generator(*st)
def test_benchmark(self):
bit_generator = self.bit_generator(*self.data1['seed'])
bit_generator._benchmark(1)
bit_generator._benchmark(1, 'double')
with pytest.raises(ValueError):
bit_generator._benchmark(1, 'int32')
@pytest.mark.skipif(MISSING_CFFI, reason='cffi not available')
def test_cffi(self):
bit_generator = self.bit_generator(*self.data1['seed'])
cffi_interface = bit_generator.cffi
assert isinstance(cffi_interface, interface)
other_cffi_interface = bit_generator.cffi
assert other_cffi_interface is cffi_interface
@pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available')
def test_ctypes(self):
bit_generator = self.bit_generator(*self.data1['seed'])
ctypes_interface = bit_generator.ctypes
assert isinstance(ctypes_interface, interface)
other_ctypes_interface = bit_generator.ctypes
assert other_ctypes_interface is ctypes_interface
def test_getstate(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
alt_state = bit_generator.__getstate__()
assert isinstance(alt_state, tuple)
assert_state_equal(state, alt_state[0])
assert isinstance(alt_state[1], SeedSequence)
class TestPhilox(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = Philox
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(
join(pwd, './data/philox-testset-1.csv'))
cls.data2 = cls._read_csv(
join(pwd, './data/philox-testset-2.csv'))
cls.seed_error_type = TypeError
cls.invalid_init_types = []
cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)]
def test_set_key(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
keyed = self.bit_generator(counter=state['state']['counter'],
key=state['state']['key'])
assert_state_equal(bit_generator.state, keyed.state)
class TestPCG64(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv'))
cls.seed_error_type = (ValueError, TypeError)
cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
cls.invalid_init_values = [(-1,)]
def test_advance_symmetry(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
state = rs.bit_generator.state
step = -0x9e3779b97f4a7c150000000000000000
rs.bit_generator.advance(step)
val_neg = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(2**128 + step)
val_pos = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(10 * 2**128 + step)
val_big = rs.integers(10)
assert val_neg == val_pos
assert val_big == val_pos
    def test_advance_large(self):
rs = Generator(self.bit_generator(38219308213743))
pcg = rs.bit_generator
state = pcg.state["state"]
initial_state = 287608843259529770491897792873167516365
assert state["state"] == initial_state
pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
state = pcg.state["state"]
advanced_state = 135275564607035429730177404003164635391
assert state["state"] == advanced_state
class TestPCG64DXSM(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64DXSM
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-2.csv'))
cls.seed_error_type = (ValueError, TypeError)
cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
cls.invalid_init_values = [(-1,)]
def test_advance_symmetry(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
state = rs.bit_generator.state
step = -0x9e3779b97f4a7c150000000000000000
rs.bit_generator.advance(step)
val_neg = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(2**128 + step)
val_pos = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(10 * 2**128 + step)
val_big = rs.integers(10)
assert val_neg == val_pos
assert val_big == val_pos
    def test_advance_large(self):
rs = Generator(self.bit_generator(38219308213743))
pcg = rs.bit_generator
state = pcg.state
initial_state = 287608843259529770491897792873167516365
assert state["state"]["state"] == initial_state
pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1)))
state = pcg.state["state"]
advanced_state = 277778083536782149546677086420637664879
assert state["state"] == advanced_state
class TestMT19937(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = MT19937
cls.bits = 32
cls.dtype = np.uint32
cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv'))
cls.seed_error_type = ValueError
cls.invalid_init_types = []
cls.invalid_init_values = [(-1,)]
def test_seed_float_array(self):
assert_raises(TypeError, self.bit_generator, np.array([np.pi]))
assert_raises(TypeError, self.bit_generator, np.array([-np.pi]))
assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi]))
assert_raises(TypeError, self.bit_generator, np.array([0, np.pi]))
assert_raises(TypeError, self.bit_generator, [np.pi])
assert_raises(TypeError, self.bit_generator, [0, np.pi])
def test_state_tuple(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
bit_generator = rs.bit_generator
state = bit_generator.state
desired = rs.integers(2 ** 16)
tup = (state['bit_generator'], state['state']['key'],
state['state']['pos'])
bit_generator.state = tup
actual = rs.integers(2 ** 16)
assert_equal(actual, desired)
tup = tup + (0, 0.0)
bit_generator.state = tup
actual = rs.integers(2 ** 16)
assert_equal(actual, desired)
class TestSFC64(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = SFC64
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(
join(pwd, './data/sfc64-testset-1.csv'))
cls.data2 = cls._read_csv(
join(pwd, './data/sfc64-testset-2.csv'))
cls.seed_error_type = (ValueError, TypeError)
cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
cls.invalid_init_values = [(-1,)]
def test_legacy_pickle(self):
# Pickling format was changed in 2.0.x
import gzip
import pickle
expected_state = np.array(
[
9957867060933711493,
532597980065565856,
14769588338631205282,
13
],
dtype=np.uint64
)
base_path = os.path.split(os.path.abspath(__file__))[0]
        pkl_file = os.path.join(base_path, "data", "sfc64_np126.pkl.gz")
with gzip.open(pkl_file) as gz:
sfc = pickle.load(gz)
assert isinstance(sfc, SFC64)
assert_equal(sfc.state["state"]["state"], expected_state)
class TestDefaultRNG:
def test_seed(self):
for args in [(), (None,), (1234,), ([1234, 5678],)]:
rg = default_rng(*args)
assert isinstance(rg.bit_generator, PCG64)
def test_passthrough(self):
bg = Philox()
rg = default_rng(bg)
assert rg.bit_generator is bg
rg2 = default_rng(rg)
assert rg2 is rg
assert rg2.bit_generator is bg

View File

@ -0,0 +1,119 @@
from importlib.util import spec_from_file_location, module_from_spec
import os
import pathlib
import pytest
import shutil
import subprocess
import sys
import sysconfig
import textwrap
import warnings
import numpy as np
from numpy.testing import IS_WASM, IS_EDITABLE
try:
import cffi
except ImportError:
cffi = None
if sys.flags.optimize > 1:
# no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
# cffi cannot succeed
cffi = None
try:
with warnings.catch_warnings(record=True) as w:
# numba issue gh-4733
warnings.filterwarnings('always', '', DeprecationWarning)
import numba
except (ImportError, SystemError):
# Certain numpy/numba versions trigger a SystemError due to a numba bug
numba = None
try:
import cython
from Cython.Compiler.Version import version as cython_version
except ImportError:
cython = None
else:
from numpy._utils import _pep440
# Note: keep in sync with the one in pyproject.toml
required_version = '3.0.6'
if _pep440.parse(cython_version) < _pep440.Version(required_version):
# too old or wrong cython, skip the test
cython = None
@pytest.mark.skipif(
IS_EDITABLE,
reason='Editable install cannot find .pxd headers'
)
@pytest.mark.skipif(
sys.platform == "win32" and sys.maxsize < 2**32,
reason="Failing in 32-bit Windows wheel build job, skip for now"
)
@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess")
@pytest.mark.skipif(cython is None, reason="requires cython")
@pytest.mark.slow
def test_cython(tmp_path):
import glob
# build the examples in a temporary directory
srcdir = os.path.join(os.path.dirname(__file__), '..')
shutil.copytree(srcdir, tmp_path / 'random')
build_dir = tmp_path / 'random' / '_examples' / 'cython'
target_dir = build_dir / "build"
os.makedirs(target_dir, exist_ok=True)
if sys.platform == "win32":
subprocess.check_call(["meson", "setup",
"--buildtype=release",
"--vsenv", str(build_dir)],
cwd=target_dir,
)
else:
subprocess.check_call(["meson", "setup", str(build_dir)],
cwd=target_dir
)
subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir)
# gh-16162: make sure numpy's __init__.pxd was used for cython
# not really part of this test, but it is a convenient place to check
g = glob.glob(str(target_dir / "*" / "extending.pyx.c"))
with open(g[0]) as fid:
txt_to_find = 'NumPy API declarations from "numpy/__init__'
for i, line in enumerate(fid):
if txt_to_find in line:
break
else:
assert False, ("Could not find '{}' in C file, "
"wrong pxd used".format(txt_to_find))
# import without adding the directory to sys.path
suffix = sysconfig.get_config_var('EXT_SUFFIX')
def load(modname):
so = (target_dir / modname).with_suffix(suffix)
spec = spec_from_file_location(modname, so)
mod = module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
# test that the module can be imported
load("extending")
load("extending_cpp")
# actually test the cython c-extension
extending_distributions = load("extending_distributions")
from numpy.random import PCG64
values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd')
assert values.shape == (10,)
assert values.dtype == np.float64
@pytest.mark.skipif(numba is None or cffi is None,
reason="requires numba and cffi")
def test_numba():
from numpy.random._examples.numba import extending # noqa: F401
@pytest.mark.skipif(cffi is None, reason="requires cffi")
def test_cffi():
from numpy.random._examples.cffi import extending # noqa: F401

File diff suppressed because it is too large

View File

@ -0,0 +1,206 @@
from numpy.testing import (assert_, assert_array_equal)
import numpy as np
import pytest
from numpy.random import Generator, MT19937
class TestRegression:
def setup_method(self):
self.mt19937 = Generator(MT19937(121263137472525314065))
def test_vonmises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = self.mt19937.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(self.mt19937.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(self.mt19937.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems
assert_(self.mt19937.hypergeometric(*args) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
rvsn = self.mt19937.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
mt19937 = Generator(MT19937(12345))
shuffled = np.array(t, dtype=object)
mt19937.shuffle(shuffled)
expected = np.array([t[2], t[0], t[3], t[1]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom BitGenerator does not call into global state
res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4])
for i in range(3):
mt19937 = Generator(MT19937(i))
m = Generator(MT19937(4321))
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
self.mt19937.multivariate_normal([0], [[0]], size=1)
self.mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
self.mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
x = self.mt19937.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta')
def test_beta_very_small_parameters(self):
# gh-24203: beta would hang with very small parameters.
self.mt19937.beta(1e-49, 1e-40)
def test_beta_ridiculously_small_parameters(self):
# gh-24266: beta would generate nan when the parameters
# were subnormal or a small multiple of the smallest normal.
tiny = np.finfo(1.0).tiny
x = self.mt19937.beta(tiny/32, tiny/40, size=50)
assert not np.any(np.isnan(x))
def test_beta_expected_zero_frequency(self):
# gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta
# would generate too many zeros.
a = 0.0025
b = 0.0025
n = 1000000
x = self.mt19937.beta(a, b, size=n)
nzeros = np.count_nonzero(x == 0)
# beta CDF at x = np.finfo(np.double).smallest_subnormal/2
        # is p = 0.0776169083131899, e.g.,
#
# import numpy as np
# from mpmath import mp
# mp.dps = 160
# x = mp.mpf(np.finfo(np.float64).smallest_subnormal)/2
# # CDF of the beta distribution at x:
# p = mp.betainc(a, b, x1=0, x2=x, regularized=True)
# n = 1000000
        # expected_freq = float(n*p)
#
expected_freq = 77616.90831318991
assert 0.95*expected_freq < nzeros < 1.05*expected_freq
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = self.mt19937.choice(a, p=probs)
assert_(c in a)
with pytest.raises(ValueError):
self.mt19937.choice(a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
a = np.array(['a', 'a' * 1000])
for _ in range(100):
self.mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
self.mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
class N(np.ndarray):
pass
mt19937 = Generator(MT19937(1))
orig = np.arange(3).view(N)
perm = mt19937.permutation(orig)
assert_array_equal(perm, np.array([2, 0, 1]))
assert_array_equal(orig, np.arange(3).view(N))
class M:
a = np.arange(5)
def __array__(self, dtype=None, copy=None):
return self.a
mt19937 = Generator(MT19937(1))
m = M()
perm = mt19937.permutation(m)
assert_array_equal(perm, np.array([4, 1, 3, 0, 2]))
assert_array_equal(m.__array__(), np.arange(5))
def test_gamma_0(self):
assert self.mt19937.standard_gamma(0.0) == 0.0
assert_array_equal(self.mt19937.standard_gamma([0.0]), 0.0)
actual = self.mt19937.standard_gamma([0.0], dtype='float')
expected = np.array([0.], dtype=np.float32)
assert_array_equal(actual, expected)
def test_geometric_tiny_prob(self):
# Regression test for gh-17007.
# When p = 1e-30, the probability that a sample will exceed 2**63-1
# is 0.9999999999907766, so we expect the result to be all 2**63-1.
assert_array_equal(self.mt19937.geometric(p=1e-30, size=3),
np.iinfo(np.int64).max)
def test_zipf_large_parameter(self):
# Regression test for part of gh-9829: a call such as rng.zipf(10000)
# would hang.
n = 8
sample = self.mt19937.zipf(10000, size=n)
assert_array_equal(sample, np.ones(n, dtype=np.int64))
def test_zipf_a_near_1(self):
# Regression test for gh-9829: a call such as rng.zipf(1.0000000000001)
# would hang.
n = 100000
sample = self.mt19937.zipf(1.0000000000001, size=n)
# Not much of a test, but let's do something more than verify that
# it doesn't hang. Certainly for a monotonically decreasing
# discrete distribution truncated to signed 64 bit integers, more
# than half should be less than 2**62.
assert np.count_nonzero(sample < 2**62) > n/2

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,216 @@
import sys
import pytest
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
import numpy as np
from numpy import random
class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = random.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = [
(2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
]
is_64bits = sys.maxsize > 2**32
if is_64bits and sys.platform != 'win32':
# Check for 64-bit systems
args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
for arg in args:
assert_(random.hypergeometric(*arg) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
random.seed(0)
rvsn = random.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
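# Illustrative aside (not part of the upstream test): the theoretical
# frequencies quoted above come from the log-series pmf
# P(k) = -p**k / (k * log(1 - p)) evaluated at p = 0.8.
def _logseries_pmf_example():
    import math
    p = 0.8
    p1 = -p / math.log(1 - p)           # ~0.49706795
    p2 = -p**2 / (2 * math.log(1 - p))  # ~0.19882718
    return p1, p2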
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
random.seed(12345)
shuffled = list(t)
random.shuffle(shuffled)
expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom RandomState does not call into global state
m = random.RandomState()
res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
for i in range(3):
random.seed(i)
m.seed(4321)
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
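# Illustrative aside (a minimal sketch, not part of the upstream test): the
# module-level np.random functions draw from a hidden global RandomState,
# while an explicitly constructed RandomState keeps its own stream, so
# reseeding the global stream does not disturb it.
def _independent_randomstate_example():
    rs = random.RandomState(4321)
    random.seed(0)             # reseeds only the global stream
    return rs.random_sample()  # unaffected by the global reseed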
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
random.multivariate_normal([0], [[0]], size=1)
random.multivariate_normal([0], [[0]], size=np.int_(1))
random.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
random.seed(1234567890)
x = random.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in random.beta')
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
random.seed(1234)
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = random.choice(a, p=probs)
assert_(c in a)
assert_raises(ValueError, random.choice, a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
random.seed(1234)
a = np.array(['a', 'a' * 1000])
for _ in range(100):
random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
random.seed(1234)
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
class N(np.ndarray):
pass
random.seed(1)
orig = np.arange(3).view(N)
perm = random.permutation(orig)
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
class M:
a = np.arange(5)
def __array__(self, dtype=None, copy=None):
return self.a
random.seed(1)
m = M()
perm = random.permutation(m)
assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
assert_array_equal(m.__array__(), np.arange(5))
def test_warns_byteorder(self):
# GH 13159
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
with pytest.deprecated_call(match='non-native byteorder is not'):
random.randint(0, 200, size=10, dtype=other_byteord_dt)
def test_named_argument_initialization(self):
# GH 13669
rs1 = np.random.RandomState(123456789)
rs2 = np.random.RandomState(seed=123456789)
assert rs1.randint(0, 100) == rs2.randint(0, 100)
def test_choice_return_dtype(self):
# GH 9867; the NumPy default integer type has since changed, so the expected dtype is np.long.
c = np.random.choice(10, p=[.1]*10, size=2)
assert c.dtype == np.dtype(np.long)
c = np.random.choice(10, p=[.1]*10, replace=False, size=2)
assert c.dtype == np.dtype(np.long)
c = np.random.choice(10, size=2)
assert c.dtype == np.dtype(np.long)
c = np.random.choice(10, replace=False, size=2)
assert c.dtype == np.dtype(np.long)
@pytest.mark.skipif(np.iinfo('l').max < 2**32,
reason='Cannot test with 32-bit C long')
def test_randint_117(self):
# GH 14189
random.seed(0)
expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
2588848963, 3684848379, 2340255427, 3638918503,
1819583497, 2678185683], dtype='int64')
actual = random.randint(2**32, size=10)
assert_array_equal(actual, expected)
def test_p_zero_stream(self):
# Regression test for gh-14522. Ensure that future versions
# generate the same variates as version 1.16.
np.random.seed(12345)
assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
[0, 0, 0, 1, 1])
def test_n_zero_stream(self):
# Regression test for gh-14522. Ensure that future versions
# generate the same variates as version 1.16.
np.random.seed(8675309)
expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
expected)
def test_multinomial_empty():
# gh-20483
# Ensure that empty p-vals are correctly handled
assert random.multinomial(10, []).shape == (0,)
assert random.multinomial(3, [], size=(7, 5, 3)).shape == (7, 5, 3, 0)
def test_multinomial_1d_pval():
# gh-20483
with pytest.raises(TypeError, match="pvals must be a 1-d"):
random.multinomial(10, 0.3)

View File

@ -0,0 +1,149 @@
import sys
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
from numpy import random
import numpy as np
class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = random.mtrand.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = [
(2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
]
is_64bits = sys.maxsize > 2**32
if is_64bits and sys.platform != 'win32':
# Check for 64-bit systems
args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
for arg in args:
assert_(np.random.hypergeometric(*arg) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
np.random.seed(0)
rvsn = np.random.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
np.random.seed(12345)
shuffled = list(t)
random.shuffle(shuffled)
expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom RandomState does not call into global state
m = np.random.RandomState()
res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
for i in range(3):
np.random.seed(i)
m.seed(4321)
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
np.random.multivariate_normal([0], [[0]], size=1)
np.random.multivariate_normal([0], [[0]], size=np.int_(1))
np.random.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
np.random.seed(1234567890)
x = np.random.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta')
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
np.random.seed(1234)
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = np.random.choice(a, p=probs)
assert_(c in a)
assert_raises(ValueError, np.random.choice, a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
np.random.seed(1234)
a = np.array(['a', 'a' * 1000])
for _ in range(100):
np.random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
np.random.seed(1234)
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
np.random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
class N(np.ndarray):
pass
np.random.seed(1)
orig = np.arange(3).view(N)
perm = np.random.permutation(orig)
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
class M:
a = np.arange(5)
def __array__(self, dtype=None, copy=None):
return self.a
np.random.seed(1)
m = M()
perm = np.random.permutation(m)
assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
assert_array_equal(m.__array__(), np.arange(5))

View File

@ -0,0 +1,80 @@
import numpy as np
from numpy.testing import assert_array_equal, assert_array_compare
from numpy.random import SeedSequence
def test_reference_data():
""" Check that SeedSequence generates data the same as the C++ reference.
https://gist.github.com/imneme/540829265469e673d045
"""
inputs = [
[3735928559, 195939070, 229505742, 305419896],
[3668361503, 4165561550, 1661411377, 3634257570],
[164546577, 4166754639, 1765190214, 1303880213],
[446610472, 3941463886, 522937693, 1882353782],
[1864922766, 1719732118, 3882010307, 1776744564],
[4141682960, 3310988675, 553637289, 902896340],
[1134851934, 2352871630, 3699409824, 2648159817],
[1240956131, 3107113773, 1283198141, 1924506131],
[2669565031, 579818610, 3042504477, 2774880435],
[2766103236, 2883057919, 4029656435, 862374500],
]
outputs = [
[3914649087, 576849849, 3593928901, 2229911004],
[2240804226, 3691353228, 1365957195, 2654016646],
[3562296087, 3191708229, 1147942216, 3726991905],
[1403443605, 3591372999, 1291086759, 441919183],
[1086200464, 2191331643, 560336446, 3658716651],
[3249937430, 2346751812, 847844327, 2996632307],
[2584285912, 4034195531, 3523502488, 169742686],
[959045797, 3875435559, 1886309314, 359682705],
[3978441347, 432478529, 3223635119, 138903045],
[296367413, 4262059219, 13109864, 3283683422],
]
outputs64 = [
[2477551240072187391, 9577394838764454085],
[15854241394484835714, 11398914698975566411],
[13708282465491374871, 16007308345579681096],
[15424829579845884309, 1898028439751125927],
[9411697742461147792, 15714068361935982142],
[10079222287618677782, 12870437757549876199],
[17326737873898640088, 729039288628699544],
[16644868984619524261, 1544825456798124994],
[1857481142255628931, 596584038813451439],
[18305404959516669237, 14103312907920476776],
]
for seed, expected, expected64 in zip(inputs, outputs, outputs64):
expected = np.array(expected, dtype=np.uint32)
ss = SeedSequence(seed)
state = ss.generate_state(len(expected))
assert_array_equal(state, expected)
state64 = ss.generate_state(len(expected64), dtype=np.uint64)
assert_array_equal(state64, expected64)
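# Usage sketch (not part of the upstream test): generate_state is the typical
# way to turn a SeedSequence into seed words for a bit generator.
def _generate_state_example():
    ss = SeedSequence(12345)
    words32 = ss.generate_state(4)                   # four uint32 words
    words64 = ss.generate_state(2, dtype=np.uint64)  # two uint64 words
    return words32, words64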
def test_zero_padding():
""" Ensure that the implicit zero-padding does not cause problems.
"""
# Ensure that large integers are inserted in little-endian fashion to avoid
# trailing 0s.
ss0 = SeedSequence(42)
ss1 = SeedSequence(42 << 32)
assert_array_compare(
np.not_equal,
ss0.generate_state(4),
ss1.generate_state(4))
# Ensure backwards compatibility with the original 0.17 release for small
# integers and no spawn key.
expected42 = np.array([3444837047, 2669555309, 2046530742, 3581440988],
dtype=np.uint32)
assert_array_equal(SeedSequence(42).generate_state(4), expected42)
# Regression test for gh-16539 to ensure that the implicit 0s don't
# conflict with spawn keys.
assert_array_compare(
np.not_equal,
SeedSequence(42, spawn_key=(0,)).generate_state(4),
expected42)
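# Illustrative aside on the zero-padding test above (shown for intuition only;
# the exact internals are an assumption): integer entropy is split into 32-bit
# words, least-significant word first, which is why 42 and 42 << 32 feed
# different word sequences into the pool.
def _uint32_words_example(n):
    words = []
    while n:
        words.append(n & 0xFFFFFFFF)
        n >>= 32
    return words or [0]
# _uint32_words_example(42) -> [42]; _uint32_words_example(42 << 32) -> [0, 42]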

View File

@ -0,0 +1,818 @@
import pickle
from functools import partial
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_, assert_array_equal
from numpy.random import (Generator, MT19937, PCG64, PCG64DXSM, Philox, SFC64)
@pytest.fixture(scope='module',
params=(np.bool, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64))
def dtype(request):
return request.param
def params_0(f):
val = f()
assert_(np.isscalar(val))
val = f(10)
assert_(val.shape == (10,))
val = f((10, 10))
assert_(val.shape == (10, 10))
val = f((10, 10, 10))
assert_(val.shape == (10, 10, 10))
val = f(size=(5, 5))
assert_(val.shape == (5, 5))
def params_1(f, bounded=False):
a = 5.0
b = np.arange(2.0, 12.0)
c = np.arange(2.0, 102.0).reshape((10, 10))
d = np.arange(2.0, 1002.0).reshape((10, 10, 10))
e = np.array([2.0, 3.0])
g = np.arange(2.0, 12.0).reshape((1, 10, 1))
if bounded:
a = 0.5
b = b / (1.5 * b.max())
c = c / (1.5 * c.max())
d = d / (1.5 * d.max())
e = e / (1.5 * e.max())
g = g / (1.5 * g.max())
# Scalar
f(a)
# Scalar - size
f(a, size=(10, 10))
# 1d
f(b)
# 2d
f(c)
# 3d
f(d)
# 1d size
f(b, size=10)
# 2d - size - broadcast
f(e, size=(10, 2))
# 3d - size
f(g, size=(10, 10, 10))
def comp_state(state1, state2):
identical = True
if isinstance(state1, dict):
for key in state1:
identical &= comp_state(state1[key], state2[key])
elif type(state1) != type(state2):
identical &= type(state1) == type(state2)
else:
if (isinstance(state1, (list, tuple, np.ndarray)) and isinstance(
state2, (list, tuple, np.ndarray))):
for s1, s2 in zip(state1, state2):
identical &= comp_state(s1, s2)
else:
identical &= state1 == state2
return identical
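# Usage sketch (not part of the upstream helpers): comp_state is truthy only
# when two state snapshots match element-by-element, so identically seeded
# bit generators compare equal and differently seeded ones do not.
def _comp_state_example():
    s1 = PCG64(1234).state
    s2 = PCG64(1234).state
    return comp_state(s1, s2) and not comp_state(s1, PCG64(4321).state)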
def warmup(rg, n=None):
if n is None:
n = 11 + np.random.randint(0, 20)
rg.standard_normal(n)
rg.standard_normal(n)
rg.standard_normal(n, dtype=np.float32)
rg.standard_normal(n, dtype=np.float32)
rg.integers(0, 2 ** 24, n, dtype=np.uint64)
rg.integers(0, 2 ** 48, n, dtype=np.uint64)
rg.standard_gamma(11.0, n)
rg.standard_gamma(11.0, n, dtype=np.float32)
rg.random(n, dtype=np.float64)
rg.random(n, dtype=np.float32)
class RNG:
@classmethod
def setup_class(cls):
# Overridden in test classes. Placeholder to silence IDE noise
cls.bit_generator = PCG64
cls.advance = None
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
@classmethod
def _extra_setup(cls):
cls.vec_1d = np.arange(2.0, 102.0)
cls.vec_2d = np.arange(2.0, 102.0)[None, :]
cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100))
cls.seed_error = TypeError
def _reset_state(self):
self.rg.bit_generator.state = self.initial_state
def test_init(self):
rg = Generator(self.bit_generator())
state = rg.bit_generator.state
rg.standard_normal(1)
rg.standard_normal(1)
rg.bit_generator.state = state
new_state = rg.bit_generator.state
assert_(comp_state(state, new_state))
def test_advance(self):
state = self.rg.bit_generator.state
if hasattr(self.rg.bit_generator, 'advance'):
self.rg.bit_generator.advance(self.advance)
assert_(not comp_state(state, self.rg.bit_generator.state))
else:
bitgen_name = self.rg.bit_generator.__class__.__name__
pytest.skip(f'Advance is not supported by {bitgen_name}')
def test_jump(self):
state = self.rg.bit_generator.state
if hasattr(self.rg.bit_generator, 'jumped'):
bit_gen2 = self.rg.bit_generator.jumped()
jumped_state = bit_gen2.state
assert_(not comp_state(state, jumped_state))
self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17)
self.rg.bit_generator.state = state
bit_gen3 = self.rg.bit_generator.jumped()
rejumped_state = bit_gen3.state
assert_(comp_state(jumped_state, rejumped_state))
else:
bitgen_name = self.rg.bit_generator.__class__.__name__
if bitgen_name not in ('SFC64',):
raise AttributeError(f'no "jumped" in {bitgen_name}')
pytest.skip(f'Jump is not supported by {bitgen_name}')
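# Usage sketch (not part of the test class): jumped() is the usual way to
# derive independent streams for parallel workers from a single root seed;
# each call returns a new bit generator advanced by a large, fixed distance.
def _make_parallel_generators(seed, n_streams):
    bg = PCG64(seed)
    gens = []
    for _ in range(n_streams):
        gens.append(Generator(bg))
        bg = bg.jumped()  # the next stream starts far ahead of the previous one
    return gens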
def test_uniform(self):
r = self.rg.uniform(-1.0, 0.0, size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
def test_uniform_array(self):
r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
r = self.rg.uniform(np.array([-1.0] * 10),
np.array([0.0] * 10), size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
def test_random(self):
assert_(len(self.rg.random(10)) == 10)
params_0(self.rg.random)
def test_standard_normal_zig(self):
assert_(len(self.rg.standard_normal(10)) == 10)
def test_standard_normal(self):
assert_(len(self.rg.standard_normal(10)) == 10)
params_0(self.rg.standard_normal)
def test_standard_gamma(self):
assert_(len(self.rg.standard_gamma(10, 10)) == 10)
assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10)
params_1(self.rg.standard_gamma)
def test_standard_exponential(self):
assert_(len(self.rg.standard_exponential(10)) == 10)
params_0(self.rg.standard_exponential)
def test_standard_exponential_float(self):
randoms = self.rg.standard_exponential(10, dtype='float32')
assert_(len(randoms) == 10)
assert randoms.dtype == np.float32
params_0(partial(self.rg.standard_exponential, dtype='float32'))
def test_standard_exponential_float_log(self):
randoms = self.rg.standard_exponential(10, dtype='float32',
method='inv')
assert_(len(randoms) == 10)
assert randoms.dtype == np.float32
params_0(partial(self.rg.standard_exponential, dtype='float32',
method='inv'))
def test_standard_cauchy(self):
assert_(len(self.rg.standard_cauchy(10)) == 10)
params_0(self.rg.standard_cauchy)
def test_standard_t(self):
assert_(len(self.rg.standard_t(10, 10)) == 10)
params_1(self.rg.standard_t)
def test_binomial(self):
assert_(self.rg.binomial(10, .5) >= 0)
assert_(self.rg.binomial(1000, .5) >= 0)
def test_reset_state(self):
state = self.rg.bit_generator.state
int_1 = self.rg.integers(2**31)
self.rg.bit_generator.state = state
int_2 = self.rg.integers(2**31)
assert_(int_1 == int_2)
def test_entropy_init(self):
rg = Generator(self.bit_generator())
rg2 = Generator(self.bit_generator())
assert_(not comp_state(rg.bit_generator.state,
rg2.bit_generator.state))
def test_seed(self):
rg = Generator(self.bit_generator(*self.seed))
rg2 = Generator(self.bit_generator(*self.seed))
rg.random()
rg2.random()
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_reset_state_gauss(self):
rg = Generator(self.bit_generator(*self.seed))
rg.standard_normal()
state = rg.bit_generator.state
n1 = rg.standard_normal(size=10)
rg2 = Generator(self.bit_generator())
rg2.bit_generator.state = state
n2 = rg2.standard_normal(size=10)
assert_array_equal(n1, n2)
def test_reset_state_uint32(self):
rg = Generator(self.bit_generator(*self.seed))
rg.integers(0, 2 ** 24, 120, dtype=np.uint32)
state = rg.bit_generator.state
n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32)
rg2 = Generator(self.bit_generator())
rg2.bit_generator.state = state
n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32)
assert_array_equal(n1, n2)
def test_reset_state_float(self):
rg = Generator(self.bit_generator(*self.seed))
rg.random(dtype='float32')
state = rg.bit_generator.state
n1 = rg.random(size=10, dtype='float32')
rg2 = Generator(self.bit_generator())
rg2.bit_generator.state = state
n2 = rg2.random(size=10, dtype='float32')
assert_((n1 == n2).all())
def test_shuffle(self):
original = np.arange(200, 0, -1)
permuted = self.rg.permutation(original)
assert_((original != permuted).any())
def test_permutation(self):
original = np.arange(200, 0, -1)
permuted = self.rg.permutation(original)
assert_((original != permuted).any())
def test_beta(self):
vals = self.rg.beta(2.0, 2.0, 10)
assert_(len(vals) == 10)
vals = self.rg.beta(np.array([2.0] * 10), 2.0)
assert_(len(vals) == 10)
vals = self.rg.beta(2.0, np.array([2.0] * 10))
assert_(len(vals) == 10)
vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10))
assert_(len(vals) == 10)
vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10))
assert_(vals.shape == (10, 10))
def test_bytes(self):
vals = self.rg.bytes(10)
assert_(len(vals) == 10)
def test_chisquare(self):
vals = self.rg.chisquare(2.0, 10)
assert_(len(vals) == 10)
params_1(self.rg.chisquare)
def test_exponential(self):
vals = self.rg.exponential(2.0, 10)
assert_(len(vals) == 10)
params_1(self.rg.exponential)
def test_f(self):
vals = self.rg.f(3, 1000, 10)
assert_(len(vals) == 10)
def test_gamma(self):
vals = self.rg.gamma(3, 2, 10)
assert_(len(vals) == 10)
def test_geometric(self):
vals = self.rg.geometric(0.5, 10)
assert_(len(vals) == 10)
params_1(self.rg.geometric, bounded=True)
def test_gumbel(self):
vals = self.rg.gumbel(2.0, 2.0, 10)
assert_(len(vals) == 10)
def test_laplace(self):
vals = self.rg.laplace(2.0, 2.0, 10)
assert_(len(vals) == 10)
def test_logistic(self):
vals = self.rg.logistic(2.0, 2.0, 10)
assert_(len(vals) == 10)
def test_logseries(self):
vals = self.rg.logseries(0.5, 10)
assert_(len(vals) == 10)
def test_negative_binomial(self):
vals = self.rg.negative_binomial(10, 0.2, 10)
assert_(len(vals) == 10)
def test_noncentral_chisquare(self):
vals = self.rg.noncentral_chisquare(10, 2, 10)
assert_(len(vals) == 10)
def test_noncentral_f(self):
vals = self.rg.noncentral_f(3, 1000, 2, 10)
assert_(len(vals) == 10)
vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2)
assert_(len(vals) == 10)
vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2)
assert_(len(vals) == 10)
vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10))
assert_(len(vals) == 10)
def test_normal(self):
vals = self.rg.normal(10, 0.2, 10)
assert_(len(vals) == 10)
def test_pareto(self):
vals = self.rg.pareto(3.0, 10)
assert_(len(vals) == 10)
def test_poisson(self):
vals = self.rg.poisson(10, 10)
assert_(len(vals) == 10)
vals = self.rg.poisson(np.array([10] * 10))
assert_(len(vals) == 10)
params_1(self.rg.poisson)
def test_power(self):
vals = self.rg.power(0.2, 10)
assert_(len(vals) == 10)
def test_integers(self):
vals = self.rg.integers(10, 20, 10)
assert_(len(vals) == 10)
def test_rayleigh(self):
vals = self.rg.rayleigh(0.2, 10)
assert_(len(vals) == 10)
params_1(self.rg.rayleigh, bounded=True)
def test_vonmises(self):
vals = self.rg.vonmises(10, 0.2, 10)
assert_(len(vals) == 10)
def test_wald(self):
vals = self.rg.wald(1.0, 1.0, 10)
assert_(len(vals) == 10)
def test_weibull(self):
vals = self.rg.weibull(1.0, 10)
assert_(len(vals) == 10)
def test_zipf(self):
vals = self.rg.zipf(10, 10)
assert_(len(vals) == 10)
vals = self.rg.zipf(self.vec_1d)
assert_(len(vals) == 100)
vals = self.rg.zipf(self.vec_2d)
assert_(vals.shape == (1, 100))
vals = self.rg.zipf(self.mat)
assert_(vals.shape == (100, 100))
def test_hypergeometric(self):
vals = self.rg.hypergeometric(25, 25, 20)
assert_(np.isscalar(vals))
vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20)
assert_(vals.shape == (10,))
def test_triangular(self):
vals = self.rg.triangular(-5, 0, 5)
assert_(np.isscalar(vals))
vals = self.rg.triangular(-5, np.array([0] * 10), 5)
assert_(vals.shape == (10,))
def test_multivariate_normal(self):
mean = [0, 0]
cov = [[1, 0], [0, 100]] # diagonal covariance
x = self.rg.multivariate_normal(mean, cov, 5000)
assert_(x.shape == (5000, 2))
x_zig = self.rg.multivariate_normal(mean, cov, 5000)
assert_(x_zig.shape == (5000, 2))
x_inv = self.rg.multivariate_normal(mean, cov, 5000)
assert_(x_inv.shape == (5000, 2))
assert_((x_zig != x_inv).any())
def test_multinomial(self):
vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3])
assert_(vals.shape == (2,))
vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10)
assert_(vals.shape == (10, 2))
def test_dirichlet(self):
s = self.rg.dirichlet((10, 5, 3), 20)
assert_(s.shape == (20, 3))
def test_pickle(self):
pick = pickle.dumps(self.rg)
unpick = pickle.loads(pick)
assert_(type(self.rg) == type(unpick))
assert_(comp_state(self.rg.bit_generator.state,
unpick.bit_generator.state))
pick = pickle.dumps(self.rg)
unpick = pickle.loads(pick)
assert_(type(self.rg) == type(unpick))
assert_(comp_state(self.rg.bit_generator.state,
unpick.bit_generator.state))
def test_seed_array(self):
if self.seed_vector_bits is None:
bitgen_name = self.bit_generator.__name__
pytest.skip(f'Vector seeding is not supported by {bitgen_name}')
if self.seed_vector_bits == 32:
dtype = np.uint32
else:
dtype = np.uint64
seed = np.array([1], dtype=dtype)
bg = self.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(1)
state2 = bg.state
assert_(comp_state(state1, state2))
seed = np.arange(4, dtype=dtype)
bg = self.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(seed[0])
state2 = bg.state
assert_(not comp_state(state1, state2))
seed = np.arange(1500, dtype=dtype)
bg = self.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(seed[0])
state2 = bg.state
assert_(not comp_state(state1, state2))
seed = 2 ** np.mod(np.arange(1500, dtype=dtype),
self.seed_vector_bits - 1) + 1
bg = self.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(seed[0])
state2 = bg.state
assert_(not comp_state(state1, state2))
def test_uniform_float(self):
rg = Generator(self.bit_generator(12345))
warmup(rg)
state = rg.bit_generator.state
r1 = rg.random(11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.random(11, dtype=np.float32)
assert_array_equal(r1, r2)
assert_equal(r1.dtype, np.float32)
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_gamma_floats(self):
rg = Generator(self.bit_generator())
warmup(rg)
state = rg.bit_generator.state
r1 = rg.standard_gamma(4.0, 11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32)
assert_array_equal(r1, r2)
assert_equal(r1.dtype, np.float32)
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_normal_floats(self):
rg = Generator(self.bit_generator())
warmup(rg)
state = rg.bit_generator.state
r1 = rg.standard_normal(11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.standard_normal(11, dtype=np.float32)
assert_array_equal(r1, r2)
assert_equal(r1.dtype, np.float32)
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_normal_zig_floats(self):
rg = Generator(self.bit_generator())
warmup(rg)
state = rg.bit_generator.state
r1 = rg.standard_normal(11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.standard_normal(11, dtype=np.float32)
assert_array_equal(r1, r2)
assert_equal(r1.dtype, np.float32)
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_output_fill(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.empty(size)
rg.bit_generator.state = state
rg.standard_normal(out=existing)
rg.bit_generator.state = state
direct = rg.standard_normal(size=size)
assert_equal(direct, existing)
sized = np.empty(size)
rg.bit_generator.state = state
rg.standard_normal(out=sized, size=sized.shape)
existing = np.empty(size, dtype=np.float32)
rg.bit_generator.state = state
rg.standard_normal(out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.standard_normal(size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_filling_uniform(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.empty(size)
rg.bit_generator.state = state
rg.random(out=existing)
rg.bit_generator.state = state
direct = rg.random(size=size)
assert_equal(direct, existing)
existing = np.empty(size, dtype=np.float32)
rg.bit_generator.state = state
rg.random(out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.random(size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_filling_exponential(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.empty(size)
rg.bit_generator.state = state
rg.standard_exponential(out=existing)
rg.bit_generator.state = state
direct = rg.standard_exponential(size=size)
assert_equal(direct, existing)
existing = np.empty(size, dtype=np.float32)
rg.bit_generator.state = state
rg.standard_exponential(out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.standard_exponential(size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_filling_gamma(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.zeros(size)
rg.bit_generator.state = state
rg.standard_gamma(1.0, out=existing)
rg.bit_generator.state = state
direct = rg.standard_gamma(1.0, size=size)
assert_equal(direct, existing)
existing = np.zeros(size, dtype=np.float32)
rg.bit_generator.state = state
rg.standard_gamma(1.0, out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.standard_gamma(1.0, size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_filling_gamma_broadcast(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
mu = np.arange(97.0) + 1.0
existing = np.zeros(size)
rg.bit_generator.state = state
rg.standard_gamma(mu, out=existing)
rg.bit_generator.state = state
direct = rg.standard_gamma(mu, size=size)
assert_equal(direct, existing)
existing = np.zeros(size, dtype=np.float32)
rg.bit_generator.state = state
rg.standard_gamma(mu, out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.standard_gamma(mu, size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_fill_error(self):
rg = self.rg
size = (31, 7, 97)
existing = np.empty(size)
with pytest.raises(TypeError):
rg.standard_normal(out=existing, dtype=np.float32)
with pytest.raises(ValueError):
rg.standard_normal(out=existing[::3])
existing = np.empty(size, dtype=np.float32)
with pytest.raises(TypeError):
rg.standard_normal(out=existing, dtype=np.float64)
existing = np.zeros(size, dtype=np.float32)
with pytest.raises(TypeError):
rg.standard_gamma(1.0, out=existing, dtype=np.float64)
with pytest.raises(ValueError):
rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32)
existing = np.zeros(size, dtype=np.float64)
with pytest.raises(TypeError):
rg.standard_gamma(1.0, out=existing, dtype=np.float32)
with pytest.raises(ValueError):
rg.standard_gamma(1.0, out=existing[::3])
def test_integers_broadcast(self, dtype):
if dtype == np.bool:
upper = 2
lower = 0
else:
info = np.iinfo(dtype)
upper = int(info.max) + 1
lower = info.min
self._reset_state()
a = self.rg.integers(lower, [upper] * 10, dtype=dtype)
self._reset_state()
b = self.rg.integers([lower] * 10, upper, dtype=dtype)
assert_equal(a, b)
self._reset_state()
c = self.rg.integers(lower, upper, size=10, dtype=dtype)
assert_equal(a, c)
self._reset_state()
d = self.rg.integers(np.array(
[lower] * 10), np.array([upper], dtype=object), size=10,
dtype=dtype)
assert_equal(a, d)
self._reset_state()
e = self.rg.integers(
np.array([lower] * 10), np.array([upper] * 10), size=10,
dtype=dtype)
assert_equal(a, e)
self._reset_state()
a = self.rg.integers(0, upper, size=10, dtype=dtype)
self._reset_state()
b = self.rg.integers([upper] * 10, dtype=dtype)
assert_equal(a, b)
def test_integers_numpy(self, dtype):
high = np.array([1])
low = np.array([0])
out = self.rg.integers(low, high, dtype=dtype)
assert out.shape == (1,)
out = self.rg.integers(low[0], high, dtype=dtype)
assert out.shape == (1,)
out = self.rg.integers(low, high[0], dtype=dtype)
assert out.shape == (1,)
def test_integers_broadcast_errors(self, dtype):
if dtype == np.bool:
upper = 2
lower = 0
else:
info = np.iinfo(dtype)
upper = int(info.max) + 1
lower = info.min
with pytest.raises(ValueError):
self.rg.integers(lower, [upper + 1] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers(lower - 1, [upper] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers([lower - 1], [upper] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers([0], [0], dtype=dtype)
class TestMT19937(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = MT19937
cls.advance = None
cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 32
cls._extra_setup()
cls.seed_error = ValueError
def test_numpy_state(self):
nprg = np.random.RandomState()
nprg.standard_normal(99)
state = nprg.get_state()
self.rg.bit_generator.state = state
state2 = self.rg.bit_generator.state
assert_((state[1] == state2['state']['key']).all())
assert_(state[2] == state2['state']['pos'])
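# Usage sketch (a pattern implied by the test above, not upstream code): an
# MT19937 bit generator accepts a legacy RandomState state tuple, so an
# existing RandomState stream can be continued under the Generator API.
def _adopt_legacy_state_example():
    legacy = np.random.RandomState(12345)
    bg = MT19937()
    bg.state = legacy.get_state()  # adopt the legacy Mersenne Twister state
    return Generator(bg)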
class TestPhilox(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = Philox
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
class TestSFC64(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = SFC64
cls.advance = None
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 192
cls._extra_setup()
class TestPCG64(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
class TestPCG64DXSM(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64DXSM
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
class TestDefaultRNG(RNG):
@classmethod
def setup_class(cls):
# This will duplicate some tests that directly instantiate a fresh
# Generator(), but that's okay.
cls.bit_generator = PCG64
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = np.random.default_rng(*cls.seed)
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
def test_default_is_pcg64(self):
# In order to change the default BitGenerator, we'll go through
# a deprecation cycle to move to a different function.
assert_(isinstance(self.rg.bit_generator, PCG64))
def test_seed(self):
np.random.default_rng()
np.random.default_rng(None)
np.random.default_rng(12345)
np.random.default_rng(0)
np.random.default_rng(43660444402423911716352051725018508569)
np.random.default_rng([43660444402423911716352051725018508569,
279705150948142787361475340226491943209])
with pytest.raises(ValueError):
np.random.default_rng(-1)
with pytest.raises(ValueError):
np.random.default_rng([12345, -1])
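# Usage sketch (not part of the upstream tests): besides ints and sequences of
# ints, default_rng is documented to also accept a SeedSequence or an existing
# BitGenerator, which it wraps in a fresh Generator.
def _default_rng_inputs_example():
    from numpy.random import SeedSequence
    return [
        np.random.default_rng(SeedSequence(12345)),
        np.random.default_rng(PCG64(12345)),
    ]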