Created starter files for the project.

Batuhan Berk Başoğlu 2020-10-02 21:26:03 -04:00
commit 73f0c0db42
1992 changed files with 769897 additions and 0 deletions

View file

@@ -0,0 +1,14 @@
cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
cdef extern from "numpy/random/bitgen.h":
    struct bitgen:
        void *state
        uint64_t (*next_uint64)(void *st) nogil
        uint32_t (*next_uint32)(void *st) nogil
        double (*next_double)(void *st) nogil
        uint64_t (*next_raw)(void *st) nogil

    ctypedef bitgen bitgen_t
from numpy.random.bit_generator cimport BitGenerator, SeedSequence

View file

@@ -0,0 +1,213 @@
"""
========================
Random Number Generation
========================
Use ``default_rng()`` to create a `Generator` and call its methods.
=============== =========================================================
Generator
--------------- ---------------------------------------------------------
Generator Class implementing all of the random number distributions
default_rng Default constructor for ``Generator``
=============== =========================================================
============================================= ===
BitGenerator Streams that work with Generator
--------------------------------------------- ---
MT19937
PCG64
Philox
SFC64
============================================= ===
============================================= ===
Getting entropy to initialize a BitGenerator
--------------------------------------------- ---
SeedSequence
============================================= ===
Legacy
------
For backwards compatibility with previous versions of numpy before 1.17, the
various aliases to the global `RandomState` methods are left alone and do not
use the new `Generator` API.
==================== =========================================================
Utility functions
-------------------- ---------------------------------------------------------
random Uniformly distributed floats over ``[0, 1)``
bytes Uniformly distributed random bytes.
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
choice Random sample from 1-D array.
==================== =========================================================
==================== =========================================================
Compatibility
functions - removed
in the new API
-------------------- ---------------------------------------------------------
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
random_integers Uniformly distributed integers in a given range.
(deprecated, use ``integers(..., endpoint=True)`` instead)
random_sample Uniformly distributed floats over ``[0, 1)``.
randint Uniformly distributed integers in a given range
seed Seed the legacy random number generator.
==================== =========================================================
==================== =========================================================
Univariate
distributions
-------------------- ---------------------------------------------------------
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
exponential Exponential distribution.
f F (Fisher-Snedecor) distribution.
gamma Gamma distribution.
geometric Geometric distribution.
gumbel Gumbel distribution.
hypergeometric Hypergeometric distribution.
laplace Laplace distribution.
logistic Logistic distribution.
lognormal Log-normal distribution.
logseries Logarithmic series distribution.
negative_binomial Negative binomial distribution.
noncentral_chisquare Non-central chi-square distribution.
noncentral_f Non-central F distribution.
normal Normal / Gaussian distribution.
pareto Pareto distribution.
poisson Poisson distribution.
power Power distribution.
rayleigh Rayleigh distribution.
triangular Triangular distribution.
uniform Uniform distribution.
vonmises Von Mises circular distribution.
wald Wald (inverse Gaussian) distribution.
weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
==================== ==========================================================
Multivariate
distributions
-------------------- ----------------------------------------------------------
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
==================== ==========================================================
==================== =========================================================
Standard
distributions
-------------------- ---------------------------------------------------------
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
standard_normal Standard normal distribution.
standard_t Standard Student's t-distribution.
==================== =========================================================
==================== =========================================================
Internal functions
-------------------- ---------------------------------------------------------
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
"""
__all__ = [
'beta',
'binomial',
'bytes',
'chisquare',
'choice',
'dirichlet',
'exponential',
'f',
'gamma',
'geometric',
'get_state',
'gumbel',
'hypergeometric',
'laplace',
'logistic',
'lognormal',
'logseries',
'multinomial',
'multivariate_normal',
'negative_binomial',
'noncentral_chisquare',
'noncentral_f',
'normal',
'pareto',
'permutation',
'poisson',
'power',
'rand',
'randint',
'randn',
'random',
'random_integers',
'random_sample',
'ranf',
'rayleigh',
'sample',
'seed',
'set_state',
'shuffle',
'standard_cauchy',
'standard_exponential',
'standard_gamma',
'standard_normal',
'standard_t',
'triangular',
'uniform',
'vonmises',
'wald',
'weibull',
'zipf',
]
# add these for module-freeze analysis (like PyInstaller)
from . import _pickle
from . import _common
from . import _bounded_integers
from ._generator import Generator, default_rng
from .bit_generator import SeedSequence, BitGenerator
from ._mt19937 import MT19937
from ._pcg64 import PCG64
from ._philox import Philox
from ._sfc64 import SFC64
from .mtrand import *
__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
'Philox', 'PCG64', 'SFC64', 'default_rng', 'BitGenerator']
def __RandomState_ctor():
    """Return a RandomState instance.

    This function exists solely to assist (un)pickling.

    Note that the state of the RandomState returned here is irrelevant, as this
    function's entire purpose is to return a newly allocated RandomState whose
    state pickle can set. Consequently the RandomState returned by this function
    is a freshly allocated copy with a seed=0.

    See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
    """
    return RandomState(seed=0)
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
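
The docstring above centres on ``default_rng()`` and ``Generator``; a minimal usage sketch (the seed is arbitrary, everything else is public NumPy API):

    import numpy as np

    rng = np.random.default_rng(seed=42)             # Generator backed by PCG64
    x = rng.standard_normal(5)                       # new-style distribution call
    k = rng.integers(0, 10, size=3, endpoint=True)   # inclusive upper bound
    legacy = np.random.random(5)                     # legacy RandomState-backed alias still works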

View file

@@ -0,0 +1,29 @@
from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
int8_t, int16_t, int32_t, int64_t, intptr_t)
import numpy as np
cimport numpy as np
ctypedef np.npy_bool bool_t
from numpy.random cimport bitgen_t
cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
    """Mask generator for use in bounded random numbers"""
    # Smallest bit mask >= max
    cdef uint64_t mask = max_val
    mask |= mask >> 1
    mask |= mask >> 2
    mask |= mask >> 4
    mask |= mask >> 8
    mask |= mask >> 16
    mask |= mask >> 32
    return mask
cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
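
``_gen_mask`` rounds a range up to the next ``2**k - 1`` so the ``_rand_*`` routines can draw bounded integers by masked rejection. A rough pure-Python sketch of the idea (illustration only; ``gen_mask`` and ``bounded`` are hypothetical names, not part of this module):

    import random

    def gen_mask(max_val):
        mask = max_val
        for shift in (1, 2, 4, 8, 16, 32):
            mask |= mask >> shift
        return mask                       # smallest all-ones mask >= max_val

    def bounded(low, high):
        span = high - low                 # draw from [low, high] inclusive
        mask = gen_mask(span)
        val = random.getrandbits(64) & mask
        while val > span:                 # reject draws beyond the span
            val = random.getrandbits(64) & mask
        return low + val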

View file

@@ -0,0 +1,106 @@
#cython: language_level=3
from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t
import numpy as np
cimport numpy as np
from numpy.random cimport bitgen_t
cdef double POISSON_LAM_MAX
cdef double LEGACY_POISSON_LAM_MAX
cdef uint64_t MAXSIZE
cdef enum ConstraintType:
    CONS_NONE
    CONS_NON_NEGATIVE
    CONS_POSITIVE
    CONS_POSITIVE_NOT_NAN
    CONS_BOUNDED_0_1
    CONS_BOUNDED_0_1_NOTNAN
    CONS_BOUNDED_GT_0_1
    CONS_GT_1
    CONS_GTE_1
    CONS_POISSON
    LEGACY_CONS_POISSON

ctypedef ConstraintType constraint_type
cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method)
cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output)
cdef object prepare_cffi(bitgen_t *bitgen)
cdef object prepare_ctypes(bitgen_t *bitgen)
cdef int check_constraint(double val, object name, constraint_type cons) except -1
cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1
cdef extern from "include/aligned_malloc.h":
cdef void *PyArray_realloc_aligned(void *p, size_t n)
cdef void *PyArray_malloc_aligned(size_t n)
cdef void *PyArray_calloc_aligned(size_t n, size_t s)
cdef void PyArray_free_aligned(void *p)
ctypedef double (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil
ctypedef double (*random_double_0)(void *state) nogil
ctypedef double (*random_double_1)(void *state, double a) nogil
ctypedef double (*random_double_2)(void *state, double a, double b) nogil
ctypedef double (*random_double_3)(void *state, double a, double b, double c) nogil
ctypedef double (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) nogil
ctypedef float (*random_float_0)(bitgen_t *state) nogil
ctypedef float (*random_float_1)(bitgen_t *state, float a) nogil
ctypedef int64_t (*random_uint_0)(void *state) nogil
ctypedef int64_t (*random_uint_d)(void *state, double a) nogil
ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) nogil
ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) nogil
ctypedef int64_t (*random_uint_i)(void *state, int64_t a) nogil
ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) nogil
ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) nogil
ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) nogil
ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) nogil
ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) nogil
cdef double kahan_sum(double *darr, np.npy_intp n)
cdef inline double uint64_to_double(uint64_t rnd) nogil:
    return (rnd >> 11) * (1.0 / 9007199254740992.0)
cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out)
cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out)
cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out)
cdef object wrap_int(object val, object bits)
cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size)
cdef validate_output_shape(iter_shape, np.ndarray output)
cdef object cont(void *func, void *state, object size, object lock, int narg,
object a, object a_name, constraint_type a_constraint,
object b, object b_name, constraint_type b_constraint,
object c, object c_name, constraint_type c_constraint,
object out)
cdef object disc(void *func, void *state, object size, object lock,
int narg_double, int narg_int64,
object a, object a_name, constraint_type a_constraint,
object b, object b_name, constraint_type b_constraint,
object c, object c_name, constraint_type c_constraint)
cdef object cont_f(void *func, bitgen_t *state, object size, object lock,
object a, object a_name, constraint_type a_constraint,
object out)
cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
np.ndarray a_arr, object a_name, constraint_type a_constraint,
np.ndarray b_arr, object b_name, constraint_type b_constraint,
np.ndarray c_arr, object c_name, constraint_type c_constraint)
cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
np.ndarray a_arr, object a_name, constraint_type a_constraint,
np.ndarray b_arr, object b_name, constraint_type b_constraint,
np.ndarray c_arr, object c_name, constraint_type c_constraint)
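
``uint64_to_double`` keeps the top 53 bits of a raw draw and scales by ``2**-53`` (9007199254740992 is 2**53), yielding a uniform double in [0, 1). A quick plain-Python check of the same arithmetic:

    def uint64_to_double(rnd):
        return (rnd >> 11) * (1.0 / 2**53)     # 53 mantissa bits of randomness

    assert uint64_to_double(0) == 0.0
    assert uint64_to_double(2**64 - 1) < 1.0   # never reaches 1.0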

View file

@@ -0,0 +1,40 @@
"""
Use cffi to access any of the underlying C functions from distributions.h
"""
import os
import numpy as np
import cffi
from .parse import parse_distributions_h
ffi = cffi.FFI()
inc_dir = os.path.join(np.get_include(), 'numpy')
# Basic numpy types
ffi.cdef('''
typedef intptr_t npy_intp;
typedef unsigned char npy_bool;
''')
parse_distributions_h(ffi, inc_dir)
lib = ffi.dlopen(np.random._generator.__file__)
# Compare the distributions.h random_standard_normal_fill to
# Generator.standard_normal
bit_gen = np.random.PCG64()
rng = np.random.Generator(bit_gen)
state = bit_gen.state
interface = rng.bit_generator.cffi
n = 100
vals_cffi = ffi.new('double[%d]' % n)
lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi)
# reset the state
bit_gen.state = state
vals = rng.standard_normal(n)
for i in range(n):
    assert vals[i] == vals_cffi[i]

View file

@@ -0,0 +1,46 @@
import os
def parse_distributions_h(ffi, inc_dir):
    """
    Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef

    Read the function declarations without the "#define ..." macros that will
    be filled in when loading the library.
    """
    with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid:
        s = []
        for line in fid:
            # massage the include file
            if line.strip().startswith('#'):
                continue
            s.append(line)
        ffi.cdef('\n'.join(s))

    with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid:
        s = []
        in_skip = 0
        for line in fid:
            # massage the include file
            if line.strip().startswith('#'):
                continue

            # skip any inlined function definition
            # which starts with 'static NPY_INLINE xxx(...) {'
            # and ends with a closing '}'
            if line.strip().startswith('static NPY_INLINE'):
                in_skip += line.count('{')
                continue
            elif in_skip > 0:
                in_skip += line.count('{')
                in_skip -= line.count('}')
                continue

            # replace defines with their value or remove them
            line = line.replace('DECLDIR', '')
            line = line.replace('NPY_INLINE', '')
            line = line.replace('RAND_INT_TYPE', 'int64_t')
            s.append(line)
        ffi.cdef('\n'.join(s))

View file

@@ -0,0 +1,78 @@
#!/usr/bin/env python3
#cython: language_level=3
from libc.stdint cimport uint32_t
from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
import numpy as np
cimport numpy as np
cimport cython
from numpy.random cimport bitgen_t
from numpy.random import PCG64
np.import_array()
@cython.boundscheck(False)
@cython.wraparound(False)
def uniform_mean(Py_ssize_t n):
    cdef Py_ssize_t i
    cdef bitgen_t *rng
    cdef const char *capsule_name = "BitGenerator"
    cdef double[::1] random_values
    cdef np.ndarray randoms

    x = PCG64()
    capsule = x.capsule
    if not PyCapsule_IsValid(capsule, capsule_name):
        raise ValueError("Invalid pointer to anon_func_state")
    rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
    random_values = np.empty(n)
    # Best practice is to acquire the lock whenever generating random values.
    # This prevents other threads from modifying the state. Acquiring the lock
    # is only necessary if the GIL is also released, as in this example.
    with x.lock, nogil:
        for i in range(n):
            random_values[i] = rng.next_double(rng.state)
    randoms = np.asarray(random_values)
    return randoms.mean()


# This function is declared nogil so it can be used without the GIL below
cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil:
    cdef uint32_t mask, delta, val
    mask = delta = ub - lb
    mask |= mask >> 1
    mask |= mask >> 2
    mask |= mask >> 4
    mask |= mask >> 8
    mask |= mask >> 16

    val = rng.next_uint32(rng.state) & mask
    while val > delta:
        val = rng.next_uint32(rng.state) & mask
    return lb + val


@cython.boundscheck(False)
@cython.wraparound(False)
def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n):
    cdef Py_ssize_t i
    cdef bitgen_t *rng
    cdef uint32_t[::1] out
    cdef const char *capsule_name = "BitGenerator"

    x = PCG64()
    out = np.empty(n, dtype=np.uint32)
    capsule = x.capsule
    if not PyCapsule_IsValid(capsule, capsule_name):
        raise ValueError("Invalid pointer to anon_func_state")
    rng = <bitgen_t *>PyCapsule_GetPointer(capsule, capsule_name)
    with x.lock, nogil:
        for i in range(n):
            out[i] = bounded_uint(lb, ub, rng)
    return np.asarray(out)
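
Once built with the setup script later in this commit, the extension is imported like any other module; a small smoke test (the module name ``extending`` matches the Extension defined there):

    import extending

    print(extending.uniform_mean(10_000))        # should be close to 0.5
    draws = extending.bounded_uints(10, 20, 5)   # uint32 values in [10, 20]
    print(draws)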

View file

@@ -0,0 +1,117 @@
#!/usr/bin/env python3
#cython: language_level=3
"""
This file shows how to use a BitGenerator to create a distribution.
"""
import numpy as np
cimport numpy as np
cimport cython
from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
from libc.stdint cimport uint16_t, uint64_t
from numpy.random cimport bitgen_t
from numpy.random import PCG64
from numpy.random.c_distributions cimport (
random_standard_uniform_fill, random_standard_uniform_fill_f)
@cython.boundscheck(False)
@cython.wraparound(False)
def uniforms(Py_ssize_t n):
    """
    Create an array of `n` uniformly distributed doubles.
    A 'real' distribution would want to process the values into
    some non-uniform distribution
    """
    cdef Py_ssize_t i
    cdef bitgen_t *rng
    cdef const char *capsule_name = "BitGenerator"
    cdef double[::1] random_values

    x = PCG64()
    capsule = x.capsule
    # Optional check that the capsule is from a BitGenerator
    if not PyCapsule_IsValid(capsule, capsule_name):
        raise ValueError("Invalid pointer to anon_func_state")
    # Cast the pointer
    rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
    random_values = np.empty(n, dtype='float64')
    with x.lock, nogil:
        for i in range(n):
            # Call the function
            random_values[i] = rng.next_double(rng.state)
    randoms = np.asarray(random_values)

    return randoms
# cython example 2
@cython.boundscheck(False)
@cython.wraparound(False)
def uint10_uniforms(Py_ssize_t n):
    """Uniform 10 bit integers stored as 16-bit unsigned integers"""
    cdef Py_ssize_t i
    cdef bitgen_t *rng
    cdef const char *capsule_name = "BitGenerator"
    cdef uint16_t[::1] random_values
    cdef int bits_remaining
    cdef int width = 10
    cdef uint64_t buff, mask = 0x3FF

    x = PCG64()
    capsule = x.capsule
    if not PyCapsule_IsValid(capsule, capsule_name):
        raise ValueError("Invalid pointer to anon_func_state")
    rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
    random_values = np.empty(n, dtype='uint16')
    # Best practice is to release GIL and acquire the lock
    bits_remaining = 0
    with x.lock, nogil:
        for i in range(n):
            if bits_remaining < width:
                # refill the 64-bit buffer when fewer than 10 bits remain
                buff = rng.next_uint64(rng.state)
                bits_remaining = 64
            random_values[i] = buff & mask
            buff >>= width
            bits_remaining -= width

    randoms = np.asarray(random_values)
    return randoms
# cython example 3
def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64):
    """
    Create an array of `n` uniformly distributed doubles via a "fill" function.

    A 'real' distribution would want to process the values into
    some non-uniform distribution

    Parameters
    ----------
    bit_generator: BitGenerator instance
    n: int
        Output vector length
    dtype: {str, dtype}, optional
        Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The
        default dtype value is 'd'
    """
    cdef Py_ssize_t i
    cdef bitgen_t *rng
    cdef const char *capsule_name = "BitGenerator"
    cdef np.ndarray randoms

    capsule = bit_generator.capsule
    # Optional check that the capsule is from a BitGenerator
    if not PyCapsule_IsValid(capsule, capsule_name):
        raise ValueError("Invalid pointer to anon_func_state")
    # Cast the pointer
    rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)

    _dtype = np.dtype(dtype)
    randoms = np.empty(n, dtype=_dtype)
    if _dtype == np.float32:
        with bit_generator.lock:
            random_standard_uniform_fill_f(rng, n, <float*>np.PyArray_DATA(randoms))
    elif _dtype == np.float64:
        with bit_generator.lock:
            random_standard_uniform_fill(rng, n, <double*>np.PyArray_DATA(randoms))
    else:
        raise TypeError('Unsupported dtype %r for random' % _dtype)
    return randoms
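
After compilation, ``uniforms_ex`` accepts any BitGenerator plus a dtype switch; a short usage sketch (the module name ``extending_distributions`` comes from the setup script below):

    from numpy.random import PCG64, SFC64
    import extending_distributions as ed

    d = ed.uniforms_ex(PCG64(12345), 10)                    # float64 fill
    f = ed.uniforms_ex(SFC64(12345), 10, dtype='float32')   # float32 via the _f fill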

View file

@@ -0,0 +1,41 @@
#!/usr/bin/env python3
"""
Build the Cython demonstrations of low-level access to NumPy random
Usage: python setup.py build_ext -i
"""
import numpy as np
from distutils.core import setup
from Cython.Build import cythonize
from setuptools.extension import Extension
from os.path import join, dirname
path = dirname(__file__)
src_dir = join(dirname(path), '..', 'src')
defs = [('NPY_NO_DEPRECATED_API', 0)]
inc_path = np.get_include()
# not so nice. We need the random/lib library from numpy
lib_path = join(np.get_include(), '..', '..', 'random', 'lib')
extending = Extension("extending",
sources=[join(path, 'extending.pyx')],
include_dirs=[
np.get_include(),
join(path, '..', '..')
],
define_macros=defs,
)
distributions = Extension("extending_distributions",
sources=[join(path, 'extending_distributions.pyx')],
include_dirs=[inc_path],
library_dirs=[lib_path],
libraries=['npyrandom'],
define_macros=defs,
)
extensions = [extending, distributions]
setup(
ext_modules=cythonize(extensions)
)

View file

@@ -0,0 +1,84 @@
import numpy as np
import numba as nb
from numpy.random import PCG64
from timeit import timeit
bit_gen = PCG64()
next_d = bit_gen.cffi.next_double
state_addr = bit_gen.cffi.state_address
def normals(n, state):
    out = np.empty(n)
    for i in range((n + 1) // 2):
        x1 = 2.0 * next_d(state) - 1.0
        x2 = 2.0 * next_d(state) - 1.0
        r2 = x1 * x1 + x2 * x2
        while r2 >= 1.0 or r2 == 0.0:
            x1 = 2.0 * next_d(state) - 1.0
            x2 = 2.0 * next_d(state) - 1.0
            r2 = x1 * x1 + x2 * x2
        f = np.sqrt(-2.0 * np.log(r2) / r2)
        out[2 * i] = f * x1
        if 2 * i + 1 < n:
            out[2 * i + 1] = f * x2
    return out
# Compile using Numba
normalsj = nb.jit(normals, nopython=True)
# Must use state address not state with numba
n = 10000
def numbacall():
    return normalsj(n, state_addr)


rg = np.random.Generator(PCG64())


def numpycall():
    return rg.normal(size=n)
# Check that the functions work
r1 = numbacall()
r2 = numpycall()
assert r1.shape == (n,)
assert r1.shape == r2.shape
t1 = timeit(numbacall, number=1000)
print('{:.2f} secs for {} PCG64 (Numba/PCG64) gaussian randoms'.format(t1, n))
t2 = timeit(numpycall, number=1000)
print('{:.2f} secs for {} PCG64 (NumPy/PCG64) gaussian randoms'.format(t2, n))
# example 2
next_u32 = bit_gen.ctypes.next_uint32
ctypes_state = bit_gen.ctypes.state
@nb.jit(nopython=True)
def bounded_uint(lb, ub, state):
    mask = delta = ub - lb
    mask |= mask >> 1
    mask |= mask >> 2
    mask |= mask >> 4
    mask |= mask >> 8
    mask |= mask >> 16

    val = next_u32(state) & mask
    while val > delta:
        val = next_u32(state) & mask
    return lb + val


print(bounded_uint(323, 2394691, ctypes_state.value))


@nb.jit(nopython=True)
def bounded_uints(lb, ub, n, state):
    out = np.empty(n, dtype=np.uint32)
    for i in range(n):
        out[i] = bounded_uint(lb, ub, state)
    return out


bounded_uints(323, 2394691, 10000000, ctypes_state.value)
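
Numba's nopython mode cannot hold the Python-level interface objects, so both examples pass a raw integer address. The same address is reachable through either interface (a sketch; the ``.cffi`` attribute requires cffi to be installed):

    from numpy.random import PCG64

    bg = PCG64()
    addr_cffi = bg.cffi.state_address      # plain int
    addr_ctypes = bg.ctypes.state.value    # c_void_p -> int
    assert addr_cffi == addr_ctypes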

View file

@@ -0,0 +1,67 @@
r"""
Building the required library in this example requires a source distribution
of NumPy or clone of the NumPy git repository since distributions.c is not
included in binary distributions.
On *nix, execute in numpy/random/src/distributions
export PYTHON_VERSION=3.8  # Python version
export PYTHON_INCLUDE=#path to Python's include folder, usually \
${PYTHON_HOME}/include/python${PYTHON_VERSION}m
export NUMPY_INCLUDE=#path to numpy's include folder, usually \
${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include
gcc -shared -o libdistributions.so -fPIC distributions.c \
-I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE}
mv libdistributions.so ../../_examples/numba/
On Windows
rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example
set PYTHON_HOME=c:\Anaconda
set PYTHON_VERSION=38
cl.exe /LD .\distributions.c -DDLL_EXPORT \
-I%PYTHON_HOME%\lib\site-packages\numpy\core\include \
-I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib
move distributions.dll ../../_examples/numba/
"""
import os
import numba as nb
import numpy as np
from cffi import FFI
from numpy.random import PCG64
ffi = FFI()
if os.path.exists('./distributions.dll'):
    lib = ffi.dlopen('./distributions.dll')
elif os.path.exists('./libdistributions.so'):
    lib = ffi.dlopen('./libdistributions.so')
else:
    raise RuntimeError('Required DLL/so file was not found.')
ffi.cdef("""
double random_standard_normal(void *bitgen_state);
""")
x = PCG64()
xffi = x.cffi
bit_generator = xffi.bit_generator
random_standard_normal = lib.random_standard_normal
def normals(n, bit_generator):
    out = np.empty(n)
    for i in range(n):
        out[i] = random_standard_normal(bit_generator)
    return out
normalsj = nb.jit(normals, nopython=True)
# Numba requires a memory address for void *
# Can also get address from x.ctypes.bit_generator.value
bit_generator_address = int(ffi.cast('uintptr_t', bit_generator))
norm = normalsj(1000, bit_generator_address)
print(norm[:12])

View file

@@ -0,0 +1,82 @@
from .mtrand import RandomState
from ._philox import Philox
from ._pcg64 import PCG64
from ._sfc64 import SFC64
from ._generator import Generator
from ._mt19937 import MT19937
BitGenerators = {'MT19937': MT19937,
'PCG64': PCG64,
'Philox': Philox,
'SFC64': SFC64,
}
def __generator_ctor(bit_generator_name='MT19937'):
    """
    Pickling helper function that returns a Generator object

    Parameters
    ----------
    bit_generator_name: str
        String containing the core BitGenerator

    Returns
    -------
    rg: Generator
        Generator using the named core BitGenerator
    """
    if bit_generator_name in BitGenerators:
        bit_generator = BitGenerators[bit_generator_name]
    else:
        raise ValueError(str(bit_generator_name) + ' is not a known '
                         'BitGenerator module.')

    return Generator(bit_generator())


def __bit_generator_ctor(bit_generator_name='MT19937'):
    """
    Pickling helper function that returns a bit generator object

    Parameters
    ----------
    bit_generator_name: str
        String containing the name of the BitGenerator

    Returns
    -------
    bit_generator: BitGenerator
        BitGenerator instance
    """
    if bit_generator_name in BitGenerators:
        bit_generator = BitGenerators[bit_generator_name]
    else:
        raise ValueError(str(bit_generator_name) + ' is not a known '
                         'BitGenerator module.')

    return bit_generator()


def __randomstate_ctor(bit_generator_name='MT19937'):
    """
    Pickling helper function that returns a legacy RandomState-like object

    Parameters
    ----------
    bit_generator_name: str
        String containing the core BitGenerator

    Returns
    -------
    rs: RandomState
        Legacy RandomState using the named core BitGenerator
    """
    if bit_generator_name in BitGenerators:
        bit_generator = BitGenerators[bit_generator_name]
    else:
        raise ValueError(str(bit_generator_name) + ' is not a known '
                         'BitGenerator module.')

    return RandomState(bit_generator())
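
These constructors back pickling of Generator and RandomState, so a round trip preserves both the BitGenerator type and its state. A quick check using only public API (the seed is arbitrary):

    import pickle
    import numpy as np

    rng = np.random.default_rng(2020)
    rng.standard_normal(3)                 # advance the state a little
    clone = pickle.loads(pickle.dumps(rng))
    assert (clone.standard_normal(5) == rng.standard_normal(5)).all()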

View file

@@ -0,0 +1,35 @@
cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
cdef extern from "numpy/random/bitgen.h":
    struct bitgen:
        void *state
        uint64_t (*next_uint64)(void *st) nogil
        uint32_t (*next_uint32)(void *st) nogil
        double (*next_double)(void *st) nogil
        uint64_t (*next_raw)(void *st) nogil

    ctypedef bitgen bitgen_t

cdef class BitGenerator():
    cdef readonly object _seed_seq
    cdef readonly object lock
    cdef bitgen_t _bitgen
    cdef readonly object _ctypes
    cdef readonly object _cffi
    cdef readonly object capsule


cdef class SeedSequence():
    cdef readonly object entropy
    cdef readonly tuple spawn_key
    cdef readonly uint32_t pool_size
    cdef readonly object pool
    cdef readonly uint32_t n_children_spawned

    cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
                     np.ndarray[np.npy_uint32, ndim=1] entropy_array)
    cdef get_assembled_entropy(self)


cdef class SeedlessSequence():
    pass
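
``SeedSequence`` mixes arbitrary entropy into high-quality seeds and can spawn independent children for parallel streams; a short sketch using the public API (the entropy value is arbitrary):

    from numpy.random import SeedSequence, PCG64, Generator

    ss = SeedSequence(20201002)
    children = ss.spawn(4)                         # independent child sequences
    streams = [Generator(PCG64(c)) for c in children]
    print(ss.generate_state(2))                    # two uint32 words of seed material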

View file

@@ -0,0 +1,114 @@
#!python
#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
from numpy cimport npy_intp
from libc.stdint cimport (uint64_t, int32_t, int64_t)
from numpy.random cimport bitgen_t
cdef extern from "numpy/random/distributions.h":
struct s_binomial_t:
int has_binomial
double psave
int64_t nsave
double r
double q
double fm
int64_t m
double p1
double xm
double xl
double xr
double c
double laml
double lamr
double p2
double p3
double p4
ctypedef s_binomial_t binomial_t
double random_standard_uniform(bitgen_t *bitgen_state) nogil
void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out) nogil
double random_standard_exponential(bitgen_t *bitgen_state) nogil
double random_standard_exponential_f(bitgen_t *bitgen_state) nogil
void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
double random_standard_normal(bitgen_t* bitgen_state) nogil
void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) nogil
void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out) nogil
double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil
float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out) nogil
float random_standard_normal_f(bitgen_t* bitgen_state) nogil
float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
int64_t random_positive_int64(bitgen_t *bitgen_state) nogil
int32_t random_positive_int32(bitgen_t *bitgen_state) nogil
int64_t random_positive_int(bitgen_t *bitgen_state) nogil
uint64_t random_uint(bitgen_t *bitgen_state) nogil
double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil
double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil
float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil
double random_exponential(bitgen_t *bitgen_state, double scale) nogil
double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
double random_beta(bitgen_t *bitgen_state, double a, double b) nogil
double random_chisquare(bitgen_t *bitgen_state, double df) nogil
double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil
double random_standard_cauchy(bitgen_t *bitgen_state) nogil
double random_pareto(bitgen_t *bitgen_state, double a) nogil
double random_weibull(bitgen_t *bitgen_state, double a) nogil
double random_power(bitgen_t *bitgen_state, double a) nogil
double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil
double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
double random_standard_t(bitgen_t *bitgen_state, double df) nogil
double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
double nonc) nogil
double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
double dfden, double nonc) nogil
double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil
double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
double random_triangular(bitgen_t *bitgen_state, double left, double mode,
double right) nogil
int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil
int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil
int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil
int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil
int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil
int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil
int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil
int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil
int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad,
int64_t sample) nogil
uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
# Generate random uint64 numbers in closed interval [off, off + rng].
uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
uint64_t off, uint64_t rng,
uint64_t mask, bint use_masked) nogil
void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix,
double *pix, npy_intp d, binomial_t *binomial) nogil
int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
int64_t total,
size_t num_colors, int64_t *colors,
int64_t nsample,
size_t num_variates, int64_t *variates) nogil
void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
int64_t total,
size_t num_colors, int64_t *colors,
int64_t nsample,
size_t num_variates, int64_t *variates) nogil

Binary file not shown.

View file

@@ -0,0 +1,146 @@
import os
import platform
import sys
from os.path import join
from numpy.distutils.system_info import platform_bits
is_msvc = (platform.platform().startswith('Windows') and
platform.python_compiler().startswith('MS'))
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random', parent_package, top_path)
def generate_libraries(ext, build_dir):
config_cmd = config.get_config_cmd()
libs = get_mathlibs()
if sys.platform == 'win32':
libs.extend(['Advapi32', 'Kernel32'])
ext.libraries.extend(libs)
return None
# enable unix large file support on 32 bit systems
# (64 bit off_t, lseek -> lseek64 etc.)
if sys.platform[:3] == "aix":
defs = [('_LARGE_FILES', None)]
else:
defs = [('_FILE_OFFSET_BITS', '64'),
('_LARGEFILE_SOURCE', '1'),
('_LARGEFILE64_SOURCE', '1')]
defs.append(('NPY_NO_DEPRECATED_API', 0))
config.add_subpackage('tests')
config.add_data_dir('tests/data')
config.add_data_dir('_examples')
EXTRA_LINK_ARGS = []
EXTRA_LIBRARIES = ['npyrandom']
if os.name != 'nt':
# Math lib
EXTRA_LIBRARIES.append('m')
# Some bit generators exclude GCC inlining
EXTRA_COMPILE_ARGS = ['-U__GNUC_GNU_INLINE__']
if is_msvc and platform_bits == 32:
# 32-bit windows requires explicit sse2 option
EXTRA_COMPILE_ARGS += ['/arch:SSE2']
elif not is_msvc:
# Some bit generators require c99
EXTRA_COMPILE_ARGS += ['-std=c99']
# Use legacy integer variable sizes
LEGACY_DEFS = [('NP_RANDOM_LEGACY', '1')]
PCG64_DEFS = []
# One can force emulated 128-bit arithmetic if one wants.
#PCG64_DEFS += [('PCG_FORCE_EMULATED_128BIT_MATH', '1')]
depends = ['__init__.pxd', 'c_distributions.pxd', 'bit_generator.pxd']
# npyrandom - a library like npymath
npyrandom_sources = [
'src/distributions/logfactorial.c',
'src/distributions/distributions.c',
'src/distributions/random_mvhg_count.c',
'src/distributions/random_mvhg_marginals.c',
'src/distributions/random_hypergeometric.c',
]
config.add_installed_library('npyrandom',
sources=npyrandom_sources,
install_dir='lib',
build_info={
'include_dirs' : [], # empty list required for creating npyrandom.h
'extra_compiler_args' : (['/GL-'] if is_msvc else []),
})
for gen in ['mt19937']:
# gen.pyx, src/gen/gen.c, src/gen/gen-jump.c
config.add_extension(f'_{gen}',
sources=[f'_{gen}.c',
f'src/{gen}/{gen}.c',
f'src/{gen}/{gen}-jump.c'],
include_dirs=['.', 'src', join('src', gen)],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
depends=depends + [f'_{gen}.pyx'],
define_macros=defs,
)
for gen in ['philox', 'pcg64', 'sfc64']:
# gen.pyx, src/gen/gen.c
_defs = defs + PCG64_DEFS if gen == 'pcg64' else defs
config.add_extension(f'_{gen}',
sources=[f'_{gen}.c',
f'src/{gen}/{gen}.c'],
include_dirs=['.', 'src', join('src', gen)],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
depends=depends + [f'_{gen}.pyx',
'bit_generator.pyx', 'bit_generator.pxd'],
define_macros=_defs,
)
for gen in ['_common', 'bit_generator']:
# gen.pyx
config.add_extension(gen,
sources=[f'{gen}.c'],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
include_dirs=['.', 'src'],
depends=depends + [f'{gen}.pyx', f'{gen}.pxd',],
define_macros=defs,
)
config.add_data_files(f'{gen}.pxd')
for gen in ['_generator', '_bounded_integers']:
# gen.pyx, src/distributions/distributions.c
config.add_extension(gen,
sources=[f'{gen}.c'],
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
include_dirs=['.', 'src'],
extra_link_args=EXTRA_LINK_ARGS,
depends=depends + [f'{gen}.pyx'],
define_macros=defs,
)
config.add_data_files('_bounded_integers.pxd')
config.add_extension('mtrand',
sources=['mtrand.c',
'src/legacy/legacy-distributions.c',
'src/distributions/distributions.c',
],
include_dirs=['.', 'src', 'src/legacy'],
libraries=['m'] if os.name != 'nt' else [],
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
depends=depends + ['mtrand.pyx'],
define_macros=defs + LEGACY_DEFS,
)
config.add_data_files(*depends)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)

8 file diffs suppressed because they are too large.

View file

@@ -0,0 +1,425 @@
import os
from os.path import join
import sys
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
assert_raises)
import pytest
from numpy.random import (
Generator, MT19937, PCG64, Philox, RandomState, SeedSequence, SFC64,
default_rng
)
from numpy.random._common import interface
try:
    import cffi  # noqa: F401
    MISSING_CFFI = False
except ImportError:
    MISSING_CFFI = True

try:
    import ctypes  # noqa: F401
    MISSING_CTYPES = False
except ImportError:
    MISSING_CTYPES = True

if sys.flags.optimize > 1:
    # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
    # cffi cannot succeed
    MISSING_CFFI = True
pwd = os.path.dirname(os.path.abspath(__file__))
def assert_state_equal(actual, target):
for key in actual:
if isinstance(actual[key], dict):
assert_state_equal(actual[key], target[key])
elif isinstance(actual[key], np.ndarray):
assert_array_equal(actual[key], target[key])
else:
assert actual[key] == target[key]
def uniform32_from_uint64(x):
x = np.uint64(x)
upper = np.array(x >> np.uint64(32), dtype=np.uint32)
lower = np.uint64(0xffffffff)
lower = np.array(x & lower, dtype=np.uint32)
joined = np.column_stack([lower, upper]).ravel()
out = (joined >> np.uint32(9)) * (1.0 / 2 ** 23)
return out.astype(np.float32)
def uniform32_from_uint53(x):
x = np.uint64(x) >> np.uint64(16)
x = np.uint32(x & np.uint64(0xffffffff))
out = (x >> np.uint32(9)) * (1.0 / 2 ** 23)
return out.astype(np.float32)
def uniform32_from_uint32(x):
return (x >> np.uint32(9)) * (1.0 / 2 ** 23)
def uniform32_from_uint(x, bits):
if bits == 64:
return uniform32_from_uint64(x)
elif bits == 53:
return uniform32_from_uint53(x)
elif bits == 32:
return uniform32_from_uint32(x)
else:
raise NotImplementedError
def uniform_from_uint(x, bits):
if bits in (64, 63, 53):
return uniform_from_uint64(x)
elif bits == 32:
return uniform_from_uint32(x)
def uniform_from_uint64(x):
return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0)
def uniform_from_uint32(x):
out = np.empty(len(x) // 2)
for i in range(0, len(x), 2):
a = x[i] >> 5
b = x[i + 1] >> 6
out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0
return out
def uniform_from_dsfmt(x):
return x.view(np.double) - 1.0
def gauss_from_uint(x, n, bits):
if bits in (64, 63):
doubles = uniform_from_uint64(x)
elif bits == 32:
doubles = uniform_from_uint32(x)
else: # bits == 'dsfmt'
doubles = uniform_from_dsfmt(x)
gauss = []
loc = 0
x1 = x2 = 0.0
while len(gauss) < n:
r2 = 2
while r2 >= 1.0 or r2 == 0.0:
x1 = 2.0 * doubles[loc] - 1.0
x2 = 2.0 * doubles[loc + 1] - 1.0
r2 = x1 * x1 + x2 * x2
loc += 2
f = np.sqrt(-2.0 * np.log(r2) / r2)
gauss.append(f * x2)
gauss.append(f * x1)
return gauss[:n]
def test_seedsequence():
from numpy.random.bit_generator import (ISeedSequence,
ISpawnableSeedSequence,
SeedlessSeedSequence)
s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6)
s1.spawn(10)
s2 = SeedSequence(**s1.state)
assert_equal(s1.state, s2.state)
assert_equal(s1.n_children_spawned, s2.n_children_spawned)
# The interfaces cannot be instantiated themselves.
assert_raises(TypeError, ISeedSequence)
assert_raises(TypeError, ISpawnableSeedSequence)
dummy = SeedlessSeedSequence()
assert_raises(NotImplementedError, dummy.generate_state, 10)
assert len(dummy.spawn(10)) == 10
class Base:
dtype = np.uint64
data2 = data1 = {}
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.bits = 64
cls.dtype = np.uint64
cls.seed_error_type = TypeError
cls.invalid_init_types = []
cls.invalid_init_values = []
@classmethod
def _read_csv(cls, filename):
with open(filename) as csv:
seed = csv.readline()
seed = seed.split(',')
seed = [int(s.strip(), 0) for s in seed[1:]]
data = []
for line in csv:
data.append(int(line.split(',')[-1].strip(), 0))
return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)}
def test_raw(self):
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw(1000)
assert_equal(uints, self.data1['data'])
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw()
assert_equal(uints, self.data1['data'][0])
bit_generator = self.bit_generator(*self.data2['seed'])
uints = bit_generator.random_raw(1000)
assert_equal(uints, self.data2['data'])
def test_random_raw(self):
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw(output=False)
assert uints is None
uints = bit_generator.random_raw(1000, output=False)
assert uints is None
def test_gauss_inv(self):
n = 25
rs = RandomState(self.bit_generator(*self.data1['seed']))
gauss = rs.standard_normal(n)
assert_allclose(gauss,
gauss_from_uint(self.data1['data'], n, self.bits))
rs = RandomState(self.bit_generator(*self.data2['seed']))
gauss = rs.standard_normal(25)
assert_allclose(gauss,
gauss_from_uint(self.data2['data'], n, self.bits))
def test_uniform_double(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
vals = uniform_from_uint(self.data1['data'], self.bits)
uniforms = rs.random(len(vals))
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float64)
rs = Generator(self.bit_generator(*self.data2['seed']))
vals = uniform_from_uint(self.data2['data'], self.bits)
uniforms = rs.random(len(vals))
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float64)
def test_uniform_float(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
vals = uniform32_from_uint(self.data1['data'], self.bits)
uniforms = rs.random(len(vals), dtype=np.float32)
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float32)
rs = Generator(self.bit_generator(*self.data2['seed']))
vals = uniform32_from_uint(self.data2['data'], self.bits)
uniforms = rs.random(len(vals), dtype=np.float32)
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float32)
def test_repr(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
assert 'Generator' in repr(rs)
assert '{:#x}'.format(id(rs)).upper().replace('X', 'x') in repr(rs)
def test_str(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
assert 'Generator' in str(rs)
assert str(self.bit_generator.__name__) in str(rs)
assert '{:#x}'.format(id(rs)).upper().replace('X', 'x') not in str(rs)
def test_pickle(self):
import pickle
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
bitgen_pkl = pickle.dumps(bit_generator)
reloaded = pickle.loads(bitgen_pkl)
reloaded_state = reloaded.state
assert_array_equal(Generator(bit_generator).standard_normal(1000),
Generator(reloaded).standard_normal(1000))
assert bit_generator is not reloaded
assert_state_equal(reloaded_state, state)
ss = SeedSequence(100)
aa = pickle.loads(pickle.dumps(ss))
assert_equal(ss.state, aa.state)
def test_invalid_state_type(self):
bit_generator = self.bit_generator(*self.data1['seed'])
with pytest.raises(TypeError):
bit_generator.state = {'1'}
def test_invalid_state_value(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
state['bit_generator'] = 'otherBitGenerator'
with pytest.raises(ValueError):
bit_generator.state = state
def test_invalid_init_type(self):
bit_generator = self.bit_generator
for st in self.invalid_init_types:
with pytest.raises(TypeError):
bit_generator(*st)
def test_invalid_init_values(self):
bit_generator = self.bit_generator
for st in self.invalid_init_values:
with pytest.raises((ValueError, OverflowError)):
bit_generator(*st)
def test_benchmark(self):
bit_generator = self.bit_generator(*self.data1['seed'])
bit_generator._benchmark(1)
bit_generator._benchmark(1, 'double')
with pytest.raises(ValueError):
bit_generator._benchmark(1, 'int32')
@pytest.mark.skipif(MISSING_CFFI, reason='cffi not available')
def test_cffi(self):
bit_generator = self.bit_generator(*self.data1['seed'])
cffi_interface = bit_generator.cffi
assert isinstance(cffi_interface, interface)
other_cffi_interface = bit_generator.cffi
assert other_cffi_interface is cffi_interface
@pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available')
def test_ctypes(self):
bit_generator = self.bit_generator(*self.data1['seed'])
ctypes_interface = bit_generator.ctypes
assert isinstance(ctypes_interface, interface)
other_ctypes_interface = bit_generator.ctypes
assert other_ctypes_interface is ctypes_interface
def test_getstate(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
alt_state = bit_generator.__getstate__()
assert_state_equal(state, alt_state)
class TestPhilox(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = Philox
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(
join(pwd, './data/philox-testset-1.csv'))
cls.data2 = cls._read_csv(
join(pwd, './data/philox-testset-2.csv'))
cls.seed_error_type = TypeError
cls.invalid_init_types = []
cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)]
def test_set_key(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
keyed = self.bit_generator(counter=state['state']['counter'],
key=state['state']['key'])
assert_state_equal(bit_generator.state, keyed.state)
class TestPCG64(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv'))
cls.seed_error_type = (ValueError, TypeError)
cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
cls.invalid_init_values = [(-1,)]
def test_advance_symmetry(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
state = rs.bit_generator.state
step = -0x9e3779b97f4a7c150000000000000000
rs.bit_generator.advance(step)
val_neg = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(2**128 + step)
val_pos = rs.integers(10)
rs.bit_generator.state = state
rs.bit_generator.advance(10 * 2**128 + step)
val_big = rs.integers(10)
assert val_neg == val_pos
assert val_big == val_pos
class TestMT19937(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = MT19937
cls.bits = 32
cls.dtype = np.uint32
cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv'))
cls.seed_error_type = ValueError
cls.invalid_init_types = []
cls.invalid_init_values = [(-1,)]
def test_seed_float_array(self):
assert_raises(TypeError, self.bit_generator, np.array([np.pi]))
assert_raises(TypeError, self.bit_generator, np.array([-np.pi]))
assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi]))
assert_raises(TypeError, self.bit_generator, np.array([0, np.pi]))
assert_raises(TypeError, self.bit_generator, [np.pi])
assert_raises(TypeError, self.bit_generator, [0, np.pi])
def test_state_tuple(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
bit_generator = rs.bit_generator
state = bit_generator.state
desired = rs.integers(2 ** 16)
tup = (state['bit_generator'], state['state']['key'],
state['state']['pos'])
bit_generator.state = tup
actual = rs.integers(2 ** 16)
assert_equal(actual, desired)
tup = tup + (0, 0.0)
bit_generator.state = tup
actual = rs.integers(2 ** 16)
assert_equal(actual, desired)
class TestSFC64(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = SFC64
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(
join(pwd, './data/sfc64-testset-1.csv'))
cls.data2 = cls._read_csv(
join(pwd, './data/sfc64-testset-2.csv'))
cls.seed_error_type = (ValueError, TypeError)
cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
cls.invalid_init_values = [(-1,)]
class TestDefaultRNG:
def test_seed(self):
for args in [(), (None,), (1234,), ([1234, 5678],)]:
rg = default_rng(*args)
assert isinstance(rg.bit_generator, PCG64)
def test_passthrough(self):
bg = Philox()
rg = default_rng(bg)
assert rg.bit_generator is bg
rg2 = default_rng(rg)
assert rg2 is rg
assert rg2.bit_generator is bg
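
``test_advance_symmetry`` exercises ``PCG64.advance`` with negative steps taken modulo 2**128; outside the test suite the same machinery gives cheap, widely separated streams (a sketch, seed arbitrary):

    from numpy.random import Generator, PCG64

    bg = PCG64(1234)
    bg.advance(2**64)                        # jump far ahead in the same stream
    rng = Generator(bg)
    rng2 = Generator(PCG64(1234).jumped())   # or a fixed-stride jump for a second stream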

View file

@@ -0,0 +1,95 @@
import os
import pytest
import shutil
import subprocess
import sys
import warnings
import numpy as np
try:
import cffi
except ImportError:
cffi = None
if sys.flags.optimize > 1:
# no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
# cffi cannot succeed
cffi = None
try:
with warnings.catch_warnings(record=True) as w:
# numba issue gh-4733
warnings.filterwarnings('always', '', DeprecationWarning)
import numba
except ImportError:
numba = None
try:
import cython
from Cython.Compiler.Version import version as cython_version
except ImportError:
cython = None
else:
from distutils.version import LooseVersion
# Cython 0.29.21 is required for Python 3.9 and there are
# other fixes in the 0.29 series that are needed even for earlier
# Python versions.
# Note: keep in sync with the one in pyproject.toml
required_version = LooseVersion('0.29.21')
if LooseVersion(cython_version) < required_version:
# too old or wrong cython, skip the test
cython = None
@pytest.mark.skipif(cython is None, reason="requires cython")
@pytest.mark.slow
def test_cython(tmp_path):
srcdir = os.path.join(os.path.dirname(__file__), '..')
shutil.copytree(srcdir, tmp_path / 'random')
# build the examples and "install" them into a temporary directory
build_dir = tmp_path / 'random' / '_examples' / 'cython'
subprocess.check_call([sys.executable, 'setup.py', 'build', 'install',
'--prefix', str(tmp_path / 'installdir'),
'--single-version-externally-managed',
'--record', str(tmp_path/ 'tmp_install_log.txt'),
],
cwd=str(build_dir),
)
# gh-16162: make sure numpy's __init__.pxd was used for cython
# not really part of this test, but it is a convenient place to check
with open(build_dir / 'extending.c') as fid:
txt_to_find = 'NumPy API declarations from "numpy/__init__.pxd"'
for i, line in enumerate(fid):
if txt_to_find in line:
break
else:
assert False, ("Could not find '{}' in C file, "
"wrong pxd used".format(txt_to_find))
# get the path to the so's
so1 = so2 = None
with open(tmp_path /'tmp_install_log.txt') as fid:
for line in fid:
if 'extending.' in line:
so1 = line.strip()
if 'extending_distributions' in line:
so2 = line.strip()
assert so1 is not None
assert so2 is not None
# import the so's without adding the directory to sys.path
from importlib.machinery import ExtensionFileLoader
extending = ExtensionFileLoader('extending', so1).load_module()
extending_distributions = ExtensionFileLoader('extending_distributions', so2).load_module()
# actually test the cython c-extension
from numpy.random import PCG64
values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd')
assert values.shape == (10,)
assert values.dtype == np.float64
@pytest.mark.skipif(numba is None or cffi is None,
reason="requires numba and cffi")
def test_numba():
from numpy.random._examples.numba import extending # noqa: F401
@pytest.mark.skipif(cffi is None, reason="requires cffi")
def test_cffi():
from numpy.random._examples.cffi import extending # noqa: F401

File diff suppressed because it is too large.

View file

@@ -0,0 +1,150 @@
from numpy.testing import (assert_, assert_array_equal)
import numpy as np
import pytest
from numpy.random import Generator, MT19937
mt19937 = Generator(MT19937())
class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = mt19937.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems
assert_(mt19937.hypergeometric(*args) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
mt19937 = Generator(MT19937(0))
rvsn = mt19937.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / float(N)
msg = "Frequency was %f, should be > 0.45" % freq
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / float(N)
msg = "Frequency was %f, should be < 0.23" % freq
assert_(freq < 0.23, msg)
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
mt19937 = Generator(MT19937(12345))
shuffled = np.array(t, dtype=object)
mt19937.shuffle(shuffled)
expected = np.array([t[2], t[0], t[3], t[1]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom BitGenerator does not call into global state
res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4])
for i in range(3):
mt19937 = Generator(MT19937(i))
m = Generator(MT19937(4321))
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
mt19937.multivariate_normal([0], [[0]], size=1)
mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
mt19937 = Generator(MT19937(1234567890))
x = mt19937.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta')
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
mt19937 = Generator(MT19937(1234))
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = mt19937.choice(a, p=probs)
assert_(c in a)
with pytest.raises(ValueError):
mt19937.choice(a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
mt19937 = Generator(MT19937(1234))
a = np.array(['a', 'a' * 1000])
for _ in range(100):
mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
mt19937 = Generator(MT19937(1234))
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
class N(np.ndarray):
pass
mt19937 = Generator(MT19937(1))
orig = np.arange(3).view(N)
perm = mt19937.permutation(orig)
assert_array_equal(perm, np.array([2, 0, 1]))
assert_array_equal(orig, np.arange(3).view(N))
class M:
a = np.arange(5)
def __array__(self):
return self.a
mt19937 = Generator(MT19937(1))
m = M()
perm = mt19937.permutation(m)
assert_array_equal(perm, np.array([4, 1, 3, 0, 2]))
assert_array_equal(m.__array__(), np.arange(5))
def test_gamma_0(self):
assert mt19937.standard_gamma(0.0) == 0.0
assert_array_equal(mt19937.standard_gamma([0.0]), 0.0)
actual = mt19937.standard_gamma([0.0], dtype='float')
expected = np.array([0.], dtype=np.float32)
assert_array_equal(actual, expected)
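# Illustrative sketch, not part of the test file above: the regressions in this
# class drive a locally seeded Generator instead of the module-level legacy
# state. The vonmises range check, for instance, can be reproduced standalone
# (the seed 1234 is an arbitrary choice for this example):
import numpy as np
from numpy.random import Generator, MT19937
rng = Generator(MT19937(1234))
sample = rng.vonmises(-7.0, 1.0, 50)
assert np.all(sample > -np.pi) and np.all(sample <= np.pi)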

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,203 @@
import sys
import pytest
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
import numpy as np
from numpy import random
class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = random.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = [
(2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
]
is_64bits = sys.maxsize > 2**32
if is_64bits and sys.platform != 'win32':
# Check for 64-bit systems
args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
for arg in args:
assert_(random.hypergeometric(*arg) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
random.seed(0)
rvsn = random.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / float(N)
msg = "Frequency was %f, should be > 0.45" % freq
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / float(N)
msg = "Frequency was %f, should be < 0.23" % freq
assert_(freq < 0.23, msg)
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
random.seed(12345)
shuffled = list(t)
random.shuffle(shuffled)
expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom RandomState does not call into global state
m = random.RandomState()
res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
for i in range(3):
random.seed(i)
m.seed(4321)
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
random.multivariate_normal([0], [[0]], size=1)
random.multivariate_normal([0], [[0]], size=np.int_(1))
random.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
random.seed(1234567890)
x = random.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in random.beta')
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
random.seed(1234)
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = random.choice(a, p=probs)
assert_(c in a)
assert_raises(ValueError, random.choice, a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
random.seed(1234)
a = np.array(['a', 'a' * 1000])
for _ in range(100):
random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
random.seed(1234)
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
class N(np.ndarray):
pass
random.seed(1)
orig = np.arange(3).view(N)
perm = random.permutation(orig)
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
class M:
a = np.arange(5)
def __array__(self):
return self.a
random.seed(1)
m = M()
perm = random.permutation(m)
assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
assert_array_equal(m.__array__(), np.arange(5))
def test_warns_byteorder(self):
# GH 13159
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
with pytest.deprecated_call(match='non-native byteorder is not'):
random.randint(0, 200, size=10, dtype=other_byteord_dt)
def test_named_argument_initialization(self):
# GH 13669
rs1 = np.random.RandomState(123456789)
rs2 = np.random.RandomState(seed=123456789)
assert rs1.randint(0, 100) == rs2.randint(0, 100)
def test_choice_return_dtype(self):
# GH 9867
c = np.random.choice(10, p=[.1]*10, size=2)
assert c.dtype == np.dtype(int)
c = np.random.choice(10, p=[.1]*10, replace=False, size=2)
assert c.dtype == np.dtype(int)
c = np.random.choice(10, size=2)
assert c.dtype == np.dtype(int)
c = np.random.choice(10, replace=False, size=2)
assert c.dtype == np.dtype(int)
@pytest.mark.skipif(np.iinfo('l').max < 2**32,
reason='Cannot test with 32-bit C long')
def test_randint_117(self):
# GH 14189
random.seed(0)
expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
2588848963, 3684848379, 2340255427, 3638918503,
1819583497, 2678185683], dtype='int64')
actual = random.randint(2**32, size=10)
assert_array_equal(actual, expected)
def test_p_zero_stream(self):
# Regression test for gh-14522. Ensure that future versions
# generate the same variates as version 1.16.
np.random.seed(12345)
assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
[0, 0, 0, 1, 1])
def test_n_zero_stream(self):
# Regression test for gh-14522. Ensure that future versions
# generate the same variates as version 1.16.
np.random.seed(8675309)
expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
expected)
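# Illustrative sketch, not part of the test file above: the legacy tests in this
# class re-seed the module-level stream, so identical seeds must reproduce
# identical variates (seed 8675309 mirrors the stream test above):
import numpy as np
np.random.seed(8675309)
first = np.random.binomial([[0], [10]], 0.25, size=(2, 10))
np.random.seed(8675309)
second = np.random.binomial([[0], [10]], 0.25, size=(2, 10))
assert np.array_equal(first, second)  # same seed, same variates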

View file

@@ -0,0 +1,149 @@
import sys
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
from numpy import random
import numpy as np
class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = random.mtrand.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = [
(2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
]
is_64bits = sys.maxsize > 2**32
if is_64bits and sys.platform != 'win32':
# Check for 64-bit systems
args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
for arg in args:
assert_(np.random.hypergeometric(*arg) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
np.random.seed(0)
rvsn = np.random.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / float(N)
msg = "Frequency was %f, should be > 0.45" % freq
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / float(N)
msg = "Frequency was %f, should be < 0.23" % freq
assert_(freq < 0.23, msg)
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
np.random.seed(12345)
shuffled = list(t)
random.shuffle(shuffled)
expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom RandomState does not call into global state
m = np.random.RandomState()
res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
for i in range(3):
np.random.seed(i)
m.seed(4321)
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
np.random.multivariate_normal([0], [[0]], size=1)
np.random.multivariate_normal([0], [[0]], size=np.int_(1))
np.random.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
np.random.seed(1234567890)
x = np.random.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta')
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
np.random.seed(1234)
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = np.random.choice(a, p=probs)
assert_(c in a)
assert_raises(ValueError, np.random.choice, a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
np.random.seed(1234)
a = np.array(['a', 'a' * 1000])
for _ in range(100):
np.random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
np.random.seed(1234)
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
np.random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
class N(np.ndarray):
pass
np.random.seed(1)
orig = np.arange(3).view(N)
perm = np.random.permutation(orig)
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
class M:
a = np.arange(5)
def __array__(self):
return self.a
np.random.seed(1)
m = M()
perm = np.random.permutation(m)
assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
assert_array_equal(m.__array__(), np.arange(5))
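# Illustrative sketch, not part of the test file above: permutation accepts any
# object implementing __array__ and returns a permuted copy while leaving the
# source untouched; the class name ArrayLike below is a hypothetical helper
# introduced only for this example:
import numpy as np

class ArrayLike:
    def __init__(self):
        self.a = np.arange(5)
    def __array__(self):
        return self.a

obj = ArrayLike()
perm = np.random.permutation(obj)
assert np.array_equal(obj.a, np.arange(5))  # the original data is unchanged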

View file

@@ -0,0 +1,80 @@
import numpy as np
from numpy.testing import assert_array_equal, assert_array_compare
from numpy.random import SeedSequence
def test_reference_data():
""" Check that SeedSequence generates data the same as the C++ reference.
https://gist.github.com/imneme/540829265469e673d045
"""
inputs = [
[3735928559, 195939070, 229505742, 305419896],
[3668361503, 4165561550, 1661411377, 3634257570],
[164546577, 4166754639, 1765190214, 1303880213],
[446610472, 3941463886, 522937693, 1882353782],
[1864922766, 1719732118, 3882010307, 1776744564],
[4141682960, 3310988675, 553637289, 902896340],
[1134851934, 2352871630, 3699409824, 2648159817],
[1240956131, 3107113773, 1283198141, 1924506131],
[2669565031, 579818610, 3042504477, 2774880435],
[2766103236, 2883057919, 4029656435, 862374500],
]
outputs = [
[3914649087, 576849849, 3593928901, 2229911004],
[2240804226, 3691353228, 1365957195, 2654016646],
[3562296087, 3191708229, 1147942216, 3726991905],
[1403443605, 3591372999, 1291086759, 441919183],
[1086200464, 2191331643, 560336446, 3658716651],
[3249937430, 2346751812, 847844327, 2996632307],
[2584285912, 4034195531, 3523502488, 169742686],
[959045797, 3875435559, 1886309314, 359682705],
[3978441347, 432478529, 3223635119, 138903045],
[296367413, 4262059219, 13109864, 3283683422],
]
outputs64 = [
[2477551240072187391, 9577394838764454085],
[15854241394484835714, 11398914698975566411],
[13708282465491374871, 16007308345579681096],
[15424829579845884309, 1898028439751125927],
[9411697742461147792, 15714068361935982142],
[10079222287618677782, 12870437757549876199],
[17326737873898640088, 729039288628699544],
[16644868984619524261, 1544825456798124994],
[1857481142255628931, 596584038813451439],
[18305404959516669237, 14103312907920476776],
]
for seed, expected, expected64 in zip(inputs, outputs, outputs64):
expected = np.array(expected, dtype=np.uint32)
ss = SeedSequence(seed)
state = ss.generate_state(len(expected))
assert_array_equal(state, expected)
state64 = ss.generate_state(len(expected64), dtype=np.uint64)
assert_array_equal(state64, expected64)
def test_zero_padding():
""" Ensure that the implicit zero-padding does not cause problems.
"""
# Ensure that large integers are inserted in little-endian fashion to avoid
# trailing 0s.
ss0 = SeedSequence(42)
ss1 = SeedSequence(42 << 32)
assert_array_compare(
np.not_equal,
ss0.generate_state(4),
ss1.generate_state(4))
# Ensure backwards compatibility with the original 1.17 release for small
# integers and no spawn key.
expected42 = np.array([3444837047, 2669555309, 2046530742, 3581440988],
dtype=np.uint32)
assert_array_equal(SeedSequence(42).generate_state(4), expected42)
# Regression test for gh-16539 to ensure that the implicit 0s don't
# conflict with spawn keys.
assert_array_compare(
np.not_equal,
SeedSequence(42, spawn_key=(0,)).generate_state(4),
expected42)
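# Illustrative sketch, not part of the test file above: beyond generate_state,
# a SeedSequence can spawn independent children for parallel streams, each of
# which can seed its own bit generator (the entropy value 42 is arbitrary here):
import numpy as np
from numpy.random import SeedSequence, PCG64, Generator
parent = SeedSequence(42)
children = parent.spawn(3)                        # three independent child sequences
streams = [Generator(PCG64(child)) for child in children]
assert len({tuple(c.generate_state(4)) for c in children}) == 3  # distinct states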

View file

@@ -0,0 +1,807 @@
import pickle
from functools import partial
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_, assert_array_equal
from numpy.random import (Generator, MT19937, PCG64, Philox, SFC64)
@pytest.fixture(scope='module',
params=(np.bool_, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64))
def dtype(request):
return request.param
def params_0(f):
val = f()
assert_(np.isscalar(val))
val = f(10)
assert_(val.shape == (10,))
val = f((10, 10))
assert_(val.shape == (10, 10))
val = f((10, 10, 10))
assert_(val.shape == (10, 10, 10))
val = f(size=(5, 5))
assert_(val.shape == (5, 5))
def params_1(f, bounded=False):
a = 5.0
b = np.arange(2.0, 12.0)
c = np.arange(2.0, 102.0).reshape((10, 10))
d = np.arange(2.0, 1002.0).reshape((10, 10, 10))
e = np.array([2.0, 3.0])
g = np.arange(2.0, 12.0).reshape((1, 10, 1))
if bounded:
a = 0.5
b = b / (1.5 * b.max())
c = c / (1.5 * c.max())
d = d / (1.5 * d.max())
e = e / (1.5 * e.max())
g = g / (1.5 * g.max())
# Scalar
f(a)
# Scalar - size
f(a, size=(10, 10))
# 1d
f(b)
# 2d
f(c)
# 3d
f(d)
# 1d size
f(b, size=10)
# 2d - size - broadcast
f(e, size=(10, 2))
# 3d - size
f(g, size=(10, 10, 10))
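# Illustrative sketch, not part of the original file: these helpers are handed
# bound Generator methods and simply exercise scalar/array/size broadcasting,
# as in the following standalone call (the name _example_rg and seed 0 are
# arbitrary, used only for this example):
_example_rg = Generator(PCG64(0))
params_1(_example_rg.standard_gamma)  # shape parameter broadcast over 1d/2d/3d arrays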
def comp_state(state1, state2):
identical = True
if isinstance(state1, dict):
for key in state1:
identical &= comp_state(state1[key], state2[key])
elif type(state1) != type(state2):
identical &= type(state1) == type(state2)
else:
if (isinstance(state1, (list, tuple, np.ndarray)) and isinstance(
state2, (list, tuple, np.ndarray))):
for s1, s2 in zip(state1, state2):
identical &= comp_state(s1, s2)
else:
identical &= state1 == state2
return identical
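# Illustrative sketch, not part of the original file: comp_state walks nested
# state dicts and arrays, so two identically seeded bit generators compare
# equal (seed 7 is arbitrary for this example):
_g1, _g2 = Generator(PCG64(7)), Generator(PCG64(7))
assert comp_state(_g1.bit_generator.state, _g2.bit_generator.state)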
def warmup(rg, n=None):
if n is None:
n = 11 + np.random.randint(0, 20)
rg.standard_normal(n)
rg.standard_normal(n)
rg.standard_normal(n, dtype=np.float32)
rg.standard_normal(n, dtype=np.float32)
rg.integers(0, 2 ** 24, n, dtype=np.uint64)
rg.integers(0, 2 ** 48, n, dtype=np.uint64)
rg.standard_gamma(11.0, n)
rg.standard_gamma(11.0, n, dtype=np.float32)
rg.random(n, dtype=np.float64)
rg.random(n, dtype=np.float32)
class RNG:
@classmethod
def setup_class(cls):
# Overridden in test classes. Placeholder to silence IDE noise
cls.bit_generator = PCG64
cls.advance = None
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
@classmethod
def _extra_setup(cls):
cls.vec_1d = np.arange(2.0, 102.0)
cls.vec_2d = np.arange(2.0, 102.0)[None, :]
cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100))
cls.seed_error = TypeError
def _reset_state(self):
self.rg.bit_generator.state = self.initial_state
def test_init(self):
rg = Generator(self.bit_generator())
state = rg.bit_generator.state
rg.standard_normal(1)
rg.standard_normal(1)
rg.bit_generator.state = state
new_state = rg.bit_generator.state
assert_(comp_state(state, new_state))
def test_advance(self):
state = self.rg.bit_generator.state
if hasattr(self.rg.bit_generator, 'advance'):
self.rg.bit_generator.advance(self.advance)
assert_(not comp_state(state, self.rg.bit_generator.state))
else:
bitgen_name = self.rg.bit_generator.__class__.__name__
pytest.skip('Advance is not supported by {0}'.format(bitgen_name))
def test_jump(self):
state = self.rg.bit_generator.state
if hasattr(self.rg.bit_generator, 'jumped'):
bit_gen2 = self.rg.bit_generator.jumped()
jumped_state = bit_gen2.state
assert_(not comp_state(state, jumped_state))
self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17)
self.rg.bit_generator.state = state
bit_gen3 = self.rg.bit_generator.jumped()
rejumped_state = bit_gen3.state
assert_(comp_state(jumped_state, rejumped_state))
else:
bitgen_name = self.rg.bit_generator.__class__.__name__
if bitgen_name not in ('SFC64',):
raise AttributeError('no "jumped" in %s' % bitgen_name)
pytest.skip('Jump is not supported by {0}'.format(bitgen_name))
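# Illustrative sketch, not part of the original file: jumped() returns a new
# bit generator advanced as if a very large number of draws had already been
# made, which is one way to build non-overlapping parallel streams
# (the seed 12345 is an arbitrary example value):
_base = PCG64(12345)
_streams = [Generator(_base)] + [Generator(_base.jumped(i)) for i in range(1, 4)]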
def test_uniform(self):
r = self.rg.uniform(-1.0, 0.0, size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
def test_uniform_array(self):
r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
r = self.rg.uniform(np.array([-1.0] * 10),
np.array([0.0] * 10), size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10)
assert_(len(r) == 10)
assert_((r > -1).all())
assert_((r <= 0).all())
def test_random(self):
assert_(len(self.rg.random(10)) == 10)
params_0(self.rg.random)
def test_standard_normal_zig(self):
assert_(len(self.rg.standard_normal(10)) == 10)
def test_standard_normal(self):
assert_(len(self.rg.standard_normal(10)) == 10)
params_0(self.rg.standard_normal)
def test_standard_gamma(self):
assert_(len(self.rg.standard_gamma(10, 10)) == 10)
assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10)
params_1(self.rg.standard_gamma)
def test_standard_exponential(self):
assert_(len(self.rg.standard_exponential(10)) == 10)
params_0(self.rg.standard_exponential)
def test_standard_exponential_float(self):
randoms = self.rg.standard_exponential(10, dtype='float32')
assert_(len(randoms) == 10)
assert randoms.dtype == np.float32
params_0(partial(self.rg.standard_exponential, dtype='float32'))
def test_standard_exponential_float_log(self):
randoms = self.rg.standard_exponential(10, dtype='float32',
method='inv')
assert_(len(randoms) == 10)
assert randoms.dtype == np.float32
params_0(partial(self.rg.standard_exponential, dtype='float32',
method='inv'))
def test_standard_cauchy(self):
assert_(len(self.rg.standard_cauchy(10)) == 10)
params_0(self.rg.standard_cauchy)
def test_standard_t(self):
assert_(len(self.rg.standard_t(10, 10)) == 10)
params_1(self.rg.standard_t)
def test_binomial(self):
assert_(self.rg.binomial(10, .5) >= 0)
assert_(self.rg.binomial(1000, .5) >= 0)
def test_reset_state(self):
state = self.rg.bit_generator.state
int_1 = self.rg.integers(2**31)
self.rg.bit_generator.state = state
int_2 = self.rg.integers(2**31)
assert_(int_1 == int_2)
def test_entropy_init(self):
rg = Generator(self.bit_generator())
rg2 = Generator(self.bit_generator())
assert_(not comp_state(rg.bit_generator.state,
rg2.bit_generator.state))
def test_seed(self):
rg = Generator(self.bit_generator(*self.seed))
rg2 = Generator(self.bit_generator(*self.seed))
rg.random()
rg2.random()
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_reset_state_gauss(self):
rg = Generator(self.bit_generator(*self.seed))
rg.standard_normal()
state = rg.bit_generator.state
n1 = rg.standard_normal(size=10)
rg2 = Generator(self.bit_generator())
rg2.bit_generator.state = state
n2 = rg2.standard_normal(size=10)
assert_array_equal(n1, n2)
def test_reset_state_uint32(self):
rg = Generator(self.bit_generator(*self.seed))
rg.integers(0, 2 ** 24, 120, dtype=np.uint32)
state = rg.bit_generator.state
n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32)
rg2 = Generator(self.bit_generator())
rg2.bit_generator.state = state
n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32)
assert_array_equal(n1, n2)
def test_reset_state_float(self):
rg = Generator(self.bit_generator(*self.seed))
rg.random(dtype='float32')
state = rg.bit_generator.state
n1 = rg.random(size=10, dtype='float32')
rg2 = Generator(self.bit_generator())
rg2.bit_generator.state = state
n2 = rg2.random(size=10, dtype='float32')
assert_((n1 == n2).all())
def test_shuffle(self):
original = np.arange(200, 0, -1)
permuted = self.rg.permutation(original)
assert_((original != permuted).any())
def test_permutation(self):
original = np.arange(200, 0, -1)
permuted = self.rg.permutation(original)
assert_((original != permuted).any())
def test_beta(self):
vals = self.rg.beta(2.0, 2.0, 10)
assert_(len(vals) == 10)
vals = self.rg.beta(np.array([2.0] * 10), 2.0)
assert_(len(vals) == 10)
vals = self.rg.beta(2.0, np.array([2.0] * 10))
assert_(len(vals) == 10)
vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10))
assert_(len(vals) == 10)
vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10))
assert_(vals.shape == (10, 10))
def test_bytes(self):
vals = self.rg.bytes(10)
assert_(len(vals) == 10)
def test_chisquare(self):
vals = self.rg.chisquare(2.0, 10)
assert_(len(vals) == 10)
params_1(self.rg.chisquare)
def test_exponential(self):
vals = self.rg.exponential(2.0, 10)
assert_(len(vals) == 10)
params_1(self.rg.exponential)
def test_f(self):
vals = self.rg.f(3, 1000, 10)
assert_(len(vals) == 10)
def test_gamma(self):
vals = self.rg.gamma(3, 2, 10)
assert_(len(vals) == 10)
def test_geometric(self):
vals = self.rg.geometric(0.5, 10)
assert_(len(vals) == 10)
params_1(self.rg.geometric, bounded=True)
def test_gumbel(self):
vals = self.rg.gumbel(2.0, 2.0, 10)
assert_(len(vals) == 10)
def test_laplace(self):
vals = self.rg.laplace(2.0, 2.0, 10)
assert_(len(vals) == 10)
def test_logistic(self):
vals = self.rg.logistic(2.0, 2.0, 10)
assert_(len(vals) == 10)
def test_logseries(self):
vals = self.rg.logseries(0.5, 10)
assert_(len(vals) == 10)
def test_negative_binomial(self):
vals = self.rg.negative_binomial(10, 0.2, 10)
assert_(len(vals) == 10)
def test_noncentral_chisquare(self):
vals = self.rg.noncentral_chisquare(10, 2, 10)
assert_(len(vals) == 10)
def test_noncentral_f(self):
vals = self.rg.noncentral_f(3, 1000, 2, 10)
assert_(len(vals) == 10)
vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2)
assert_(len(vals) == 10)
vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2)
assert_(len(vals) == 10)
vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10))
assert_(len(vals) == 10)
def test_normal(self):
vals = self.rg.normal(10, 0.2, 10)
assert_(len(vals) == 10)
def test_pareto(self):
vals = self.rg.pareto(3.0, 10)
assert_(len(vals) == 10)
def test_poisson(self):
vals = self.rg.poisson(10, 10)
assert_(len(vals) == 10)
vals = self.rg.poisson(np.array([10] * 10))
assert_(len(vals) == 10)
params_1(self.rg.poisson)
def test_power(self):
vals = self.rg.power(0.2, 10)
assert_(len(vals) == 10)
def test_integers(self):
vals = self.rg.integers(10, 20, 10)
assert_(len(vals) == 10)
def test_rayleigh(self):
vals = self.rg.rayleigh(0.2, 10)
assert_(len(vals) == 10)
params_1(self.rg.rayleigh, bounded=True)
def test_vonmises(self):
vals = self.rg.vonmises(10, 0.2, 10)
assert_(len(vals) == 10)
def test_wald(self):
vals = self.rg.wald(1.0, 1.0, 10)
assert_(len(vals) == 10)
def test_weibull(self):
vals = self.rg.weibull(1.0, 10)
assert_(len(vals) == 10)
def test_zipf(self):
vals = self.rg.zipf(10, 10)
assert_(len(vals) == 10)
vals = self.rg.zipf(self.vec_1d)
assert_(len(vals) == 100)
vals = self.rg.zipf(self.vec_2d)
assert_(vals.shape == (1, 100))
vals = self.rg.zipf(self.mat)
assert_(vals.shape == (100, 100))
def test_hypergeometric(self):
vals = self.rg.hypergeometric(25, 25, 20)
assert_(np.isscalar(vals))
vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20)
assert_(vals.shape == (10,))
def test_triangular(self):
vals = self.rg.triangular(-5, 0, 5)
assert_(np.isscalar(vals))
vals = self.rg.triangular(-5, np.array([0] * 10), 5)
assert_(vals.shape == (10,))
def test_multivariate_normal(self):
mean = [0, 0]
cov = [[1, 0], [0, 100]] # diagonal covariance
x = self.rg.multivariate_normal(mean, cov, 5000)
assert_(x.shape == (5000, 2))
x_zig = self.rg.multivariate_normal(mean, cov, 5000)
assert_(x_zig.shape == (5000, 2))
x_inv = self.rg.multivariate_normal(mean, cov, 5000)
assert_(x_inv.shape == (5000, 2))
assert_((x_zig != x_inv).any())
def test_multinomial(self):
vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3])
assert_(vals.shape == (2,))
vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10)
assert_(vals.shape == (10, 2))
def test_dirichlet(self):
s = self.rg.dirichlet((10, 5, 3), 20)
assert_(s.shape == (20, 3))
def test_pickle(self):
pick = pickle.dumps(self.rg)
unpick = pickle.loads(pick)
assert_((type(self.rg) == type(unpick)))
assert_(comp_state(self.rg.bit_generator.state,
unpick.bit_generator.state))
pick = pickle.dumps(self.rg)
unpick = pickle.loads(pick)
assert_((type(self.rg) == type(unpick)))
assert_(comp_state(self.rg.bit_generator.state,
unpick.bit_generator.state))
def test_seed_array(self):
if self.seed_vector_bits is None:
bitgen_name = self.bit_generator.__name__
pytest.skip('Vector seeding is not supported by '
'{0}'.format(bitgen_name))
if self.seed_vector_bits == 32:
dtype = np.uint32
else:
dtype = np.uint64
seed = np.array([1], dtype=dtype)
bg = self.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(1)
state2 = bg.state
assert_(comp_state(state1, state2))
seed = np.arange(4, dtype=dtype)
bg = self.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(seed[0])
state2 = bg.state
assert_(not comp_state(state1, state2))
seed = np.arange(1500, dtype=dtype)
bg = self.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(seed[0])
state2 = bg.state
assert_(not comp_state(state1, state2))
seed = 2 ** np.mod(np.arange(1500, dtype=dtype),
self.seed_vector_bits - 1) + 1
bg = self.bit_generator(seed)
state1 = bg.state
bg = self.bit_generator(seed[0])
state2 = bg.state
assert_(not comp_state(state1, state2))
def test_uniform_float(self):
rg = Generator(self.bit_generator(12345))
warmup(rg)
state = rg.bit_generator.state
r1 = rg.random(11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.random(11, dtype=np.float32)
assert_array_equal(r1, r2)
assert_equal(r1.dtype, np.float32)
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_gamma_floats(self):
rg = Generator(self.bit_generator())
warmup(rg)
state = rg.bit_generator.state
r1 = rg.standard_gamma(4.0, 11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32)
assert_array_equal(r1, r2)
assert_equal(r1.dtype, np.float32)
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_normal_floats(self):
rg = Generator(self.bit_generator())
warmup(rg)
state = rg.bit_generator.state
r1 = rg.standard_normal(11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.standard_normal(11, dtype=np.float32)
assert_array_equal(r1, r2)
assert_equal(r1.dtype, np.float32)
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_normal_zig_floats(self):
rg = Generator(self.bit_generator())
warmup(rg)
state = rg.bit_generator.state
r1 = rg.standard_normal(11, dtype=np.float32)
rg2 = Generator(self.bit_generator())
warmup(rg2)
rg2.bit_generator.state = state
r2 = rg2.standard_normal(11, dtype=np.float32)
assert_array_equal(r1, r2)
assert_equal(r1.dtype, np.float32)
assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
def test_output_fill(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.empty(size)
rg.bit_generator.state = state
rg.standard_normal(out=existing)
rg.bit_generator.state = state
direct = rg.standard_normal(size=size)
assert_equal(direct, existing)
sized = np.empty(size)
rg.bit_generator.state = state
rg.standard_normal(out=sized, size=sized.shape)
existing = np.empty(size, dtype=np.float32)
rg.bit_generator.state = state
rg.standard_normal(out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.standard_normal(size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_filling_uniform(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.empty(size)
rg.bit_generator.state = state
rg.random(out=existing)
rg.bit_generator.state = state
direct = rg.random(size=size)
assert_equal(direct, existing)
existing = np.empty(size, dtype=np.float32)
rg.bit_generator.state = state
rg.random(out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.random(size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_filling_exponential(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.empty(size)
rg.bit_generator.state = state
rg.standard_exponential(out=existing)
rg.bit_generator.state = state
direct = rg.standard_exponential(size=size)
assert_equal(direct, existing)
existing = np.empty(size, dtype=np.float32)
rg.bit_generator.state = state
rg.standard_exponential(out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.standard_exponential(size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_filling_gamma(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
existing = np.zeros(size)
rg.bit_generator.state = state
rg.standard_gamma(1.0, out=existing)
rg.bit_generator.state = state
direct = rg.standard_gamma(1.0, size=size)
assert_equal(direct, existing)
existing = np.zeros(size, dtype=np.float32)
rg.bit_generator.state = state
rg.standard_gamma(1.0, out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.standard_gamma(1.0, size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_filling_gamma_broadcast(self):
rg = self.rg
state = rg.bit_generator.state
size = (31, 7, 97)
mu = np.arange(97.0) + 1.0
existing = np.zeros(size)
rg.bit_generator.state = state
rg.standard_gamma(mu, out=existing)
rg.bit_generator.state = state
direct = rg.standard_gamma(mu, size=size)
assert_equal(direct, existing)
existing = np.zeros(size, dtype=np.float32)
rg.bit_generator.state = state
rg.standard_gamma(mu, out=existing, dtype=np.float32)
rg.bit_generator.state = state
direct = rg.standard_gamma(mu, size=size, dtype=np.float32)
assert_equal(direct, existing)
def test_output_fill_error(self):
rg = self.rg
size = (31, 7, 97)
existing = np.empty(size)
with pytest.raises(TypeError):
rg.standard_normal(out=existing, dtype=np.float32)
with pytest.raises(ValueError):
rg.standard_normal(out=existing[::3])
existing = np.empty(size, dtype=np.float32)
with pytest.raises(TypeError):
rg.standard_normal(out=existing, dtype=np.float64)
existing = np.zeros(size, dtype=np.float32)
with pytest.raises(TypeError):
rg.standard_gamma(1.0, out=existing, dtype=np.float64)
with pytest.raises(ValueError):
rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32)
existing = np.zeros(size, dtype=np.float64)
with pytest.raises(TypeError):
rg.standard_gamma(1.0, out=existing, dtype=np.float32)
with pytest.raises(ValueError):
rg.standard_gamma(1.0, out=existing[::3])
def test_integers_broadcast(self, dtype):
if dtype == np.bool_:
upper = 2
lower = 0
else:
info = np.iinfo(dtype)
upper = int(info.max) + 1
lower = info.min
self._reset_state()
a = self.rg.integers(lower, [upper] * 10, dtype=dtype)
self._reset_state()
b = self.rg.integers([lower] * 10, upper, dtype=dtype)
assert_equal(a, b)
self._reset_state()
c = self.rg.integers(lower, upper, size=10, dtype=dtype)
assert_equal(a, c)
self._reset_state()
d = self.rg.integers(np.array(
[lower] * 10), np.array([upper], dtype=object), size=10,
dtype=dtype)
assert_equal(a, d)
self._reset_state()
e = self.rg.integers(
np.array([lower] * 10), np.array([upper] * 10), size=10,
dtype=dtype)
assert_equal(a, e)
self._reset_state()
a = self.rg.integers(0, upper, size=10, dtype=dtype)
self._reset_state()
b = self.rg.integers([upper] * 10, dtype=dtype)
assert_equal(a, b)
def test_integers_numpy(self, dtype):
high = np.array([1])
low = np.array([0])
out = self.rg.integers(low, high, dtype=dtype)
assert out.shape == (1,)
out = self.rg.integers(low[0], high, dtype=dtype)
assert out.shape == (1,)
out = self.rg.integers(low, high[0], dtype=dtype)
assert out.shape == (1,)
def test_integers_broadcast_errors(self, dtype):
if dtype == np.bool_:
upper = 2
lower = 0
else:
info = np.iinfo(dtype)
upper = int(info.max) + 1
lower = info.min
with pytest.raises(ValueError):
self.rg.integers(lower, [upper + 1] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers(lower - 1, [upper] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers([lower - 1], [upper] * 10, dtype=dtype)
with pytest.raises(ValueError):
self.rg.integers([0], [0], dtype=dtype)
class TestMT19937(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = MT19937
cls.advance = None
cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 32
cls._extra_setup()
cls.seed_error = ValueError
def test_numpy_state(self):
nprg = np.random.RandomState()
nprg.standard_normal(99)
state = nprg.get_state()
self.rg.bit_generator.state = state
state2 = self.rg.bit_generator.state
assert_((state[1] == state2['state']['key']).all())
assert_((state[2] == state2['state']['pos']))
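# Illustrative sketch, not part of the original file: the same compatibility
# works standalone, i.e. a legacy get_state() tuple can be assigned directly
# to an MT19937 bit generator so both share the same key and position
# (the seed 1234 is an arbitrary example value):
_legacy = np.random.RandomState(1234)
_bg = MT19937()
_bg.state = _legacy.get_state()
assert (_bg.state['state']['key'] == _legacy.get_state()[1]).all()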
class TestPhilox(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = Philox
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
class TestSFC64(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = SFC64
cls.advance = None
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 192
cls._extra_setup()
class TestPCG64(RNG):
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = Generator(cls.bit_generator(*cls.seed))
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
class TestDefaultRNG(RNG):
@classmethod
def setup_class(cls):
# This will duplicate some tests that directly instantiate a fresh
# Generator(), but that's okay.
cls.bit_generator = PCG64
cls.advance = 2**63 + 2**31 + 2**15 + 1
cls.seed = [12345]
cls.rg = np.random.default_rng(*cls.seed)
cls.initial_state = cls.rg.bit_generator.state
cls.seed_vector_bits = 64
cls._extra_setup()
def test_default_is_pcg64(self):
# In order to change the default BitGenerator, we'll go through
# a deprecation cycle to move to a different function.
assert_(isinstance(self.rg.bit_generator, PCG64))
def test_seed(self):
np.random.default_rng()
np.random.default_rng(None)
np.random.default_rng(12345)
np.random.default_rng(0)
np.random.default_rng(43660444402423911716352051725018508569)
np.random.default_rng([43660444402423911716352051725018508569,
279705150948142787361475340226491943209])
with pytest.raises(ValueError):
np.random.default_rng(-1)
with pytest.raises(ValueError):
np.random.default_rng([12345, -1])
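# Illustrative sketch, not part of the original file: default_rng accepts the
# same kinds of seeds exercised above and also an explicit SeedSequence or
# BitGenerator instance (the values here are arbitrary example seeds):
from numpy.random import SeedSequence
rng_a = np.random.default_rng(SeedSequence(12345))
rng_b = np.random.default_rng(PCG64(12345))
assert isinstance(rng_a.bit_generator, PCG64) and isinstance(rng_b.bit_generator, PCG64)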