Created starter files for the project.
This commit is contained in:
commit
73f0c0db42
1992 changed files with 769897 additions and 0 deletions
0
venv/Lib/site-packages/numpy/lib/tests/__init__.py
Normal file
0
venv/Lib/site-packages/numpy/lib/tests/__init__.py
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
venv/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npy
Normal file
BIN
venv/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npy
Normal file
Binary file not shown.
BIN
venv/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npz
Normal file
BIN
venv/Lib/site-packages/numpy/lib/tests/data/py2-objarr.npz
Normal file
Binary file not shown.
BIN
venv/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npy
Normal file
BIN
venv/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npy
Normal file
Binary file not shown.
BIN
venv/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npz
Normal file
BIN
venv/Lib/site-packages/numpy/lib/tests/data/py3-objarr.npz
Normal file
Binary file not shown.
BIN
venv/Lib/site-packages/numpy/lib/tests/data/python3.npy
Normal file
BIN
venv/Lib/site-packages/numpy/lib/tests/data/python3.npy
Normal file
Binary file not shown.
BIN
venv/Lib/site-packages/numpy/lib/tests/data/win64python2.npy
Normal file
BIN
venv/Lib/site-packages/numpy/lib/tests/data/win64python2.npy
Normal file
Binary file not shown.
350
venv/Lib/site-packages/numpy/lib/tests/test__datasource.py
Normal file
350
venv/Lib/site-packages/numpy/lib/tests/test__datasource.py
Normal file
|
|
@ -0,0 +1,350 @@
|
|||
import os
|
||||
import pytest
|
||||
from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
|
||||
from shutil import rmtree
|
||||
|
||||
import numpy.lib._datasource as datasource
|
||||
from numpy.testing import assert_, assert_equal, assert_raises
|
||||
|
||||
import urllib.request as urllib_request
|
||||
from urllib.parse import urlparse
|
||||
from urllib.error import URLError
|
||||
|
||||
|
||||
def urlopen_stub(url, data=None):
|
||||
'''Stub to replace urlopen for testing.'''
|
||||
if url == valid_httpurl():
|
||||
tmpfile = NamedTemporaryFile(prefix='urltmp_')
|
||||
return tmpfile
|
||||
else:
|
||||
raise URLError('Name or service not known')
|
||||
|
||||
# setup and teardown
|
||||
old_urlopen = None
|
||||
|
||||
|
||||
def setup_module():
|
||||
global old_urlopen
|
||||
|
||||
old_urlopen = urllib_request.urlopen
|
||||
urllib_request.urlopen = urlopen_stub
|
||||
|
||||
|
||||
def teardown_module():
|
||||
urllib_request.urlopen = old_urlopen
|
||||
|
||||
# A valid website for more robust testing
|
||||
http_path = 'http://www.google.com/'
|
||||
http_file = 'index.html'
|
||||
|
||||
http_fakepath = 'http://fake.abc.web/site/'
|
||||
http_fakefile = 'fake.txt'
|
||||
|
||||
malicious_files = ['/etc/shadow', '../../shadow',
|
||||
'..\\system.dat', 'c:\\windows\\system.dat']
|
||||
|
||||
magic_line = b'three is the magic number'
|
||||
|
||||
|
||||
# Utility functions used by many tests
|
||||
def valid_textfile(filedir):
|
||||
# Generate and return a valid temporary file.
|
||||
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True)
|
||||
os.close(fd)
|
||||
return path
|
||||
|
||||
|
||||
def invalid_textfile(filedir):
|
||||
# Generate and return an invalid filename.
|
||||
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir)
|
||||
os.close(fd)
|
||||
os.remove(path)
|
||||
return path
|
||||
|
||||
|
||||
def valid_httpurl():
|
||||
return http_path+http_file
|
||||
|
||||
|
||||
def invalid_httpurl():
|
||||
return http_fakepath+http_fakefile
|
||||
|
||||
|
||||
def valid_baseurl():
|
||||
return http_path
|
||||
|
||||
|
||||
def invalid_baseurl():
|
||||
return http_fakepath
|
||||
|
||||
|
||||
def valid_httpfile():
|
||||
return http_file
|
||||
|
||||
|
||||
def invalid_httpfile():
|
||||
return http_fakefile
|
||||
|
||||
|
||||
class TestDataSourceOpen:
|
||||
def setup(self):
|
||||
self.tmpdir = mkdtemp()
|
||||
self.ds = datasource.DataSource(self.tmpdir)
|
||||
|
||||
def teardown(self):
|
||||
rmtree(self.tmpdir)
|
||||
del self.ds
|
||||
|
||||
def test_ValidHTTP(self):
|
||||
fh = self.ds.open(valid_httpurl())
|
||||
assert_(fh)
|
||||
fh.close()
|
||||
|
||||
def test_InvalidHTTP(self):
|
||||
url = invalid_httpurl()
|
||||
assert_raises(IOError, self.ds.open, url)
|
||||
try:
|
||||
self.ds.open(url)
|
||||
except IOError as e:
|
||||
# Regression test for bug fixed in r4342.
|
||||
assert_(e.errno is None)
|
||||
|
||||
def test_InvalidHTTPCacheURLError(self):
|
||||
assert_raises(URLError, self.ds._cache, invalid_httpurl())
|
||||
|
||||
def test_ValidFile(self):
|
||||
local_file = valid_textfile(self.tmpdir)
|
||||
fh = self.ds.open(local_file)
|
||||
assert_(fh)
|
||||
fh.close()
|
||||
|
||||
def test_InvalidFile(self):
|
||||
invalid_file = invalid_textfile(self.tmpdir)
|
||||
assert_raises(IOError, self.ds.open, invalid_file)
|
||||
|
||||
def test_ValidGzipFile(self):
|
||||
try:
|
||||
import gzip
|
||||
except ImportError:
|
||||
# We don't have the gzip capabilities to test.
|
||||
pytest.skip()
|
||||
# Test datasource's internal file_opener for Gzip files.
|
||||
filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')
|
||||
fp = gzip.open(filepath, 'w')
|
||||
fp.write(magic_line)
|
||||
fp.close()
|
||||
fp = self.ds.open(filepath)
|
||||
result = fp.readline()
|
||||
fp.close()
|
||||
assert_equal(magic_line, result)
|
||||
|
||||
def test_ValidBz2File(self):
|
||||
try:
|
||||
import bz2
|
||||
except ImportError:
|
||||
# We don't have the bz2 capabilities to test.
|
||||
pytest.skip()
|
||||
# Test datasource's internal file_opener for BZip2 files.
|
||||
filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
|
||||
fp = bz2.BZ2File(filepath, 'w')
|
||||
fp.write(magic_line)
|
||||
fp.close()
|
||||
fp = self.ds.open(filepath)
|
||||
result = fp.readline()
|
||||
fp.close()
|
||||
assert_equal(magic_line, result)
|
||||
|
||||
|
||||
class TestDataSourceExists:
|
||||
def setup(self):
|
||||
self.tmpdir = mkdtemp()
|
||||
self.ds = datasource.DataSource(self.tmpdir)
|
||||
|
||||
def teardown(self):
|
||||
rmtree(self.tmpdir)
|
||||
del self.ds
|
||||
|
||||
def test_ValidHTTP(self):
|
||||
assert_(self.ds.exists(valid_httpurl()))
|
||||
|
||||
def test_InvalidHTTP(self):
|
||||
assert_equal(self.ds.exists(invalid_httpurl()), False)
|
||||
|
||||
def test_ValidFile(self):
|
||||
# Test valid file in destpath
|
||||
tmpfile = valid_textfile(self.tmpdir)
|
||||
assert_(self.ds.exists(tmpfile))
|
||||
# Test valid local file not in destpath
|
||||
localdir = mkdtemp()
|
||||
tmpfile = valid_textfile(localdir)
|
||||
assert_(self.ds.exists(tmpfile))
|
||||
rmtree(localdir)
|
||||
|
||||
def test_InvalidFile(self):
|
||||
tmpfile = invalid_textfile(self.tmpdir)
|
||||
assert_equal(self.ds.exists(tmpfile), False)
|
||||
|
||||
|
||||
class TestDataSourceAbspath:
|
||||
def setup(self):
|
||||
self.tmpdir = os.path.abspath(mkdtemp())
|
||||
self.ds = datasource.DataSource(self.tmpdir)
|
||||
|
||||
def teardown(self):
|
||||
rmtree(self.tmpdir)
|
||||
del self.ds
|
||||
|
||||
def test_ValidHTTP(self):
|
||||
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
|
||||
local_path = os.path.join(self.tmpdir, netloc,
|
||||
upath.strip(os.sep).strip('/'))
|
||||
assert_equal(local_path, self.ds.abspath(valid_httpurl()))
|
||||
|
||||
def test_ValidFile(self):
|
||||
tmpfile = valid_textfile(self.tmpdir)
|
||||
tmpfilename = os.path.split(tmpfile)[-1]
|
||||
# Test with filename only
|
||||
assert_equal(tmpfile, self.ds.abspath(tmpfilename))
|
||||
# Test filename with complete path
|
||||
assert_equal(tmpfile, self.ds.abspath(tmpfile))
|
||||
|
||||
def test_InvalidHTTP(self):
|
||||
scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())
|
||||
invalidhttp = os.path.join(self.tmpdir, netloc,
|
||||
upath.strip(os.sep).strip('/'))
|
||||
assert_(invalidhttp != self.ds.abspath(valid_httpurl()))
|
||||
|
||||
def test_InvalidFile(self):
|
||||
invalidfile = valid_textfile(self.tmpdir)
|
||||
tmpfile = valid_textfile(self.tmpdir)
|
||||
tmpfilename = os.path.split(tmpfile)[-1]
|
||||
# Test with filename only
|
||||
assert_(invalidfile != self.ds.abspath(tmpfilename))
|
||||
# Test filename with complete path
|
||||
assert_(invalidfile != self.ds.abspath(tmpfile))
|
||||
|
||||
def test_sandboxing(self):
|
||||
tmpfile = valid_textfile(self.tmpdir)
|
||||
tmpfilename = os.path.split(tmpfile)[-1]
|
||||
|
||||
tmp_path = lambda x: os.path.abspath(self.ds.abspath(x))
|
||||
|
||||
assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir))
|
||||
assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir))
|
||||
assert_(tmp_path(tmpfile).startswith(self.tmpdir))
|
||||
assert_(tmp_path(tmpfilename).startswith(self.tmpdir))
|
||||
for fn in malicious_files:
|
||||
assert_(tmp_path(http_path+fn).startswith(self.tmpdir))
|
||||
assert_(tmp_path(fn).startswith(self.tmpdir))
|
||||
|
||||
def test_windows_os_sep(self):
|
||||
orig_os_sep = os.sep
|
||||
try:
|
||||
os.sep = '\\'
|
||||
self.test_ValidHTTP()
|
||||
self.test_ValidFile()
|
||||
self.test_InvalidHTTP()
|
||||
self.test_InvalidFile()
|
||||
self.test_sandboxing()
|
||||
finally:
|
||||
os.sep = orig_os_sep
|
||||
|
||||
|
||||
class TestRepositoryAbspath:
|
||||
def setup(self):
|
||||
self.tmpdir = os.path.abspath(mkdtemp())
|
||||
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
|
||||
|
||||
def teardown(self):
|
||||
rmtree(self.tmpdir)
|
||||
del self.repos
|
||||
|
||||
def test_ValidHTTP(self):
|
||||
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
|
||||
local_path = os.path.join(self.repos._destpath, netloc,
|
||||
upath.strip(os.sep).strip('/'))
|
||||
filepath = self.repos.abspath(valid_httpfile())
|
||||
assert_equal(local_path, filepath)
|
||||
|
||||
def test_sandboxing(self):
|
||||
tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))
|
||||
assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir))
|
||||
for fn in malicious_files:
|
||||
assert_(tmp_path(http_path+fn).startswith(self.tmpdir))
|
||||
assert_(tmp_path(fn).startswith(self.tmpdir))
|
||||
|
||||
def test_windows_os_sep(self):
|
||||
orig_os_sep = os.sep
|
||||
try:
|
||||
os.sep = '\\'
|
||||
self.test_ValidHTTP()
|
||||
self.test_sandboxing()
|
||||
finally:
|
||||
os.sep = orig_os_sep
|
||||
|
||||
|
||||
class TestRepositoryExists:
|
||||
def setup(self):
|
||||
self.tmpdir = mkdtemp()
|
||||
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
|
||||
|
||||
def teardown(self):
|
||||
rmtree(self.tmpdir)
|
||||
del self.repos
|
||||
|
||||
def test_ValidFile(self):
|
||||
# Create local temp file
|
||||
tmpfile = valid_textfile(self.tmpdir)
|
||||
assert_(self.repos.exists(tmpfile))
|
||||
|
||||
def test_InvalidFile(self):
|
||||
tmpfile = invalid_textfile(self.tmpdir)
|
||||
assert_equal(self.repos.exists(tmpfile), False)
|
||||
|
||||
def test_RemoveHTTPFile(self):
|
||||
assert_(self.repos.exists(valid_httpurl()))
|
||||
|
||||
def test_CachedHTTPFile(self):
|
||||
localfile = valid_httpurl()
|
||||
# Create a locally cached temp file with an URL based
|
||||
# directory structure. This is similar to what Repository.open
|
||||
# would do.
|
||||
scheme, netloc, upath, pms, qry, frg = urlparse(localfile)
|
||||
local_path = os.path.join(self.repos._destpath, netloc)
|
||||
os.mkdir(local_path, 0o0700)
|
||||
tmpfile = valid_textfile(local_path)
|
||||
assert_(self.repos.exists(tmpfile))
|
||||
|
||||
|
||||
class TestOpenFunc:
|
||||
def setup(self):
|
||||
self.tmpdir = mkdtemp()
|
||||
|
||||
def teardown(self):
|
||||
rmtree(self.tmpdir)
|
||||
|
||||
def test_DataSourceOpen(self):
|
||||
local_file = valid_textfile(self.tmpdir)
|
||||
# Test case where destpath is passed in
|
||||
fp = datasource.open(local_file, destpath=self.tmpdir)
|
||||
assert_(fp)
|
||||
fp.close()
|
||||
# Test case where default destpath is used
|
||||
fp = datasource.open(local_file)
|
||||
assert_(fp)
|
||||
fp.close()
|
||||
|
||||
def test_del_attr_handling():
|
||||
# DataSource __del__ can be called
|
||||
# even if __init__ fails when the
|
||||
# Exception object is caught by the
|
||||
# caller as happens in refguide_check
|
||||
# is_deprecated() function
|
||||
|
||||
ds = datasource.DataSource()
|
||||
# simulate failed __init__ by removing key attribute
|
||||
# produced within __init__ and expected by __del__
|
||||
del ds._istmpdest
|
||||
# should not raise an AttributeError if __del__
|
||||
# gracefully handles failed __init__:
|
||||
ds.__del__()
|
||||
353
venv/Lib/site-packages/numpy/lib/tests/test__iotools.py
Normal file
353
venv/Lib/site-packages/numpy/lib/tests/test__iotools.py
Normal file
|
|
@ -0,0 +1,353 @@
|
|||
import time
|
||||
from datetime import date
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (
|
||||
assert_, assert_equal, assert_allclose, assert_raises,
|
||||
)
|
||||
from numpy.lib._iotools import (
|
||||
LineSplitter, NameValidator, StringConverter,
|
||||
has_nested_fields, easy_dtype, flatten_dtype
|
||||
)
|
||||
|
||||
|
||||
class TestLineSplitter:
|
||||
"Tests the LineSplitter class."
|
||||
|
||||
def test_no_delimiter(self):
|
||||
"Test LineSplitter w/o delimiter"
|
||||
strg = " 1 2 3 4 5 # test"
|
||||
test = LineSplitter()(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '5'])
|
||||
test = LineSplitter('')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '5'])
|
||||
|
||||
def test_space_delimiter(self):
|
||||
"Test space delimiter"
|
||||
strg = " 1 2 3 4 5 # test"
|
||||
test = LineSplitter(' ')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5'])
|
||||
test = LineSplitter(' ')(strg)
|
||||
assert_equal(test, ['1 2 3 4', '5'])
|
||||
|
||||
def test_tab_delimiter(self):
|
||||
"Test tab delimiter"
|
||||
strg = " 1\t 2\t 3\t 4\t 5 6"
|
||||
test = LineSplitter('\t')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '5 6'])
|
||||
strg = " 1 2\t 3 4\t 5 6"
|
||||
test = LineSplitter('\t')(strg)
|
||||
assert_equal(test, ['1 2', '3 4', '5 6'])
|
||||
|
||||
def test_other_delimiter(self):
|
||||
"Test LineSplitter on delimiter"
|
||||
strg = "1,2,3,4,,5"
|
||||
test = LineSplitter(',')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5'])
|
||||
#
|
||||
strg = " 1,2,3,4,,5 # test"
|
||||
test = LineSplitter(',')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5'])
|
||||
|
||||
# gh-11028 bytes comment/delimiters should get encoded
|
||||
strg = b" 1,2,3,4,,5 % test"
|
||||
test = LineSplitter(delimiter=b',', comments=b'%')(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5'])
|
||||
|
||||
def test_constant_fixed_width(self):
|
||||
"Test LineSplitter w/ fixed-width fields"
|
||||
strg = " 1 2 3 4 5 # test"
|
||||
test = LineSplitter(3)(strg)
|
||||
assert_equal(test, ['1', '2', '3', '4', '', '5', ''])
|
||||
#
|
||||
strg = " 1 3 4 5 6# test"
|
||||
test = LineSplitter(20)(strg)
|
||||
assert_equal(test, ['1 3 4 5 6'])
|
||||
#
|
||||
strg = " 1 3 4 5 6# test"
|
||||
test = LineSplitter(30)(strg)
|
||||
assert_equal(test, ['1 3 4 5 6'])
|
||||
|
||||
def test_variable_fixed_width(self):
|
||||
strg = " 1 3 4 5 6# test"
|
||||
test = LineSplitter((3, 6, 6, 3))(strg)
|
||||
assert_equal(test, ['1', '3', '4 5', '6'])
|
||||
#
|
||||
strg = " 1 3 4 5 6# test"
|
||||
test = LineSplitter((6, 6, 9))(strg)
|
||||
assert_equal(test, ['1', '3 4', '5 6'])
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestNameValidator:
|
||||
|
||||
def test_case_sensitivity(self):
|
||||
"Test case sensitivity"
|
||||
names = ['A', 'a', 'b', 'c']
|
||||
test = NameValidator().validate(names)
|
||||
assert_equal(test, ['A', 'a', 'b', 'c'])
|
||||
test = NameValidator(case_sensitive=False).validate(names)
|
||||
assert_equal(test, ['A', 'A_1', 'B', 'C'])
|
||||
test = NameValidator(case_sensitive='upper').validate(names)
|
||||
assert_equal(test, ['A', 'A_1', 'B', 'C'])
|
||||
test = NameValidator(case_sensitive='lower').validate(names)
|
||||
assert_equal(test, ['a', 'a_1', 'b', 'c'])
|
||||
|
||||
# check exceptions
|
||||
assert_raises(ValueError, NameValidator, case_sensitive='foobar')
|
||||
|
||||
def test_excludelist(self):
|
||||
"Test excludelist"
|
||||
names = ['dates', 'data', 'Other Data', 'mask']
|
||||
validator = NameValidator(excludelist=['dates', 'data', 'mask'])
|
||||
test = validator.validate(names)
|
||||
assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])
|
||||
|
||||
def test_missing_names(self):
|
||||
"Test validate missing names"
|
||||
namelist = ('a', 'b', 'c')
|
||||
validator = NameValidator()
|
||||
assert_equal(validator(namelist), ['a', 'b', 'c'])
|
||||
namelist = ('', 'b', 'c')
|
||||
assert_equal(validator(namelist), ['f0', 'b', 'c'])
|
||||
namelist = ('a', 'b', '')
|
||||
assert_equal(validator(namelist), ['a', 'b', 'f0'])
|
||||
namelist = ('', 'f0', '')
|
||||
assert_equal(validator(namelist), ['f1', 'f0', 'f2'])
|
||||
|
||||
def test_validate_nb_names(self):
|
||||
"Test validate nb names"
|
||||
namelist = ('a', 'b', 'c')
|
||||
validator = NameValidator()
|
||||
assert_equal(validator(namelist, nbfields=1), ('a',))
|
||||
assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"),
|
||||
['a', 'b', 'c', 'g0', 'g1'])
|
||||
|
||||
def test_validate_wo_names(self):
|
||||
"Test validate no names"
|
||||
namelist = None
|
||||
validator = NameValidator()
|
||||
assert_(validator(namelist) is None)
|
||||
assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2'])
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _bytes_to_date(s):
|
||||
return date(*time.strptime(s, "%Y-%m-%d")[:3])
|
||||
|
||||
|
||||
class TestStringConverter:
|
||||
"Test StringConverter"
|
||||
|
||||
def test_creation(self):
|
||||
"Test creation of a StringConverter"
|
||||
converter = StringConverter(int, -99999)
|
||||
assert_equal(converter._status, 1)
|
||||
assert_equal(converter.default, -99999)
|
||||
|
||||
def test_upgrade(self):
|
||||
"Tests the upgrade method."
|
||||
|
||||
converter = StringConverter()
|
||||
assert_equal(converter._status, 0)
|
||||
|
||||
# test int
|
||||
assert_equal(converter.upgrade('0'), 0)
|
||||
assert_equal(converter._status, 1)
|
||||
|
||||
# On systems where long defaults to 32-bit, the statuses will be
|
||||
# offset by one, so we check for this here.
|
||||
import numpy.core.numeric as nx
|
||||
status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize)
|
||||
|
||||
# test int > 2**32
|
||||
assert_equal(converter.upgrade('17179869184'), 17179869184)
|
||||
assert_equal(converter._status, 1 + status_offset)
|
||||
|
||||
# test float
|
||||
assert_allclose(converter.upgrade('0.'), 0.0)
|
||||
assert_equal(converter._status, 2 + status_offset)
|
||||
|
||||
# test complex
|
||||
assert_equal(converter.upgrade('0j'), complex('0j'))
|
||||
assert_equal(converter._status, 3 + status_offset)
|
||||
|
||||
# test str
|
||||
# note that the longdouble type has been skipped, so the
|
||||
# _status increases by 2. Everything should succeed with
|
||||
# unicode conversion (8).
|
||||
for s in ['a', b'a']:
|
||||
res = converter.upgrade(s)
|
||||
assert_(type(res) is str)
|
||||
assert_equal(res, 'a')
|
||||
assert_equal(converter._status, 8 + status_offset)
|
||||
|
||||
def test_missing(self):
|
||||
"Tests the use of missing values."
|
||||
converter = StringConverter(missing_values=('missing',
|
||||
'missed'))
|
||||
converter.upgrade('0')
|
||||
assert_equal(converter('0'), 0)
|
||||
assert_equal(converter(''), converter.default)
|
||||
assert_equal(converter('missing'), converter.default)
|
||||
assert_equal(converter('missed'), converter.default)
|
||||
try:
|
||||
converter('miss')
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def test_upgrademapper(self):
|
||||
"Tests updatemapper"
|
||||
dateparser = _bytes_to_date
|
||||
_original_mapper = StringConverter._mapper[:]
|
||||
try:
|
||||
StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
|
||||
convert = StringConverter(dateparser, date(2000, 1, 1))
|
||||
test = convert('2001-01-01')
|
||||
assert_equal(test, date(2001, 1, 1))
|
||||
test = convert('2009-01-01')
|
||||
assert_equal(test, date(2009, 1, 1))
|
||||
test = convert('')
|
||||
assert_equal(test, date(2000, 1, 1))
|
||||
finally:
|
||||
StringConverter._mapper = _original_mapper
|
||||
|
||||
def test_string_to_object(self):
|
||||
"Make sure that string-to-object functions are properly recognized"
|
||||
old_mapper = StringConverter._mapper[:] # copy of list
|
||||
conv = StringConverter(_bytes_to_date)
|
||||
assert_equal(conv._mapper, old_mapper)
|
||||
assert_(hasattr(conv, 'default'))
|
||||
|
||||
def test_keep_default(self):
|
||||
"Make sure we don't lose an explicit default"
|
||||
converter = StringConverter(None, missing_values='',
|
||||
default=-999)
|
||||
converter.upgrade('3.14159265')
|
||||
assert_equal(converter.default, -999)
|
||||
assert_equal(converter.type, np.dtype(float))
|
||||
#
|
||||
converter = StringConverter(
|
||||
None, missing_values='', default=0)
|
||||
converter.upgrade('3.14159265')
|
||||
assert_equal(converter.default, 0)
|
||||
assert_equal(converter.type, np.dtype(float))
|
||||
|
||||
def test_keep_default_zero(self):
|
||||
"Check that we don't lose a default of 0"
|
||||
converter = StringConverter(int, default=0,
|
||||
missing_values="N/A")
|
||||
assert_equal(converter.default, 0)
|
||||
|
||||
def test_keep_missing_values(self):
|
||||
"Check that we're not losing missing values"
|
||||
converter = StringConverter(int, default=0,
|
||||
missing_values="N/A")
|
||||
assert_equal(
|
||||
converter.missing_values, {'', 'N/A'})
|
||||
|
||||
def test_int64_dtype(self):
|
||||
"Check that int64 integer types can be specified"
|
||||
converter = StringConverter(np.int64, default=0)
|
||||
val = "-9223372036854775807"
|
||||
assert_(converter(val) == -9223372036854775807)
|
||||
val = "9223372036854775807"
|
||||
assert_(converter(val) == 9223372036854775807)
|
||||
|
||||
def test_uint64_dtype(self):
|
||||
"Check that uint64 integer types can be specified"
|
||||
converter = StringConverter(np.uint64, default=0)
|
||||
val = "9223372043271415339"
|
||||
assert_(converter(val) == 9223372043271415339)
|
||||
|
||||
|
||||
class TestMiscFunctions:
|
||||
|
||||
def test_has_nested_dtype(self):
|
||||
"Test has_nested_dtype"
|
||||
ndtype = np.dtype(float)
|
||||
assert_equal(has_nested_fields(ndtype), False)
|
||||
ndtype = np.dtype([('A', '|S3'), ('B', float)])
|
||||
assert_equal(has_nested_fields(ndtype), False)
|
||||
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
|
||||
assert_equal(has_nested_fields(ndtype), True)
|
||||
|
||||
def test_easy_dtype(self):
|
||||
"Test ndtype on dtypes"
|
||||
# Simple case
|
||||
ndtype = float
|
||||
assert_equal(easy_dtype(ndtype), np.dtype(float))
|
||||
# As string w/o names
|
||||
ndtype = "i4, f8"
|
||||
assert_equal(easy_dtype(ndtype),
|
||||
np.dtype([('f0', "i4"), ('f1', "f8")]))
|
||||
# As string w/o names but different default format
|
||||
assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),
|
||||
np.dtype([('field_000', "i4"), ('field_001', "f8")]))
|
||||
# As string w/ names
|
||||
ndtype = "i4, f8"
|
||||
assert_equal(easy_dtype(ndtype, names="a, b"),
|
||||
np.dtype([('a', "i4"), ('b', "f8")]))
|
||||
# As string w/ names (too many)
|
||||
ndtype = "i4, f8"
|
||||
assert_equal(easy_dtype(ndtype, names="a, b, c"),
|
||||
np.dtype([('a', "i4"), ('b', "f8")]))
|
||||
# As string w/ names (not enough)
|
||||
ndtype = "i4, f8"
|
||||
assert_equal(easy_dtype(ndtype, names=", b"),
|
||||
np.dtype([('f0', "i4"), ('b', "f8")]))
|
||||
# ... (with different default format)
|
||||
assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),
|
||||
np.dtype([('a', "i4"), ('f00', "f8")]))
|
||||
# As list of tuples w/o names
|
||||
ndtype = [('A', int), ('B', float)]
|
||||
assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))
|
||||
# As list of tuples w/ names
|
||||
assert_equal(easy_dtype(ndtype, names="a,b"),
|
||||
np.dtype([('a', int), ('b', float)]))
|
||||
# As list of tuples w/ not enough names
|
||||
assert_equal(easy_dtype(ndtype, names="a"),
|
||||
np.dtype([('a', int), ('f0', float)]))
|
||||
# As list of tuples w/ too many names
|
||||
assert_equal(easy_dtype(ndtype, names="a,b,c"),
|
||||
np.dtype([('a', int), ('b', float)]))
|
||||
# As list of types w/o names
|
||||
ndtype = (int, float, float)
|
||||
assert_equal(easy_dtype(ndtype),
|
||||
np.dtype([('f0', int), ('f1', float), ('f2', float)]))
|
||||
# As list of types w names
|
||||
ndtype = (int, float, float)
|
||||
assert_equal(easy_dtype(ndtype, names="a, b, c"),
|
||||
np.dtype([('a', int), ('b', float), ('c', float)]))
|
||||
# As simple dtype w/ names
|
||||
ndtype = np.dtype(float)
|
||||
assert_equal(easy_dtype(ndtype, names="a, b, c"),
|
||||
np.dtype([(_, float) for _ in ('a', 'b', 'c')]))
|
||||
# As simple dtype w/o names (but multiple fields)
|
||||
ndtype = np.dtype(float)
|
||||
assert_equal(
|
||||
easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),
|
||||
np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))
|
||||
|
||||
def test_flatten_dtype(self):
|
||||
"Testing flatten_dtype"
|
||||
# Standard dtype
|
||||
dt = np.dtype([("a", "f8"), ("b", "f8")])
|
||||
dt_flat = flatten_dtype(dt)
|
||||
assert_equal(dt_flat, [float, float])
|
||||
# Recursive dtype
|
||||
dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])
|
||||
dt_flat = flatten_dtype(dt)
|
||||
assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])
|
||||
# dtype with shaped fields
|
||||
dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])
|
||||
dt_flat = flatten_dtype(dt)
|
||||
assert_equal(dt_flat, [float, int])
|
||||
dt_flat = flatten_dtype(dt, True)
|
||||
assert_equal(dt_flat, [float] * 2 + [int] * 3)
|
||||
# dtype w/ titles
|
||||
dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
|
||||
dt_flat = flatten_dtype(dt)
|
||||
assert_equal(dt_flat, [float, float])
|
||||
64
venv/Lib/site-packages/numpy/lib/tests/test__version.py
Normal file
64
venv/Lib/site-packages/numpy/lib/tests/test__version.py
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
"""Tests for the NumpyVersion class.
|
||||
|
||||
"""
|
||||
from numpy.testing import assert_, assert_raises
|
||||
from numpy.lib import NumpyVersion
|
||||
|
||||
|
||||
def test_main_versions():
|
||||
assert_(NumpyVersion('1.8.0') == '1.8.0')
|
||||
for ver in ['1.9.0', '2.0.0', '1.8.1']:
|
||||
assert_(NumpyVersion('1.8.0') < ver)
|
||||
|
||||
for ver in ['1.7.0', '1.7.1', '0.9.9']:
|
||||
assert_(NumpyVersion('1.8.0') > ver)
|
||||
|
||||
|
||||
def test_version_1_point_10():
|
||||
# regression test for gh-2998.
|
||||
assert_(NumpyVersion('1.9.0') < '1.10.0')
|
||||
assert_(NumpyVersion('1.11.0') < '1.11.1')
|
||||
assert_(NumpyVersion('1.11.0') == '1.11.0')
|
||||
assert_(NumpyVersion('1.99.11') < '1.99.12')
|
||||
|
||||
|
||||
def test_alpha_beta_rc():
|
||||
assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1')
|
||||
for ver in ['1.8.0', '1.8.0rc2']:
|
||||
assert_(NumpyVersion('1.8.0rc1') < ver)
|
||||
|
||||
for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
|
||||
assert_(NumpyVersion('1.8.0rc1') > ver)
|
||||
|
||||
assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')
|
||||
|
||||
|
||||
def test_dev_version():
|
||||
assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')
|
||||
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']:
|
||||
assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver)
|
||||
|
||||
assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111')
|
||||
|
||||
|
||||
def test_dev_a_b_rc_mixed():
|
||||
assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111')
|
||||
assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')
|
||||
|
||||
|
||||
def test_dev0_version():
|
||||
assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
|
||||
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
|
||||
assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)
|
||||
|
||||
assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')
|
||||
|
||||
|
||||
def test_dev0_a_b_rc_mixed():
|
||||
assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
|
||||
assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')
|
||||
|
||||
|
||||
def test_raises():
|
||||
for ver in ['1.9', '1,9.0', '1.7.x']:
|
||||
assert_raises(ValueError, NumpyVersion, ver)
|
||||
1364
venv/Lib/site-packages/numpy/lib/tests/test_arraypad.py
Normal file
1364
venv/Lib/site-packages/numpy/lib/tests/test_arraypad.py
Normal file
File diff suppressed because it is too large
Load diff
673
venv/Lib/site-packages/numpy/lib/tests/test_arraysetops.py
Normal file
673
venv/Lib/site-packages/numpy/lib/tests/test_arraysetops.py
Normal file
|
|
@ -0,0 +1,673 @@
|
|||
"""Test functions for 1D array set operations.
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
from numpy.testing import (assert_array_equal, assert_equal,
|
||||
assert_raises, assert_raises_regex)
|
||||
from numpy.lib.arraysetops import (
|
||||
ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
|
||||
)
|
||||
import pytest
|
||||
|
||||
|
||||
class TestSetOps:
|
||||
|
||||
def test_intersect1d(self):
|
||||
# unique inputs
|
||||
a = np.array([5, 7, 1, 2])
|
||||
b = np.array([2, 4, 3, 1, 5])
|
||||
|
||||
ec = np.array([1, 2, 5])
|
||||
c = intersect1d(a, b, assume_unique=True)
|
||||
assert_array_equal(c, ec)
|
||||
|
||||
# non-unique inputs
|
||||
a = np.array([5, 5, 7, 1, 2])
|
||||
b = np.array([2, 1, 4, 3, 3, 1, 5])
|
||||
|
||||
ed = np.array([1, 2, 5])
|
||||
c = intersect1d(a, b)
|
||||
assert_array_equal(c, ed)
|
||||
assert_array_equal([], intersect1d([], []))
|
||||
|
||||
def test_intersect1d_array_like(self):
|
||||
# See gh-11772
|
||||
class Test:
|
||||
def __array__(self):
|
||||
return np.arange(3)
|
||||
|
||||
a = Test()
|
||||
res = intersect1d(a, a)
|
||||
assert_array_equal(res, a)
|
||||
res = intersect1d([1, 2, 3], [1, 2, 3])
|
||||
assert_array_equal(res, [1, 2, 3])
|
||||
|
||||
def test_intersect1d_indices(self):
|
||||
# unique inputs
|
||||
a = np.array([1, 2, 3, 4])
|
||||
b = np.array([2, 1, 4, 6])
|
||||
c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
|
||||
ee = np.array([1, 2, 4])
|
||||
assert_array_equal(c, ee)
|
||||
assert_array_equal(a[i1], ee)
|
||||
assert_array_equal(b[i2], ee)
|
||||
|
||||
# non-unique inputs
|
||||
a = np.array([1, 2, 2, 3, 4, 3, 2])
|
||||
b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
|
||||
c, i1, i2 = intersect1d(a, b, return_indices=True)
|
||||
ef = np.array([1, 2, 3, 4])
|
||||
assert_array_equal(c, ef)
|
||||
assert_array_equal(a[i1], ef)
|
||||
assert_array_equal(b[i2], ef)
|
||||
|
||||
# non1d, unique inputs
|
||||
a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
|
||||
b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
|
||||
c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
|
||||
ui1 = np.unravel_index(i1, a.shape)
|
||||
ui2 = np.unravel_index(i2, b.shape)
|
||||
ea = np.array([2, 6, 7, 8])
|
||||
assert_array_equal(ea, a[ui1])
|
||||
assert_array_equal(ea, b[ui2])
|
||||
|
||||
# non1d, not assumed to be uniqueinputs
|
||||
a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
|
||||
b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
|
||||
c, i1, i2 = intersect1d(a, b, return_indices=True)
|
||||
ui1 = np.unravel_index(i1, a.shape)
|
||||
ui2 = np.unravel_index(i2, b.shape)
|
||||
ea = np.array([2, 7, 8])
|
||||
assert_array_equal(ea, a[ui1])
|
||||
assert_array_equal(ea, b[ui2])
|
||||
|
||||
def test_setxor1d(self):
|
||||
a = np.array([5, 7, 1, 2])
|
||||
b = np.array([2, 4, 3, 1, 5])
|
||||
|
||||
ec = np.array([3, 4, 7])
|
||||
c = setxor1d(a, b)
|
||||
assert_array_equal(c, ec)
|
||||
|
||||
a = np.array([1, 2, 3])
|
||||
b = np.array([6, 5, 4])
|
||||
|
||||
ec = np.array([1, 2, 3, 4, 5, 6])
|
||||
c = setxor1d(a, b)
|
||||
assert_array_equal(c, ec)
|
||||
|
||||
a = np.array([1, 8, 2, 3])
|
||||
b = np.array([6, 5, 4, 8])
|
||||
|
||||
ec = np.array([1, 2, 3, 4, 5, 6])
|
||||
c = setxor1d(a, b)
|
||||
assert_array_equal(c, ec)
|
||||
|
||||
assert_array_equal([], setxor1d([], []))
|
||||
|
||||
def test_ediff1d(self):
|
||||
zero_elem = np.array([])
|
||||
one_elem = np.array([1])
|
||||
two_elem = np.array([1, 2])
|
||||
|
||||
assert_array_equal([], ediff1d(zero_elem))
|
||||
assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
|
||||
assert_array_equal([0], ediff1d(zero_elem, to_end=0))
|
||||
assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
|
||||
assert_array_equal([], ediff1d(one_elem))
|
||||
assert_array_equal([1], ediff1d(two_elem))
|
||||
assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9))
|
||||
assert_array_equal([5, 6, 1, 7, 8],
|
||||
ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8]))
|
||||
assert_array_equal([1, 9], ediff1d(two_elem, to_end=9))
|
||||
assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8]))
|
||||
assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))
|
||||
assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))
|
||||
|
||||
@pytest.mark.parametrize("ary, prepend, append", [
    # should fail because trying to cast
    # np.nan standard floating point value
    # into an integer array:
    (np.array([1, 2, 3], dtype=np.int64),
     None,
     np.nan),
    # should fail because attempting
    # to downcast to int type:
    (np.array([1, 2, 3], dtype=np.int64),
     np.array([5, 7, 2], dtype=np.float32),
     None),
    # should fail because attempting to cast
    # two special floating point values
    # to integers (on both sides of ary):
    (np.array([1., 3., 9.], dtype=np.int8),
     np.nan,
     np.nan),
    ])
def test_ediff1d_forbidden_type_casts(self, ary, prepend, append):
    # verify resolution of gh-11490: ediff1d must raise TypeError (not
    # silently cast) when to_begin/to_end cannot be safely represented
    # in the input array's dtype.
    msg = 'must be compatible'
    with assert_raises_regex(TypeError, msg):
        ediff1d(ary=ary,
                to_end=append,
                to_begin=prepend)
|
||||
|
||||
@pytest.mark.parametrize(
    "ary,prepend,append,expected",
    [
        (np.array([1, 2, 3], dtype=np.int16),
         2**16,  # will be cast to int16 under same kind rule.
         2**16 + 4,
         np.array([0, 1, 1, 4], dtype=np.int16)),
        (np.array([1, 2, 3], dtype=np.float32),
         np.array([5], dtype=np.float64),
         None,
         np.array([5, 1, 1], dtype=np.float32)),
        (np.array([1, 2, 3], dtype=np.int32),
         0,
         0,
         np.array([0, 1, 1, 0], dtype=np.int32)),
        (np.array([1, 2, 3], dtype=np.int64),
         3,
         -9,
         np.array([3, 1, 1, -9], dtype=np.int64)),
    ]
)
def test_ediff1d_scalar_handling(self,
                                 ary,
                                 prepend,
                                 append,
                                 expected):
    # maintain backwards-compatibility
    # of scalar prepend / append behavior
    # in ediff1d following fix for gh-11490:
    # scalars of a compatible kind are cast to the input dtype, and the
    # result keeps the input array's dtype.
    actual = np.ediff1d(ary=ary,
                        to_end=append,
                        to_begin=prepend)
    assert_equal(actual, expected)
    assert actual.dtype == expected.dtype
|
||||
|
||||
def test_isin(self):
    # the tests for in1d cover most of isin's behavior
    # if in1d is removed, would need to change those tests to test
    # isin instead.
    def _isin_slow(a, b):
        # Reference implementation: flatten b to a Python list and use
        # the plain `in` operator for a single element a.
        b = np.asarray(b).flatten().tolist()
        return a in b
    # Vectorize the reference so it broadcasts over `a` like isin does;
    # argument 1 (b) is excluded from vectorization.
    isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1})

    def assert_isin_equal(a, b):
        # isin must agree element-wise with the slow reference.
        x = isin(a, b)
        y = isin_slow(a, b)
        assert_array_equal(x, y)

    # multidimensional arrays in both arguments
    a = np.arange(24).reshape([2, 3, 4])
    b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]])
    assert_isin_equal(a, b)

    # array-likes as both arguments
    c = [(9, 8), (7, 6)]
    d = (9, 7)
    assert_isin_equal(c, d)

    # zero-d array:
    f = np.array(3)
    assert_isin_equal(f, b)
    assert_isin_equal(a, f)
    assert_isin_equal(f, f)

    # scalar:
    assert_isin_equal(5, b)
    assert_isin_equal(a, 6)
    assert_isin_equal(5, 6)

    # empty array-like:
    x = []
    assert_isin_equal(x, b)
    assert_isin_equal(a, x)
    assert_isin_equal(x, x)
|
||||
|
||||
def test_in1d(self):
    # we use two different sizes for the b array here to test the
    # two different paths in in1d().
    for mult in (1, 10):
        # One check without np.array to make sure lists are handled correct
        a = [5, 7, 1, 2]
        b = [2, 4, 3, 1, 5] * mult
        ec = np.array([True, False, True, True])
        c = in1d(a, b, assume_unique=True)
        assert_array_equal(c, ec)

        # mutate one element of a and re-check
        a[0] = 8
        ec = np.array([False, False, True, True])
        c = in1d(a, b, assume_unique=True)
        assert_array_equal(c, ec)

        a[0], a[3] = 4, 8
        ec = np.array([True, False, True, False])
        c = in1d(a, b, assume_unique=True)
        assert_array_equal(c, ec)

        # non-unique a, default assume_unique=False
        a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
        b = [2, 3, 4] * mult
        ec = [False, True, False, True, True, True, True, True, True,
              False, True, False, False, False]
        c = in1d(a, b)
        assert_array_equal(c, ec)

        # growing b flips most of the expectations to True
        b = b + [5, 5, 4] * mult
        ec = [True, True, True, True, True, True, True, True, True, True,
              True, False, True, True]
        c = in1d(a, b)
        assert_array_equal(c, ec)

        a = np.array([5, 7, 1, 2])
        b = np.array([2, 4, 3, 1, 5] * mult)
        ec = np.array([True, False, True, True])
        c = in1d(a, b)
        assert_array_equal(c, ec)

        # duplicated values in both a and b
        a = np.array([5, 7, 1, 1, 2])
        b = np.array([2, 4, 3, 3, 1, 5] * mult)
        ec = np.array([True, False, True, True, True])
        c = in1d(a, b)
        assert_array_equal(c, ec)

        a = np.array([5, 5])
        b = np.array([2, 2] * mult)
        ec = np.array([False, False])
        c = in1d(a, b)
        assert_array_equal(c, ec)

    # single-element and empty edge cases
    a = np.array([5])
    b = np.array([2])
    ec = np.array([False])
    c = in1d(a, b)
    assert_array_equal(c, ec)

    assert_array_equal(in1d([], []), [])
|
||||
|
||||
def test_in1d_char_array(self):
    # in1d must also work on string (character) arrays
    a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
    b = np.array(['a', 'c'])

    ec = np.array([True, False, True, False, False, True, False, False])
    c = in1d(a, b)

    assert_array_equal(c, ec)
|
||||
|
||||
def test_in1d_invert(self):
    "Test in1d's invert parameter"
    # We use two different sizes for the b array here to test the
    # two different paths in in1d().
    # invert=True must be exactly the logical negation of invert=False.
    for mult in (1, 10):
        a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
        b = [2, 3, 4] * mult
        assert_array_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
|
||||
|
||||
def test_in1d_ravel(self):
    # Test that in1d ravels its input arrays. This is not documented
    # behavior however. The test is to ensure consistency.
    a = np.arange(6).reshape(2, 3)
    b = np.arange(3, 9).reshape(3, 2)
    long_b = np.arange(3, 63).reshape(30, 2)
    ec = np.array([False, False, False, True, True, True])

    # both assume_unique paths, and both the short- and long-b code paths
    assert_array_equal(in1d(a, b, assume_unique=True), ec)
    assert_array_equal(in1d(a, b, assume_unique=False), ec)
    assert_array_equal(in1d(a, long_b, assume_unique=True), ec)
    assert_array_equal(in1d(a, long_b, assume_unique=False), ec)
|
||||
|
||||
def test_in1d_first_array_is_object(self):
    # object-dtype ar1 vs numeric ar2: no match expected
    ar1 = [None]
    ar2 = np.array([1]*10)
    expected = np.array([False])
    result = np.in1d(ar1, ar2)
    assert_array_equal(result, expected)

def test_in1d_second_array_is_object(self):
    # numeric scalar ar1 vs object-dtype ar2: no match expected
    ar1 = 1
    ar2 = np.array([None]*10)
    expected = np.array([False])
    result = np.in1d(ar1, ar2)
    assert_array_equal(result, expected)

def test_in1d_both_arrays_are_object(self):
    # object-dtype on both sides: identical objects must match
    ar1 = [None]
    ar2 = np.array([None]*10)
    expected = np.array([True])
    result = np.in1d(ar1, ar2)
    assert_array_equal(result, expected)

def test_in1d_both_arrays_have_structured_dtype(self):
    # Test arrays of a structured data type containing an integer field
    # and a field of dtype `object` allowing for arbitrary Python objects
    dt = np.dtype([('field1', int), ('field2', object)])
    ar1 = np.array([(1, None)], dtype=dt)
    ar2 = np.array([(1, None)]*10, dtype=dt)
    expected = np.array([True])
    result = np.in1d(ar1, ar2)
    assert_array_equal(result, expected)
|
||||
|
||||
def test_union1d(self):
|
||||
a = np.array([5, 4, 7, 1, 2])
|
||||
b = np.array([2, 4, 3, 3, 2, 1, 5])
|
||||
|
||||
ec = np.array([1, 2, 3, 4, 5, 7])
|
||||
c = union1d(a, b)
|
||||
assert_array_equal(c, ec)
|
||||
|
||||
# Tests gh-10340, arguments to union1d should be
|
||||
# flattened if they are not already 1D
|
||||
x = np.array([[0, 1, 2], [3, 4, 5]])
|
||||
y = np.array([0, 1, 2, 3, 4])
|
||||
ez = np.array([0, 1, 2, 3, 4, 5])
|
||||
z = union1d(x, y)
|
||||
assert_array_equal(z, ez)
|
||||
|
||||
assert_array_equal([], union1d([], []))
|
||||
|
||||
def test_setdiff1d(self):
|
||||
a = np.array([6, 5, 4, 7, 1, 2, 7, 4])
|
||||
b = np.array([2, 4, 3, 3, 2, 1, 5])
|
||||
|
||||
ec = np.array([6, 7])
|
||||
c = setdiff1d(a, b)
|
||||
assert_array_equal(c, ec)
|
||||
|
||||
a = np.arange(21)
|
||||
b = np.arange(19)
|
||||
ec = np.array([19, 20])
|
||||
c = setdiff1d(a, b)
|
||||
assert_array_equal(c, ec)
|
||||
|
||||
assert_array_equal([], setdiff1d([], []))
|
||||
a = np.array((), np.uint32)
|
||||
assert_equal(setdiff1d(a, []).dtype, np.uint32)
|
||||
|
||||
def test_setdiff1d_unique(self):
|
||||
a = np.array([3, 2, 1])
|
||||
b = np.array([7, 5, 2])
|
||||
expected = np.array([3, 1])
|
||||
actual = setdiff1d(a, b, assume_unique=True)
|
||||
assert_equal(actual, expected)
|
||||
|
||||
def test_setdiff1d_char_array(self):
|
||||
a = np.array(['a', 'b', 'c'])
|
||||
b = np.array(['a', 'b', 's'])
|
||||
assert_array_equal(setdiff1d(a, b), np.array(['c']))
|
||||
|
||||
def test_manyways(self):
|
||||
a = np.array([5, 7, 1, 2, 8])
|
||||
b = np.array([9, 8, 2, 4, 3, 1, 5])
|
||||
|
||||
c1 = setxor1d(a, b)
|
||||
aux1 = intersect1d(a, b)
|
||||
aux2 = union1d(a, b)
|
||||
c2 = setdiff1d(aux2, aux1)
|
||||
assert_array_equal(c1, c2)
|
||||
|
||||
|
||||
class TestUnique:
    """Tests for np.unique: 1-D behaviour under every combination of the
    return_index / return_inverse / return_counts flags, plus axis-based
    uniqueness, masked arrays and assorted regression tickets."""

    def test_unique_1d(self):
        # Exercise all eight flag combinations across numeric, object and
        # structured dtypes using a shared checker.

        def check_all(a, b, i1, i2, c, dt):
            # a: input, b: expected uniques, i1: expected first-occurrence
            # indices, i2: expected inverse mapping, c: expected counts,
            # dt: dtype label used only in failure messages.
            base_msg = 'check {0} failed for type {1}'

            msg = base_msg.format('values', dt)
            v = unique(a)
            assert_array_equal(v, b, msg)

            msg = base_msg.format('return_index', dt)
            v, j = unique(a, True, False, False)
            assert_array_equal(v, b, msg)
            assert_array_equal(j, i1, msg)

            msg = base_msg.format('return_inverse', dt)
            v, j = unique(a, False, True, False)
            assert_array_equal(v, b, msg)
            assert_array_equal(j, i2, msg)

            msg = base_msg.format('return_counts', dt)
            v, j = unique(a, False, False, True)
            assert_array_equal(v, b, msg)
            assert_array_equal(j, c, msg)

            msg = base_msg.format('return_index and return_inverse', dt)
            v, j1, j2 = unique(a, True, True, False)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i1, msg)
            assert_array_equal(j2, i2, msg)

            msg = base_msg.format('return_index and return_counts', dt)
            v, j1, j2 = unique(a, True, False, True)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i1, msg)
            assert_array_equal(j2, c, msg)

            msg = base_msg.format('return_inverse and return_counts', dt)
            v, j1, j2 = unique(a, False, True, True)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i2, msg)
            assert_array_equal(j2, c, msg)

            msg = base_msg.format(('return_index, return_inverse '
                                   'and return_counts'), dt)
            v, j1, j2, j3 = unique(a, True, True, True)
            assert_array_equal(v, b, msg)
            assert_array_equal(j1, i1, msg)
            assert_array_equal(j2, i2, msg)
            assert_array_equal(j3, c, msg)

        # base fixture: repeated pattern so counts/inverse are non-trivial
        a = [5, 7, 1, 2, 1, 5, 7]*10
        b = [1, 2, 5, 7]
        i1 = [2, 3, 0, 1]
        i2 = [2, 3, 0, 1, 0, 2, 3]*10
        c = np.multiply([2, 1, 2, 2], 10)

        # test for numeric arrays
        types = []
        types.extend(np.typecodes['AllInteger'])
        types.extend(np.typecodes['AllFloat'])
        types.append('datetime64[D]')
        types.append('timedelta64[D]')
        for dt in types:
            aa = np.array(a, dt)
            bb = np.array(b, dt)
            check_all(aa, bb, i1, i2, c, dt)

        # test for object arrays
        dt = 'O'
        aa = np.empty(len(a), dt)
        aa[:] = a
        bb = np.empty(len(b), dt)
        bb[:] = b
        check_all(aa, bb, i1, i2, c, dt)

        # test for structured arrays
        dt = [('', 'i'), ('', 'i')]
        aa = np.array(list(zip(a, a)), dt)
        bb = np.array(list(zip(b, b)), dt)
        check_all(aa, bb, i1, i2, c, dt)

        # test for ticket #2799: complex values that compare equal must
        # collapse to a single unique entry
        aa = [1. + 0.j, 1 - 1.j, 1]
        assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j])

        # test for ticket #4785: list-of-tuples input is flattened
        a = [(1, 2), (1, 2), (2, 3)]
        unq = [1, 2, 3]
        inv = [0, 1, 0, 1, 1, 2]
        a1 = unique(a)
        assert_array_equal(a1, unq)
        a2, a2_inv = unique(a, return_inverse=True)
        assert_array_equal(a2, unq)
        assert_array_equal(a2_inv, inv)

        # test for chararrays with return_inverse (gh-5099)
        a = np.chararray(5)
        a[...] = ''
        a2, a2_inv = np.unique(a, return_inverse=True)
        assert_array_equal(a2_inv, np.zeros(5))

        # test for ticket #9137: empty input must still give intp-typed
        # index/inverse arrays
        a = []
        a1_idx = np.unique(a, return_index=True)[1]
        a2_inv = np.unique(a, return_inverse=True)[1]
        a3_idx, a3_inv = np.unique(a, return_index=True,
                                   return_inverse=True)[1:]
        assert_equal(a1_idx.dtype, np.intp)
        assert_equal(a2_inv.dtype, np.intp)
        assert_equal(a3_idx.dtype, np.intp)
        assert_equal(a3_inv.dtype, np.intp)

    def test_unique_axis_errors(self):
        # object dtype (and structured dtypes containing object fields)
        # cannot be viewed as void rows, so axis-based unique must raise.
        assert_raises(TypeError, self._run_axis_tests, object)
        assert_raises(TypeError, self._run_axis_tests,
                      [('a', int), ('b', object)])

        # out-of-range axis values must raise AxisError
        assert_raises(np.AxisError, unique, np.arange(10), axis=2)
        assert_raises(np.AxisError, unique, np.arange(10), axis=-2)

    def test_unique_axis_list(self):
        # nested lists must behave exactly like the equivalent ndarray
        msg = "Unique failed on list of lists"
        inp = [[0, 1, 0], [0, 1, 0]]
        inp_arr = np.asarray(inp)
        assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg)
        assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg)

    def test_unique_axis(self):
        # run the shared axis checks over all supported dtypes
        types = []
        types.extend(np.typecodes['AllInteger'])
        types.extend(np.typecodes['AllFloat'])
        types.append('datetime64[D]')
        types.append('timedelta64[D]')
        types.append([('a', int), ('b', int)])
        types.append([('a', int), ('b', float)])

        for dtype in types:
            self._run_axis_tests(dtype)

        # booleans with different bit patterns must still compare equal
        msg = 'Non-bitwise-equal booleans test failed'
        data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool)
        result = np.array([[False, True], [True, True]], dtype=bool)
        assert_array_equal(unique(data, axis=0), result, msg)

        # -0.0 and 0.0 compare equal and must collapse to one row
        msg = 'Negative zero equality test failed'
        data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]])
        result = np.array([[-0.0, 0.0]])
        assert_array_equal(unique(data, axis=0), result, msg)

    @pytest.mark.parametrize("axis", [0, -1])
    def test_unique_1d_with_axis(self, axis):
        # on a 1-D array, axis=0 and axis=-1 name the same (only) axis
        x = np.array([4, 3, 2, 3, 2, 1, 2, 2])
        uniq = unique(x, axis=axis)
        assert_array_equal(uniq, [1, 2, 3, 4])

    def test_unique_axis_zeros(self):
        # issue 15559: zero-sized dimensions must be handled gracefully
        single_zero = np.empty(shape=(2, 0), dtype=np.int8)
        uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True,
                                     return_inverse=True, return_counts=True)

        # there's 1 element of shape (0,) along axis 0
        assert_equal(uniq.dtype, single_zero.dtype)
        assert_array_equal(uniq, np.empty(shape=(1, 0)))
        assert_array_equal(idx, np.array([0]))
        assert_array_equal(inv, np.array([0, 0]))
        assert_array_equal(cnt, np.array([2]))

        # there's 0 elements of shape (2,) along axis 1
        uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True,
                                     return_inverse=True, return_counts=True)

        assert_equal(uniq.dtype, single_zero.dtype)
        assert_array_equal(uniq, np.empty(shape=(2, 0)))
        assert_array_equal(idx, np.array([]))
        assert_array_equal(inv, np.array([]))
        assert_array_equal(cnt, np.array([]))

        # test a "complicated" shape: the axis keeps 0 elements if it was
        # already empty, otherwise collapses to 1 (all-empty rows equal)
        shape = (0, 2, 0, 3, 0, 4, 0)
        multiple_zeros = np.empty(shape=shape)
        for axis in range(len(shape)):
            expected_shape = list(shape)
            if shape[axis] == 0:
                expected_shape[axis] = 0
            else:
                expected_shape[axis] = 1

            assert_array_equal(unique(multiple_zeros, axis=axis),
                               np.empty(shape=expected_shape))

    def test_unique_masked(self):
        # issue 8664: unique on a masked array must give the same values
        # and mask whether or not extra outputs are requested
        x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0],
                     dtype='uint8')
        y = np.ma.masked_equal(x, 0)

        v = np.unique(y)
        v2, i, c = np.unique(y, return_index=True, return_counts=True)

        msg = 'Unique returned different results when asked for index'
        assert_array_equal(v.data, v2.data, msg)
        assert_array_equal(v.mask, v2.mask, msg)

    def test_unique_sort_order_with_axis(self):
        # These tests fail if sorting along axis is done by treating subarrays
        # as unsigned byte strings. See gh-10495.
        fmt = "sort order incorrect for integer type '%s'"
        for dt in 'bhilq':
            a = np.array([[-1], [0]], dt)
            b = np.unique(a, axis=0)
            assert_array_equal(a, b, fmt % dt)

    def _run_axis_tests(self, dtype):
        # Shared axis-based checks (used by test_unique_axis and, for the
        # error cases, test_unique_axis_errors) for a single dtype.
        data = np.array([[0, 1, 0, 0],
                         [1, 0, 0, 0],
                         [0, 1, 0, 0],
                         [1, 0, 0, 0]]).astype(dtype)

        msg = 'Unique with 1d array and axis=0 failed'
        result = np.array([0, 1])
        assert_array_equal(unique(data), result.astype(dtype), msg)

        msg = 'Unique with 2d array and axis=0 failed'
        result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]])
        assert_array_equal(unique(data, axis=0), result.astype(dtype), msg)

        msg = 'Unique with 2d array and axis=1 failed'
        result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]])
        assert_array_equal(unique(data, axis=1), result.astype(dtype), msg)

        msg = 'Unique with 3d array and axis=2 failed'
        data3d = np.array([[[1, 1],
                            [1, 0]],
                           [[0, 1],
                            [0, 0]]]).astype(dtype)
        result = np.take(data3d, [1, 0], axis=2)
        assert_array_equal(unique(data3d, axis=2), result, msg)

        uniq, idx, inv, cnt = unique(data, axis=0, return_index=True,
                                     return_inverse=True, return_counts=True)
        msg = "Unique's return_index=True failed with axis=0"
        assert_array_equal(data[idx], uniq, msg)
        msg = "Unique's return_inverse=True failed with axis=0"
        # NOTE(review): msg is computed but not passed here — likely an
        # upstream oversight; left unchanged in this documentation pass.
        assert_array_equal(uniq[inv], data)
        msg = "Unique's return_counts=True failed with axis=0"
        assert_array_equal(cnt, np.array([2, 2]), msg)

        uniq, idx, inv, cnt = unique(data, axis=1, return_index=True,
                                     return_inverse=True, return_counts=True)
        msg = "Unique's return_index=True failed with axis=1"
        # NOTE(review): msg also unused on the next two asserts.
        assert_array_equal(data[:, idx], uniq)
        msg = "Unique's return_inverse=True failed with axis=1"
        assert_array_equal(uniq[:, inv], data)
        msg = "Unique's return_counts=True failed with axis=1"
        assert_array_equal(cnt, np.array([2, 1, 1]), msg)
|
||||
46
venv/Lib/site-packages/numpy/lib/tests/test_arrayterator.py
Normal file
46
venv/Lib/site-packages/numpy/lib/tests/test_arrayterator.py
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
from operator import mul
|
||||
from functools import reduce
|
||||
|
||||
import numpy as np
|
||||
from numpy.random import randint
|
||||
from numpy.lib import Arrayterator
|
||||
from numpy.testing import assert_
|
||||
|
||||
|
||||
def test():
    """Randomized smoke test for numpy.lib.Arrayterator: builds an array
    of random shape, iterates it through a random buffer size, and checks
    buffered iteration and slicing against the plain ndarray."""
    # seeding with an array fixes the RNG state, making the test deterministic
    np.random.seed(np.arange(10))

    # Create a random array
    ndims = randint(5)+1
    shape = tuple(randint(10)+1 for dim in range(ndims))
    els = reduce(mul, shape)
    a = np.arange(els)
    a.shape = shape

    # buf_size may come out 0; the checks below fall back to `els` then
    buf_size = randint(2*els)
    b = Arrayterator(a, buf_size)

    # Check that each block has at most ``buf_size`` elements
    for block in b:
        assert_(len(block.flat) <= (buf_size or els))

    # Check that all elements are iterated correctly
    assert_(list(b.flat) == list(a.flat))

    # Slice arrayterator
    start = [randint(dim) for dim in shape]
    stop = [randint(dim)+1 for dim in shape]
    step = [randint(dim)+1 for dim in shape]
    slice_ = tuple(slice(*t) for t in zip(start, stop, step))
    c = b[slice_]
    d = a[slice_]

    # Check that each block has at most ``buf_size`` elements
    for block in c:
        assert_(len(block.flat) <= (buf_size or els))

    # Check that the arrayterator is sliced correctly
    assert_(np.all(c.__array__() == d))

    # Check that all elements are iterated correctly
    assert_(list(c.flat) == list(d.flat))
|
||||
380
venv/Lib/site-packages/numpy/lib/tests/test_financial.py
Normal file
380
venv/Lib/site-packages/numpy/lib/tests/test_financial.py
Normal file
|
|
@ -0,0 +1,380 @@
|
|||
import warnings
|
||||
from decimal import Decimal
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (
|
||||
assert_, assert_almost_equal, assert_allclose, assert_equal, assert_raises
|
||||
)
|
||||
|
||||
|
||||
def filter_deprecation(func):
    """Decorator: run *func* while recording warnings, then assert that
    every warning raised during the call was a DeprecationWarning.

    Improvements over the original: the wrapped function's return value
    is now propagated (it was silently discarded), and functools.wraps
    preserves the wrapped function's name/docstring so test reports and
    introspection stay readable.  Callers that ignored the return value
    are unaffected.
    """
    from functools import wraps  # local import keeps this edit self-contained

    @wraps(func)
    def newfunc(*args, **kwargs):
        with warnings.catch_warnings(record=True) as ws:
            # 'always' so repeated deprecations are not deduplicated away
            warnings.filterwarnings('always', category=DeprecationWarning)
            result = func(*args, **kwargs)
        # Any non-deprecation warning means the call misbehaved.
        assert_(all(w.category is DeprecationWarning for w in ws))
        return result
    return newfunc
|
||||
|
||||
|
||||
class TestFinancial:
|
||||
@filter_deprecation
def test_npv_irr_congruence(self):
    # IRR is defined as the rate required for the present value of a
    # series of cashflows to be zero i.e. NPV(IRR(x), x) = 0
    cashflows = np.array([-40000, 5000, 8000, 12000, 30000])
    assert_allclose(np.npv(np.irr(cashflows), cashflows), 0, atol=1e-10, rtol=0)

@filter_deprecation
def test_rate(self):
    # rate(nper, pmt, pv, fv): implied periodic interest rate
    assert_almost_equal(
        np.rate(10, 0, -3500, 10000),
        0.1107, 4)

@filter_deprecation
def test_rate_decimal(self):
    # Decimal inputs must produce an exact Decimal result
    rate = np.rate(Decimal('10'), Decimal('0'), Decimal('-3500'), Decimal('10000'))
    assert_equal(Decimal('0.1106908537142689284704528100'), rate)

@filter_deprecation
def test_irr(self):
    # internal rate of return for several cashflow profiles
    v = [-150000, 15000, 25000, 35000, 45000, 60000]
    assert_almost_equal(np.irr(v), 0.0524, 2)
    v = [-100, 0, 0, 74]
    assert_almost_equal(np.irr(v), -0.0955, 2)
    v = [-100, 39, 59, 55, 20]
    assert_almost_equal(np.irr(v), 0.28095, 2)
    v = [-100, 100, 0, -7]
    assert_almost_equal(np.irr(v), -0.0833, 2)
    v = [-100, 100, 0, 7]
    assert_almost_equal(np.irr(v), 0.06206, 2)
    v = [-5, 10.5, 1, -8, 1]
    assert_almost_equal(np.irr(v), 0.0886, 2)

    # Test that if there is no solution then np.irr returns nan
    # Fixes gh-6744
    v = [-1, -2, -3]
    assert_equal(np.irr(v), np.nan)
|
||||
|
||||
@filter_deprecation
def test_pv(self):
    # present value of an annuity paying 12000/period for 20 periods at 7%
    assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2)

@filter_deprecation
def test_pv_decimal(self):
    # same computation with exact Decimal arithmetic
    assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')),
                 Decimal('-127128.1709461939327295222005'))

@filter_deprecation
def test_fv(self):
    # future value of saving 2000/period for 20 periods at 7.5%
    assert_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.362673042924)

@filter_deprecation
def test_fv_decimal(self):
    # same computation with exact Decimal arithmetic
    assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), 0, 0),
                 Decimal('86609.36267304300040536731624'))

@filter_deprecation
def test_pmt(self):
    # periodic payment on a 15000 loan over 60 periods at 8%/12
    res = np.pmt(0.08 / 12, 5 * 12, 15000)
    tgt = -304.145914
    assert_allclose(res, tgt)
    # Test the edge case where rate == 0.0
    res = np.pmt(0.0, 5 * 12, 15000)
    tgt = -250.0
    assert_allclose(res, tgt)
    # Test the case where we use broadcast and
    # the arguments passed in are arrays.
    res = np.pmt([[0.0, 0.8], [0.3, 0.8]], [12, 3], [2000, 20000])
    tgt = np.array([[-166.66667, -19311.258], [-626.90814, -19311.258]])
    assert_allclose(res, tgt)
|
||||
|
||||
@filter_deprecation
def test_pmt_decimal(self):
    # pmt with exact Decimal arithmetic, including the rate == 0 edge case
    res = np.pmt(Decimal('0.08') / Decimal('12'), 5 * 12, 15000)
    tgt = Decimal('-304.1459143262052370338701494')
    assert_equal(res, tgt)
    # Test the edge case where rate == 0.0
    res = np.pmt(Decimal('0'), Decimal('60'), Decimal('15000'))
    tgt = -250
    assert_equal(res, tgt)
    # Test the case where we use broadcast and
    # the arguments passed in are arrays.
    res = np.pmt([[Decimal('0'), Decimal('0.8')], [Decimal('0.3'), Decimal('0.8')]],
                 [Decimal('12'), Decimal('3')], [Decimal('2000'), Decimal('20000')])
    tgt = np.array([[Decimal('-166.6666666666666666666666667'), Decimal('-19311.25827814569536423841060')],
                    [Decimal('-626.9081401700757748402586600'), Decimal('-19311.25827814569536423841060')]])

    # Cannot use the `assert_allclose` because it uses isfinite under the covers
    # which does not support the Decimal type
    # See issue: https://github.com/numpy/numpy/issues/9954
    assert_equal(res[0][0], tgt[0][0])
    assert_equal(res[0][1], tgt[0][1])
    assert_equal(res[1][0], tgt[1][0])
    assert_equal(res[1][1], tgt[1][1])

@filter_deprecation
def test_ppmt(self):
    # principal portion of the first payment on a 55000 loan
    assert_equal(np.round(np.ppmt(0.1 / 12, 1, 60, 55000), 2), -710.25)

@filter_deprecation
def test_ppmt_decimal(self):
    # same computation with exact Decimal arithmetic
    assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000')),
                 Decimal('-710.2541257864217612489830917'))

# Two tests showing how Decimal is actually getting at a more exact result
# .23 / 12 does not come out nicely as a float but does as a decimal
@filter_deprecation
def test_ppmt_special_rate(self):
    assert_equal(np.round(np.ppmt(0.23 / 12, 1, 60, 10000000000), 8), -90238044.232277036)

@filter_deprecation
def test_ppmt_special_rate_decimal(self):
    # When rounded out to 8 decimal places like the float based test, this should not equal the same value
    # as the float, substituted for the decimal
    def raise_error_because_not_equal():
        assert_equal(
            round(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')), 8),
            Decimal('-90238044.232277036'))

    assert_raises(AssertionError, raise_error_because_not_equal)
    assert_equal(np.ppmt(Decimal('0.23') / Decimal('12'), 1, 60, Decimal('10000000000')),
                 Decimal('-90238044.2322778884413969909'))
|
||||
|
||||
@filter_deprecation
def test_ipmt(self):
    # interest portion of the first payment on a 2000 loan
    assert_almost_equal(np.round(np.ipmt(0.1 / 12, 1, 24, 2000), 2), -16.67)

@filter_deprecation
def test_ipmt_decimal(self):
    # same computation with exact Decimal arithmetic
    result = np.ipmt(Decimal('0.1') / Decimal('12'), 1, 24, 2000)
    assert_equal(result.flat[0], Decimal('-16.66666666666666666666666667'))

@filter_deprecation
def test_nper(self):
    # number of periods needed to reach 100000 saving 2000/period at 7.5%
    assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
                        21.54, 2)

@filter_deprecation
def test_nper2(self):
    # rate == 0 edge case: nper degenerates to fv / pmt
    assert_almost_equal(np.nper(0.0, -2000, 0, 100000.),
                        50.0, 1)

@filter_deprecation
def test_npv(self):
    # net present value of a mixed cashflow stream at 5%
    assert_almost_equal(
        np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]),
        122.89, 2)

@filter_deprecation
def test_npv_decimal(self):
    # same computation with exact Decimal arithmetic
    assert_equal(
        np.npv(Decimal('0.05'), [-15000, 1500, 2500, 3500, 4500, 6000]),
        Decimal('122.894854950942692161628715'))

@filter_deprecation
def test_mirr(self):
    # modified internal rate of return for several cashflow profiles
    val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000]
    assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4)

    val = [-120000, 39000, 30000, 21000, 37000, 46000]
    assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6)

    val = [100, 200, -50, 300, -200]
    assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4)

    # no negative cashflow -> mirr is undefined and must return nan
    val = [39000, 30000, 21000, 37000, 46000]
    assert_(np.isnan(np.mirr(val, 0.10, 0.12)))

@filter_deprecation
def test_mirr_decimal(self):
    # mirr with exact Decimal arithmetic
    val = [Decimal('-4500'), Decimal('-800'), Decimal('800'), Decimal('800'),
           Decimal('600'), Decimal('600'), Decimal('800'), Decimal('800'),
           Decimal('700'), Decimal('3000')]
    assert_equal(np.mirr(val, Decimal('0.08'), Decimal('0.055')),
                 Decimal('0.066597175031553548874239618'))

    val = [Decimal('-120000'), Decimal('39000'), Decimal('30000'),
           Decimal('21000'), Decimal('37000'), Decimal('46000')]
    assert_equal(np.mirr(val, Decimal('0.10'), Decimal('0.12')), Decimal('0.126094130365905145828421880'))

    val = [Decimal('100'), Decimal('200'), Decimal('-50'),
           Decimal('300'), Decimal('-200')]
    assert_equal(np.mirr(val, Decimal('0.05'), Decimal('0.06')), Decimal('0.342823387842176663647819868'))

    # no negative cashflow -> nan, also for Decimal inputs
    val = [Decimal('39000'), Decimal('30000'), Decimal('21000'), Decimal('37000'), Decimal('46000')]
    assert_(np.isnan(np.mirr(val, Decimal('0.10'), Decimal('0.12'))))
|
||||
|
||||
@filter_deprecation
|
||||
def test_when(self):
|
||||
# begin
|
||||
assert_equal(np.rate(10, 20, -3500, 10000, 1),
|
||||
np.rate(10, 20, -3500, 10000, 'begin'))
|
||||
# end
|
||||
assert_equal(np.rate(10, 20, -3500, 10000),
|
||||
np.rate(10, 20, -3500, 10000, 'end'))
|
||||
assert_equal(np.rate(10, 20, -3500, 10000, 0),
|
||||
np.rate(10, 20, -3500, 10000, 'end'))
|
||||
|
||||
# begin
|
||||
assert_equal(np.pv(0.07, 20, 12000, 0, 1),
|
||||
np.pv(0.07, 20, 12000, 0, 'begin'))
|
||||
# end
|
||||
assert_equal(np.pv(0.07, 20, 12000, 0),
|
||||
np.pv(0.07, 20, 12000, 0, 'end'))
|
||||
assert_equal(np.pv(0.07, 20, 12000, 0, 0),
|
||||
np.pv(0.07, 20, 12000, 0, 'end'))
|
||||
|
||||
# begin
|
||||
assert_equal(np.fv(0.075, 20, -2000, 0, 1),
|
||||
np.fv(0.075, 20, -2000, 0, 'begin'))
|
||||
# end
|
||||
assert_equal(np.fv(0.075, 20, -2000, 0),
|
||||
np.fv(0.075, 20, -2000, 0, 'end'))
|
||||
assert_equal(np.fv(0.075, 20, -2000, 0, 0),
|
||||
np.fv(0.075, 20, -2000, 0, 'end'))
|
||||
|
||||
# begin
|
||||
assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 1),
|
||||
np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'begin'))
|
||||
# end
|
||||
assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0),
|
||||
np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end'))
|
||||
assert_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 0),
|
||||
np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end'))
|
||||
|
||||
# begin
|
||||
assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 1),
|
||||
np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'begin'))
|
||||
# end
|
||||
assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0),
|
||||
np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end'))
|
||||
assert_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 0),
|
||||
np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end'))
|
||||
|
||||
# begin
|
||||
assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 1),
|
||||
np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'begin'))
|
||||
# end
|
||||
assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0),
|
||||
np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end'))
|
||||
assert_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 0),
|
||||
np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end'))
|
||||
|
||||
# begin
|
||||
assert_equal(np.nper(0.075, -2000, 0, 100000., 1),
|
||||
np.nper(0.075, -2000, 0, 100000., 'begin'))
|
||||
# end
|
||||
assert_equal(np.nper(0.075, -2000, 0, 100000.),
|
||||
np.nper(0.075, -2000, 0, 100000., 'end'))
|
||||
assert_equal(np.nper(0.075, -2000, 0, 100000., 0),
|
||||
np.nper(0.075, -2000, 0, 100000., 'end'))
|
||||
|
||||
@filter_deprecation
|
||||
def test_decimal_with_when(self):
|
||||
"""Test that decimals are still supported if the when argument is passed"""
|
||||
# begin
|
||||
assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('1')),
|
||||
np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'begin'))
|
||||
# end
|
||||
assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000')),
|
||||
np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end'))
|
||||
assert_equal(np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), Decimal('0')),
|
||||
np.rate(Decimal('10'), Decimal('20'), Decimal('-3500'), Decimal('10000'), 'end'))
|
||||
|
||||
# begin
|
||||
assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('1')),
|
||||
np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'begin'))
|
||||
# end
|
||||
assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0')),
|
||||
np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end'))
|
||||
assert_equal(np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), Decimal('0')),
|
||||
np.pv(Decimal('0.07'), Decimal('20'), Decimal('12000'), Decimal('0'), 'end'))
|
||||
|
||||
# begin
|
||||
assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('1')),
|
||||
np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'begin'))
|
||||
# end
|
||||
assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0')),
|
||||
np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end'))
|
||||
assert_equal(np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), Decimal('0')),
|
||||
np.fv(Decimal('0.075'), Decimal('20'), Decimal('-2000'), Decimal('0'), 'end'))
|
||||
|
||||
# begin
|
||||
assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
|
||||
Decimal('0'), Decimal('1')),
|
||||
np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
|
||||
Decimal('0'), 'begin'))
|
||||
# end
|
||||
assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
|
||||
Decimal('0')),
|
||||
np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
|
||||
Decimal('0'), 'end'))
|
||||
assert_equal(np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
|
||||
Decimal('0'), Decimal('0')),
|
||||
np.pmt(Decimal('0.08') / Decimal('12'), Decimal('5') * Decimal('12'), Decimal('15000.'),
|
||||
Decimal('0'), 'end'))
|
||||
|
||||
# begin
|
||||
assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
|
||||
Decimal('0'), Decimal('1')),
|
||||
np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
|
||||
Decimal('0'), 'begin'))
|
||||
# end
|
||||
assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
|
||||
Decimal('0')),
|
||||
np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
|
||||
Decimal('0'), 'end'))
|
||||
assert_equal(np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
|
||||
Decimal('0'), Decimal('0')),
|
||||
np.ppmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('60'), Decimal('55000'),
|
||||
Decimal('0'), 'end'))
|
||||
|
||||
# begin
|
||||
assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
|
||||
Decimal('0'), Decimal('1')).flat[0],
|
||||
np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
|
||||
Decimal('0'), 'begin').flat[0])
|
||||
# end
|
||||
assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
|
||||
Decimal('0')).flat[0],
|
||||
np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
|
||||
Decimal('0'), 'end').flat[0])
|
||||
assert_equal(np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
|
||||
Decimal('0'), Decimal('0')).flat[0],
|
||||
np.ipmt(Decimal('0.1') / Decimal('12'), Decimal('1'), Decimal('24'), Decimal('2000'),
|
||||
Decimal('0'), 'end').flat[0])
|
||||
|
||||
@filter_deprecation
|
||||
def test_broadcast(self):
|
||||
assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]),
|
||||
[21.5449442, 20.76156441], 4)
|
||||
|
||||
assert_almost_equal(np.ipmt(0.1 / 12, list(range(5)), 24, 2000),
|
||||
[-17.29165168, -16.66666667, -16.03647345,
|
||||
-15.40102862, -14.76028842], 4)
|
||||
|
||||
assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000),
|
||||
[-74.998201, -75.62318601, -76.25337923,
|
||||
-76.88882405, -77.52956425], 4)
|
||||
|
||||
assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000, 0,
|
||||
[0, 0, 1, 'end', 'begin']),
|
||||
[-74.998201, -75.62318601, -75.62318601,
|
||||
-76.88882405, -76.88882405], 4)
|
||||
|
||||
@filter_deprecation
|
||||
def test_broadcast_decimal(self):
|
||||
# Use almost equal because precision is tested in the explicit tests, this test is to ensure
|
||||
# broadcast with Decimal is not broken.
|
||||
assert_almost_equal(np.ipmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')),
|
||||
[Decimal('-17.29165168'), Decimal('-16.66666667'), Decimal('-16.03647345'),
|
||||
Decimal('-15.40102862'), Decimal('-14.76028842')], 4)
|
||||
|
||||
assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000')),
|
||||
[Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-76.25337923'),
|
||||
Decimal('-76.88882405'), Decimal('-77.52956425')], 4)
|
||||
|
||||
assert_almost_equal(np.ppmt(Decimal('0.1') / Decimal('12'), list(range(5)), Decimal('24'), Decimal('2000'),
|
||||
Decimal('0'), [Decimal('0'), Decimal('0'), Decimal('1'), 'end', 'begin']),
|
||||
[Decimal('-74.998201'), Decimal('-75.62318601'), Decimal('-75.62318601'),
|
||||
Decimal('-76.88882405'), Decimal('-76.88882405')], 4)
|
||||
982
venv/Lib/site-packages/numpy/lib/tests/test_format.py
Normal file
982
venv/Lib/site-packages/numpy/lib/tests/test_format.py
Normal file
|
|
@ -0,0 +1,982 @@
|
|||
# doctest
|
||||
r''' Test the .npy file format.
|
||||
|
||||
Set up:
|
||||
|
||||
>>> import sys
|
||||
>>> from io import BytesIO
|
||||
>>> from numpy.lib import format
|
||||
>>>
|
||||
>>> scalars = [
|
||||
... np.uint8,
|
||||
... np.int8,
|
||||
... np.uint16,
|
||||
... np.int16,
|
||||
... np.uint32,
|
||||
... np.int32,
|
||||
... np.uint64,
|
||||
... np.int64,
|
||||
... np.float32,
|
||||
... np.float64,
|
||||
... np.complex64,
|
||||
... np.complex128,
|
||||
... object,
|
||||
... ]
|
||||
>>>
|
||||
>>> basic_arrays = []
|
||||
>>>
|
||||
>>> for scalar in scalars:
|
||||
... for endian in '<>':
|
||||
... dtype = np.dtype(scalar).newbyteorder(endian)
|
||||
... basic = np.arange(15).astype(dtype)
|
||||
... basic_arrays.extend([
|
||||
... np.array([], dtype=dtype),
|
||||
... np.array(10, dtype=dtype),
|
||||
... basic,
|
||||
... basic.reshape((3,5)),
|
||||
... basic.reshape((3,5)).T,
|
||||
... basic.reshape((3,5))[::-1,::2],
|
||||
... ])
|
||||
...
|
||||
>>>
|
||||
>>> Pdescr = [
|
||||
... ('x', 'i4', (2,)),
|
||||
... ('y', 'f8', (2, 2)),
|
||||
... ('z', 'u1')]
|
||||
>>>
|
||||
>>>
|
||||
>>> PbufferT = [
|
||||
... ([3,2], [[6.,4.],[6.,4.]], 8),
|
||||
... ([4,3], [[7.,5.],[7.,5.]], 9),
|
||||
... ]
|
||||
>>>
|
||||
>>>
|
||||
>>> Ndescr = [
|
||||
... ('x', 'i4', (2,)),
|
||||
... ('Info', [
|
||||
... ('value', 'c16'),
|
||||
... ('y2', 'f8'),
|
||||
... ('Info2', [
|
||||
... ('name', 'S2'),
|
||||
... ('value', 'c16', (2,)),
|
||||
... ('y3', 'f8', (2,)),
|
||||
... ('z3', 'u4', (2,))]),
|
||||
... ('name', 'S2'),
|
||||
... ('z2', 'b1')]),
|
||||
... ('color', 'S2'),
|
||||
... ('info', [
|
||||
... ('Name', 'U8'),
|
||||
... ('Value', 'c16')]),
|
||||
... ('y', 'f8', (2, 2)),
|
||||
... ('z', 'u1')]
|
||||
>>>
|
||||
>>>
|
||||
>>> NbufferT = [
|
||||
... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8),
|
||||
... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9),
|
||||
... ]
|
||||
>>>
|
||||
>>>
|
||||
>>> record_arrays = [
|
||||
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
|
||||
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
|
||||
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
|
||||
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
|
||||
... ]
|
||||
|
||||
Test the magic string writing.
|
||||
|
||||
>>> format.magic(1, 0)
|
||||
'\x93NUMPY\x01\x00'
|
||||
>>> format.magic(0, 0)
|
||||
'\x93NUMPY\x00\x00'
|
||||
>>> format.magic(255, 255)
|
||||
'\x93NUMPY\xff\xff'
|
||||
>>> format.magic(2, 5)
|
||||
'\x93NUMPY\x02\x05'
|
||||
|
||||
Test the magic string reading.
|
||||
|
||||
>>> format.read_magic(BytesIO(format.magic(1, 0)))
|
||||
(1, 0)
|
||||
>>> format.read_magic(BytesIO(format.magic(0, 0)))
|
||||
(0, 0)
|
||||
>>> format.read_magic(BytesIO(format.magic(255, 255)))
|
||||
(255, 255)
|
||||
>>> format.read_magic(BytesIO(format.magic(2, 5)))
|
||||
(2, 5)
|
||||
|
||||
Test the header writing.
|
||||
|
||||
>>> for arr in basic_arrays + record_arrays:
|
||||
... f = BytesIO()
|
||||
... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it
|
||||
... print(repr(f.getvalue()))
|
||||
...
|
||||
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '<u2', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '<i2', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '<u4', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '<i4', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '<u8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '<i8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '<f4', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '<f8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '<c8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '<c16', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
|
||||
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
|
||||
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
|
||||
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
|
||||
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
|
||||
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
|
||||
"v\x00{'descr': [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
|
||||
"\x16\x02{'descr': [('x', '<i4', (2,)),\n ('Info',\n [('value', '<c16'),\n ('y2', '<f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '<c16', (2,)),\n ('y3', '<f8', (2,)),\n ('z3', '<u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '<U8'), ('Value', '<c16')]),\n ('y', '<f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
|
||||
"v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
|
||||
"\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
|
||||
'''
|
||||
import sys
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import warnings
|
||||
import pytest
|
||||
from io import BytesIO
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (
|
||||
assert_, assert_array_equal, assert_raises, assert_raises_regex,
|
||||
assert_warns
|
||||
)
|
||||
from numpy.lib import format
|
||||
|
||||
|
||||
tempdir = None
|
||||
|
||||
# Module-level setup.
|
||||
|
||||
|
||||
def setup_module():
|
||||
global tempdir
|
||||
tempdir = tempfile.mkdtemp()
|
||||
|
||||
|
||||
def teardown_module():
|
||||
global tempdir
|
||||
if tempdir is not None and os.path.isdir(tempdir):
|
||||
shutil.rmtree(tempdir)
|
||||
tempdir = None
|
||||
|
||||
|
||||
# Generate some basic arrays to test with.
|
||||
scalars = [
|
||||
np.uint8,
|
||||
np.int8,
|
||||
np.uint16,
|
||||
np.int16,
|
||||
np.uint32,
|
||||
np.int32,
|
||||
np.uint64,
|
||||
np.int64,
|
||||
np.float32,
|
||||
np.float64,
|
||||
np.complex64,
|
||||
np.complex128,
|
||||
object,
|
||||
]
|
||||
basic_arrays = []
|
||||
for scalar in scalars:
|
||||
for endian in '<>':
|
||||
dtype = np.dtype(scalar).newbyteorder(endian)
|
||||
basic = np.arange(1500).astype(dtype)
|
||||
basic_arrays.extend([
|
||||
# Empty
|
||||
np.array([], dtype=dtype),
|
||||
# Rank-0
|
||||
np.array(10, dtype=dtype),
|
||||
# 1-D
|
||||
basic,
|
||||
# 2-D C-contiguous
|
||||
basic.reshape((30, 50)),
|
||||
# 2-D F-contiguous
|
||||
basic.reshape((30, 50)).T,
|
||||
# 2-D non-contiguous
|
||||
basic.reshape((30, 50))[::-1, ::2],
|
||||
])
|
||||
|
||||
# More complicated record arrays.
|
||||
# This is the structure of the table used for plain objects:
|
||||
#
|
||||
# +-+-+-+
|
||||
# |x|y|z|
|
||||
# +-+-+-+
|
||||
|
||||
# Structure of a plain array description:
|
||||
Pdescr = [
|
||||
('x', 'i4', (2,)),
|
||||
('y', 'f8', (2, 2)),
|
||||
('z', 'u1')]
|
||||
|
||||
# A plain list of tuples with values for testing:
|
||||
PbufferT = [
|
||||
# x y z
|
||||
([3, 2], [[6., 4.], [6., 4.]], 8),
|
||||
([4, 3], [[7., 5.], [7., 5.]], 9),
|
||||
]
|
||||
|
||||
|
||||
# This is the structure of the table used for nested objects (DON'T PANIC!):
|
||||
#
|
||||
# +-+---------------------------------+-----+----------+-+-+
|
||||
# |x|Info |color|info |y|z|
|
||||
# | +-----+--+----------------+----+--+ +----+-----+ | |
|
||||
# | |value|y2|Info2 |name|z2| |Name|Value| | |
|
||||
# | | | +----+-----+--+--+ | | | | | | |
|
||||
# | | | |name|value|y3|z3| | | | | | | |
|
||||
# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
|
||||
#
|
||||
|
||||
# The corresponding nested array description:
|
||||
Ndescr = [
|
||||
('x', 'i4', (2,)),
|
||||
('Info', [
|
||||
('value', 'c16'),
|
||||
('y2', 'f8'),
|
||||
('Info2', [
|
||||
('name', 'S2'),
|
||||
('value', 'c16', (2,)),
|
||||
('y3', 'f8', (2,)),
|
||||
('z3', 'u4', (2,))]),
|
||||
('name', 'S2'),
|
||||
('z2', 'b1')]),
|
||||
('color', 'S2'),
|
||||
('info', [
|
||||
('Name', 'U8'),
|
||||
('Value', 'c16')]),
|
||||
('y', 'f8', (2, 2)),
|
||||
('z', 'u1')]
|
||||
|
||||
NbufferT = [
|
||||
# x Info color info y z
|
||||
# value y2 Info2 name z2 Name Value
|
||||
# name value y3 z3
|
||||
([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True),
|
||||
'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
|
||||
([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False),
|
||||
'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
|
||||
]
|
||||
|
||||
record_arrays = [
|
||||
np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
|
||||
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
|
||||
np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
|
||||
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
|
||||
np.zeros(1, dtype=[('c', ('<f8', (5,)), (2,))])
|
||||
]
|
||||
|
||||
|
||||
#BytesIO that reads a random number of bytes at a time
|
||||
class BytesIOSRandomSize(BytesIO):
|
||||
def read(self, size=None):
|
||||
import random
|
||||
size = random.randint(1, size)
|
||||
return super(BytesIOSRandomSize, self).read(size)
|
||||
|
||||
|
||||
def roundtrip(arr):
|
||||
f = BytesIO()
|
||||
format.write_array(f, arr)
|
||||
f2 = BytesIO(f.getvalue())
|
||||
arr2 = format.read_array(f2, allow_pickle=True)
|
||||
return arr2
|
||||
|
||||
|
||||
def roundtrip_randsize(arr):
|
||||
f = BytesIO()
|
||||
format.write_array(f, arr)
|
||||
f2 = BytesIOSRandomSize(f.getvalue())
|
||||
arr2 = format.read_array(f2)
|
||||
return arr2
|
||||
|
||||
|
||||
def roundtrip_truncated(arr):
|
||||
f = BytesIO()
|
||||
format.write_array(f, arr)
|
||||
#BytesIO is one byte short
|
||||
f2 = BytesIO(f.getvalue()[0:-1])
|
||||
arr2 = format.read_array(f2)
|
||||
return arr2
|
||||
|
||||
|
||||
def assert_equal_(o1, o2):
|
||||
assert_(o1 == o2)
|
||||
|
||||
|
||||
def test_roundtrip():
|
||||
for arr in basic_arrays + record_arrays:
|
||||
arr2 = roundtrip(arr)
|
||||
assert_array_equal(arr, arr2)
|
||||
|
||||
|
||||
def test_roundtrip_randsize():
|
||||
for arr in basic_arrays + record_arrays:
|
||||
if arr.dtype != object:
|
||||
arr2 = roundtrip_randsize(arr)
|
||||
assert_array_equal(arr, arr2)
|
||||
|
||||
|
||||
def test_roundtrip_truncated():
|
||||
for arr in basic_arrays:
|
||||
if arr.dtype != object:
|
||||
assert_raises(ValueError, roundtrip_truncated, arr)
|
||||
|
||||
|
||||
def test_long_str():
|
||||
# check items larger than internal buffer size, gh-4027
|
||||
long_str_arr = np.ones(1, dtype=np.dtype((str, format.BUFFER_SIZE + 1)))
|
||||
long_str_arr2 = roundtrip(long_str_arr)
|
||||
assert_array_equal(long_str_arr, long_str_arr2)
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
def test_memmap_roundtrip():
|
||||
# Fixme: used to crash on windows
|
||||
if not (sys.platform == 'win32' or sys.platform == 'cygwin'):
|
||||
for arr in basic_arrays + record_arrays:
|
||||
if arr.dtype.hasobject:
|
||||
# Skip these since they can't be mmap'ed.
|
||||
continue
|
||||
# Write it out normally and through mmap.
|
||||
nfn = os.path.join(tempdir, 'normal.npy')
|
||||
mfn = os.path.join(tempdir, 'memmap.npy')
|
||||
fp = open(nfn, 'wb')
|
||||
try:
|
||||
format.write_array(fp, arr)
|
||||
finally:
|
||||
fp.close()
|
||||
|
||||
fortran_order = (
|
||||
arr.flags.f_contiguous and not arr.flags.c_contiguous)
|
||||
ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype,
|
||||
shape=arr.shape, fortran_order=fortran_order)
|
||||
ma[...] = arr
|
||||
del ma
|
||||
|
||||
# Check that both of these files' contents are the same.
|
||||
fp = open(nfn, 'rb')
|
||||
normal_bytes = fp.read()
|
||||
fp.close()
|
||||
fp = open(mfn, 'rb')
|
||||
memmap_bytes = fp.read()
|
||||
fp.close()
|
||||
assert_equal_(normal_bytes, memmap_bytes)
|
||||
|
||||
# Check that reading the file using memmap works.
|
||||
ma = format.open_memmap(nfn, mode='r')
|
||||
del ma
|
||||
|
||||
|
||||
def test_compressed_roundtrip():
    # savez_compressed -> load must reproduce the data exactly.
    original = np.random.rand(200, 200)
    archive = os.path.join(tempdir, 'compressed.npz')
    np.savez_compressed(archive, arr=original)
    restored = np.load(archive)['arr']
    assert_array_equal(original, restored)
|
||||
|
||||
# Structured dtypes covering the padding/offset corner cases of the
# npy header description, consumed by test_load_padded_dtype below.
# aligned
dt1 = np.dtype('i1, i4, i1', align=True)
# non-aligned, explicit offsets
dt2 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
                'offsets': [1, 6]})
# nested struct-in-struct
dt3 = np.dtype({'names': ['c', 'd'], 'formats': ['i4', dt2]})
# field with '' name
dt4 = np.dtype({'names': ['a', '', 'b'], 'formats': ['i4']*3})
# titles
dt5 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
                'offsets': [1, 6], 'titles': ['aa', 'bb']})
# empty
dt6 = np.dtype({'names': [], 'formats': [], 'itemsize': 8})
|
||||
@pytest.mark.parametrize("dt", [dt1, dt2, dt3, dt4, dt5, dt6])
def test_load_padded_dtype(dt):
    # Arrays with padded/offset structured dtypes must round-trip
    # through savez/load unchanged.
    original = np.zeros(3, dt)
    for idx in range(3):
        original[idx] = idx + 5
    archive = os.path.join(tempdir, 'aligned.npz')
    np.savez(archive, arr=original)
    restored = np.load(archive)['arr']
    assert_array_equal(original, restored)
||||
def test_python2_python3_interoperability():
    # A .npy file produced by 64-bit Python 2 on Windows must load
    # correctly on the current interpreter.
    path = os.path.join(os.path.dirname(__file__), 'data',
                        'win64python2.npy')
    loaded = np.load(path)
    assert_array_equal(loaded, np.ones(2))
|
||||
def test_pickle_python2_python3():
    # Test that loading object arrays saved on Python 2 works both on
    # Python 2 and Python 3 and vice versa
    data_dir = os.path.join(os.path.dirname(__file__), 'data')

    # Reference contents of the fixture arrays: a None, a builtin, a
    # unicode string, and a UTF-8 byte string.
    expected = np.array([None, range, u'\u512a\u826f',
                         b'\xe4\xb8\x8d\xe8\x89\xaf'],
                        dtype=object)

    for fname in ['py2-objarr.npy', 'py2-objarr.npz',
                  'py3-objarr.npy', 'py3-objarr.npz']:
        path = os.path.join(data_dir, fname)

        for encoding in ['bytes', 'latin1']:
            data_f = np.load(path, allow_pickle=True, encoding=encoding)
            if fname.endswith('.npz'):
                data = data_f['x']
                data_f.close()
            else:
                data = data_f

            if encoding == 'latin1' and fname.startswith('py2'):
                # latin1-decoding a Python 2 byte string yields str,
                # so the last element differs from `expected`.
                assert_(isinstance(data[3], str))
                assert_array_equal(data[:-1], expected[:-1])
                # mojibake occurs
                assert_array_equal(data[-1].encode(encoding), expected[-1])
            else:
                assert_(isinstance(data[3], bytes))
                assert_array_equal(data, expected)

        if fname.startswith('py2'):
            # Defaults cannot decode Python 2 pickles on Python 3:
            # the str/bytes mismatch surfaces as UnicodeError, and
            # disabling fix_imports surfaces as ImportError.
            if fname.endswith('.npz'):
                data = np.load(path, allow_pickle=True)
                assert_raises(UnicodeError, data.__getitem__, 'x')
                data.close()
                data = np.load(path, allow_pickle=True, fix_imports=False,
                               encoding='latin1')
                assert_raises(ImportError, data.__getitem__, 'x')
                data.close()
            else:
                assert_raises(UnicodeError, np.load, path,
                              allow_pickle=True)
                assert_raises(ImportError, np.load, path,
                              allow_pickle=True, fix_imports=False,
                              encoding='latin1')
|
||||
|
||||
def test_pickle_disallow():
    # With allow_pickle=False, loading or saving object arrays must
    # raise ValueError instead of silently unpickling/pickling.
    data_dir = os.path.join(os.path.dirname(__file__), 'data')

    npy_path = os.path.join(data_dir, 'py2-objarr.npy')
    assert_raises(ValueError, np.load, npy_path,
                  allow_pickle=False, encoding='latin1')

    npz_path = os.path.join(data_dir, 'py2-objarr.npz')
    archive = np.load(npz_path, allow_pickle=False, encoding='latin1')
    assert_raises(ValueError, archive.__getitem__, 'x')

    out_path = os.path.join(tempdir, 'pickle-disabled.npy')
    assert_raises(ValueError, np.save, out_path,
                  np.array([None], dtype=object), allow_pickle=False)
|
||||
# Deeply nested dtypes (subarrays of structs of subarrays ...) whose
# .descr must survive descr_to_dtype and a file round-trip.
@pytest.mark.parametrize('dt', [
    np.dtype(np.dtype([('a', np.int8),
                       ('b', np.int16),
                       ('c', np.int32),
                       ], align=True),
             (3,)),
    np.dtype([('x', np.dtype({'names': ['a', 'b'],
                              'formats': ['i1', 'i1'],
                              'offsets': [0, 4],
                              'itemsize': 8,
                              },
                             (3,)),
               (4,),
               )]),
    np.dtype([('x',
               ('<f8', (5,)),
               (2,),
               )]),
    np.dtype([('x', np.dtype((
        np.dtype((
            np.dtype({'names': ['a', 'b'],
                      'formats': ['i1', 'i1'],
                      'offsets': [0, 4],
                      'itemsize': 8}),
            (3,)
            )),
        (4,)
        )))
        ]),
    np.dtype([
        ('a', np.dtype((
            np.dtype((
                np.dtype((
                    np.dtype([
                        ('a', int),
                        ('b', np.dtype({'names': ['a', 'b'],
                                        'formats': ['i1', 'i1'],
                                        'offsets': [0, 4],
                                        'itemsize': 8})),
                        ]),
                    (3,),
                    )),
                (4,),
                )),
            (5,),
            )))
        ]),
    ])

def test_descr_to_dtype(dt):
    """descr_to_dtype must invert .descr, and the dtype must round-trip."""
    dt1 = format.descr_to_dtype(dt.descr)
    assert_equal_(dt1, dt)
    arr1 = np.zeros(3, dt)
    arr2 = roundtrip(arr1)
    assert_array_equal(arr1, arr2)
|
||||
def test_version_2_0():
|
||||
f = BytesIO()
|
||||
# requires more than 2 byte for header
|
||||
dt = [(("%d" % i) * 100, float) for i in range(500)]
|
||||
d = np.ones(1000, dtype=dt)
|
||||
|
||||
format.write_array(f, d, version=(2, 0))
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.filterwarnings('always', '', UserWarning)
|
||||
format.write_array(f, d)
|
||||
assert_(w[0].category is UserWarning)
|
||||
|
||||
# check alignment of data portion
|
||||
f.seek(0)
|
||||
header = f.readline()
|
||||
assert_(len(header) % format.ARRAY_ALIGN == 0)
|
||||
|
||||
f.seek(0)
|
||||
n = format.read_array(f)
|
||||
assert_array_equal(d, n)
|
||||
|
||||
# 1.0 requested but data cannot be saved this way
|
||||
assert_raises(ValueError, format.write_array, f, d, (1, 0))
|
||||
|
||||
|
||||
@pytest.mark.slow
def test_version_2_0_memmap():
    """open_memmap must honor explicit version (2, 0), reject (1, 0) when
    the header is too large, and warn when auto-selecting (2, 0)."""
    # requires more than 2 byte for header
    dt = [(("%d" % i) * 100, float) for i in range(500)]
    d = np.ones(1000, dtype=dt)
    tf = tempfile.mktemp('', 'mmap', dir=tempdir)

    # 1.0 requested but data cannot be saved this way
    assert_raises(ValueError, format.open_memmap, tf, mode='w+', dtype=d.dtype,
                  shape=d.shape, version=(1, 0))

    ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
                            shape=d.shape, version=(2, 0))
    ma[...] = d
    # Deleting the memmap flushes it to disk.
    del ma

    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', UserWarning)
        # version=None auto-selects (2, 0) here and must warn about it.
        ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
                                shape=d.shape, version=None)
        assert_(w[0].category is UserWarning)
        ma[...] = d
        del ma

    ma = format.open_memmap(tf, mode='r')
    assert_array_equal(ma, d)
|
||||
|
||||
def test_write_version():
    # write_array accepts exactly (1, 0), (2, 0) and None (automatic);
    # every other explicit version must be rejected.
    stream = BytesIO()
    arr = np.arange(1)

    # These should pass.
    for good in [(1, 0), None, (2, 0)]:
        format.write_array(stream, arr, version=good)
        format.write_array(stream, arr)

    # These should all fail.
    bad_versions = [
        (1, 1),
        (0, 0),
        (0, 1),
        (2, 2),
        (255, 255),
    ]
    for bad in bad_versions:
        with assert_raises_regex(ValueError,
                                 'we only support format version.*'):
            format.write_array(stream, arr, version=bad)
|
||||
|
||||
# Well-formed '\x93NUMPY' magic prefixes carrying version bytes that
# the read path is expected to reject (given only these bytes).
bad_version_magic = [
    b'\x93NUMPY\x01\x01',
    b'\x93NUMPY\x00\x00',
    b'\x93NUMPY\x00\x01',
    b'\x93NUMPY\x02\x00',
    b'\x93NUMPY\x02\x02',
    b'\x93NUMPY\xff\xff',
]
# Byte strings that are not valid npy magic at all: wrong first byte,
# wrong case, wrong name, or truncated.
malformed_magic = [
    b'\x92NUMPY\x01\x00',
    b'\x00NUMPY\x01\x00',
    b'\x93numpy\x01\x00',
    b'\x93MATLB\x01\x00',
    b'\x93NUMPY\x01',
    b'\x93NUMPY',
    b'',
]
|
||||
|
||||
def test_read_magic():
    # read_magic must report the version the stream was written with
    # and leave the cursor exactly MAGIC_LEN bytes in.
    data = np.ones((3, 6), dtype=float)

    for version in [(1, 0), (2, 0)]:
        buf = BytesIO()
        format.write_array(buf, data, version=version)
        buf.seek(0)
        assert_(format.read_magic(buf) == version)
        assert_(buf.tell() == format.MAGIC_LEN)
|
||||
def test_read_magic_bad_magic():
    # Streams whose magic is malformed must fail with ValueError.
    for magic in malformed_magic:
        assert_raises(ValueError, format.read_array, BytesIO(magic))
||||
|
||||
|
||||
def test_read_version_1_0_bad_magic():
    # Both unsupported version bytes and broken magic must raise.
    for magic in bad_version_magic + malformed_magic:
        assert_raises(ValueError, format.read_array, BytesIO(magic))
|
||||
|
||||
def test_bad_magic_args():
    # Each version component must fit in one unsigned byte (0..255).
    for major, minor in [(-1, 1), (256, 1), (1, -1), (1, 256)]:
        assert_raises(ValueError, format.magic, major, minor)
|
||||
|
||||
def test_large_header():
    # A small header serializes fine under version (1, 0) ...
    buf = BytesIO()
    format.write_array_header_1_0(buf, {'a': 1, 'b': 2})

    # ... but one beyond the 64K length field cannot be represented.
    buf = BytesIO()
    oversized = {'a': 1, 'b': 2, 'c': 'x' * 256 * 256}
    assert_raises(ValueError, format.write_array_header_1_0, buf, oversized)
||||
|
||||
|
||||
def test_read_array_header_1_0():
    # Parse a version (1, 0) header and verify the parsed triple and
    # the alignment of the data that follows it.
    buf = BytesIO()
    data = np.ones((3, 6), dtype=float)
    format.write_array(buf, data, version=(1, 0))

    buf.seek(format.MAGIC_LEN)
    shape, fortran, dtype = format.read_array_header_1_0(buf)

    assert_(buf.tell() % format.ARRAY_ALIGN == 0)
    assert_((shape, fortran, dtype) == ((3, 6), False, float))
||||
|
||||
|
||||
def test_read_array_header_2_0():
    # Same as the (1, 0) case, but for a version (2, 0) header.
    buf = BytesIO()
    data = np.ones((3, 6), dtype=float)
    format.write_array(buf, data, version=(2, 0))

    buf.seek(format.MAGIC_LEN)
    shape, fortran, dtype = format.read_array_header_2_0(buf)

    assert_(buf.tell() % format.ARRAY_ALIGN == 0)
    assert_((shape, fortran, dtype) == ((3, 6), False, float))
||||
|
||||
|
||||
def test_bad_header():
    """Malformed version (1, 0) headers must be rejected."""
    # header of length less than 2 should fail
    s = BytesIO()
    assert_raises(ValueError, format.read_array_header_1_0, s)
    s = BytesIO(b'1')
    assert_raises(ValueError, format.read_array_header_1_0, s)

    # header shorter than indicated size should fail
    s = BytesIO(b'\x01\x00')
    assert_raises(ValueError, format.read_array_header_1_0, s)

    # headers without the exact keys required should fail
    # (missing 'fortran_order' here)
    d = {"shape": (1, 2),
         "descr": "x"}
    s = BytesIO()
    format.write_array_header_1_0(s, d)
    assert_raises(ValueError, format.read_array_header_1_0, s)

    # (extra, unexpected key here)
    d = {"shape": (1, 2),
         "fortran_order": False,
         "descr": "x",
         "extrakey": -1}
    s = BytesIO()
    format.write_array_header_1_0(s, d)
    assert_raises(ValueError, format.read_array_header_1_0, s)
||||
|
||||
|
||||
def test_large_file_support():
    """np.save/np.load must work at file offsets beyond 4GB (>32-bit)."""
    if (sys.platform == 'win32' or sys.platform == 'cygwin'):
        pytest.skip("Unknown if Windows has sparse filesystems")
    # try creating a large sparse file
    tf_name = os.path.join(tempdir, 'sparse_file')
    try:
        # seek past end would work too, but linux truncate somewhat
        # increases the chances that we have a sparse filesystem and can
        # avoid actually writing 5GB
        import subprocess as sp
        sp.check_call(["truncate", "-s", "5368709120", tf_name])
    except Exception:
        pytest.skip("Could not create 5GB large file")
    # write a small array to the end
    with open(tf_name, "wb") as f:
        f.seek(5368709120)
        d = np.arange(5)
        np.save(f, d)
    # read it back
    with open(tf_name, "rb") as f:
        f.seek(5368709120)
        r = np.load(f)
    assert_array_equal(r, d)
||||
|
||||
|
||||
@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8,
                    reason="test requires 64-bit system")
@pytest.mark.slow
def test_large_archive():
    # Regression test for product of saving arrays with dimensions of array
    # having a product that doesn't fit in int32. See gh-7598 for details.
    try:
        # 2**31 elements -- just past the int32 limit.
        a = np.empty((2**30, 2), dtype=np.uint8)
    except MemoryError:
        pytest.skip("Could not create large file")

    fname = os.path.join(tempdir, "large_archive")

    with open(fname, "wb") as f:
        np.savez(f, arr=a)

    with open(fname, "rb") as f:
        new_a = np.load(f)["arr"]

    # Only the shape is checked; comparing 2GB of contents would be slow.
    assert_(a.shape == new_a.shape)
||||
|
||||
|
||||
def test_empty_npz():
    # An archive containing no arrays must still save and load, gh-9989.
    archive = os.path.join(tempdir, "nothing.npz")
    np.savez(archive)
    np.load(archive)
||||
|
||||
|
||||
def test_unicode_field_names():
    """Non-latin1 field names require format version (3, 0); writing them
    must round-trip, and auto-selecting 3.0 must warn.  See gh-7391."""
    arr = np.array([
        (1, 3),
        (1, 2),
        (1, 3),
        (1, 2)
    ], dtype=[
        ('int', int),
        (u'\N{CJK UNIFIED IDEOGRAPH-6574}\N{CJK UNIFIED IDEOGRAPH-5F62}', int)
    ])
    fname = os.path.join(tempdir, "unicode.npy")
    with open(fname, 'wb') as f:
        format.write_array(f, arr, version=(3, 0))
    with open(fname, 'rb') as f:
        arr2 = format.read_array(f)
    assert_array_equal(arr, arr2)

    # notifies the user that 3.0 is selected
    with open(fname, 'wb') as f:
        with assert_warns(UserWarning):
            format.write_array(f, arr, version=None)
||||
|
||||
|
||||
# Dtypes carrying metadata; `fail` marks the cases whose serialized
# header cannot be parsed back.
@pytest.mark.parametrize('dt, fail', [
    (np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3',
                metadata={'some': 'stuff'})]}), True),
    (np.dtype(int, metadata={'some': 'stuff'}), False),
    (np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False),
    # recursive: metadata on the field of a dtype
    (np.dtype({'names': ['a', 'b'], 'formats': [
        float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]})
    ]}), False)
    ])
def test_metadata_dtype(dt, fail):
    # gh-14142: saving a dtype with metadata warns, and the metadata is
    # dropped on the way through the file.
    arr = np.ones(10, dtype=dt)
    buf = BytesIO()
    with assert_warns(UserWarning):
        np.save(buf, arr)
    buf.seek(0)
    if fail:
        with assert_raises(ValueError):
            np.load(buf)
    else:
        arr2 = np.load(buf)
        # BUG: assert_array_equal does not check metadata
        from numpy.lib.format import _has_metadata
        assert_array_equal(arr, arr2)
        assert _has_metadata(arr.dtype)
        assert not _has_metadata(arr2.dtype)
||||
|
||||
3392
venv/Lib/site-packages/numpy/lib/tests/test_function_base.py
Normal file
3392
venv/Lib/site-packages/numpy/lib/tests/test_function_base.py
Normal file
File diff suppressed because it is too large
Load diff
838
venv/Lib/site-packages/numpy/lib/tests/test_histograms.py
Normal file
838
venv/Lib/site-packages/numpy/lib/tests/test_histograms.py
Normal file
|
|
@ -0,0 +1,838 @@
|
|||
import numpy as np
|
||||
|
||||
from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges
|
||||
from numpy.testing import (
|
||||
assert_, assert_equal, assert_array_equal, assert_almost_equal,
|
||||
assert_array_almost_equal, assert_raises, assert_allclose,
|
||||
assert_array_max_ulp, assert_raises_regex, suppress_warnings,
|
||||
)
|
||||
import pytest
|
||||
|
||||
|
||||
class TestHistogram:
|
||||
|
||||
    def setup(self):
        # No per-test fixtures are needed; kept for the test runner.
        pass
|
||||
    def teardown(self):
        # Nothing to clean up; kept symmetric with setup.
        pass
|
||||
def test_simple(self):
|
||||
n = 100
|
||||
v = np.random.rand(n)
|
||||
(a, b) = histogram(v)
|
||||
# check if the sum of the bins equals the number of samples
|
||||
assert_equal(np.sum(a, axis=0), n)
|
||||
# check that the bin counts are evenly spaced when the data is from
|
||||
# a linear function
|
||||
(a, b) = histogram(np.linspace(0, 10, 100))
|
||||
assert_array_equal(a, 10)
|
||||
|
||||
def test_one_bin(self):
|
||||
# Ticket 632
|
||||
hist, edges = histogram([1, 2, 3, 4], [1, 2])
|
||||
assert_array_equal(hist, [2, ])
|
||||
assert_array_equal(edges, [1, 2])
|
||||
assert_raises(ValueError, histogram, [1, 2], bins=0)
|
||||
h, e = histogram([1, 2], bins=1)
|
||||
assert_equal(h, np.array([2]))
|
||||
assert_allclose(e, np.array([1., 2.]))
|
||||
|
||||
    def test_normed(self):
        # The deprecated `normed` keyword must still normalize and must
        # emit exactly one VisibleDeprecationWarning per call.
        sup = suppress_warnings()
        with sup:
            rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*')
            # Check that the integral of the density equals 1.
            n = 100
            v = np.random.rand(n)
            a, b = histogram(v, normed=True)
            area = np.sum(a * np.diff(b))
            assert_almost_equal(area, 1)
            assert_equal(len(rec), 1)

        sup = suppress_warnings()
        with sup:
            rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*')
            # Check with non-constant bin widths (buggy but backwards
            # compatible)
            v = np.arange(10)
            bins = [0, 1, 5, 9, 10]
            a, b = histogram(v, bins, normed=True)
            area = np.sum(a * np.diff(b))
            assert_almost_equal(area, 1)
            assert_equal(len(rec), 1)
|
||||
|
||||
def test_density(self):
|
||||
# Check that the integral of the density equals 1.
|
||||
n = 100
|
||||
v = np.random.rand(n)
|
||||
a, b = histogram(v, density=True)
|
||||
area = np.sum(a * np.diff(b))
|
||||
assert_almost_equal(area, 1)
|
||||
|
||||
# Check with non-constant bin widths
|
||||
v = np.arange(10)
|
||||
bins = [0, 1, 3, 6, 10]
|
||||
a, b = histogram(v, bins, density=True)
|
||||
assert_array_equal(a, .1)
|
||||
assert_equal(np.sum(a * np.diff(b)), 1)
|
||||
|
||||
# Test that passing False works too
|
||||
a, b = histogram(v, bins, density=False)
|
||||
assert_array_equal(a, [1, 2, 3, 4])
|
||||
|
||||
# Variable bin widths are especially useful to deal with
|
||||
# infinities.
|
||||
v = np.arange(10)
|
||||
bins = [0, 1, 3, 6, np.inf]
|
||||
a, b = histogram(v, bins, density=True)
|
||||
assert_array_equal(a, [.1, .1, .1, 0.])
|
||||
|
||||
# Taken from a bug report from N. Becker on the numpy-discussion
|
||||
# mailing list Aug. 6, 2010.
|
||||
counts, dmy = np.histogram(
|
||||
[1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
|
||||
assert_equal(counts, [.25, 0])
|
||||
|
||||
def test_outliers(self):
|
||||
# Check that outliers are not tallied
|
||||
a = np.arange(10) + .5
|
||||
|
||||
# Lower outliers
|
||||
h, b = histogram(a, range=[0, 9])
|
||||
assert_equal(h.sum(), 9)
|
||||
|
||||
# Upper outliers
|
||||
h, b = histogram(a, range=[1, 10])
|
||||
assert_equal(h.sum(), 9)
|
||||
|
||||
# Normalization
|
||||
h, b = histogram(a, range=[1, 9], density=True)
|
||||
assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)
|
||||
|
||||
# Weights
|
||||
w = np.arange(10) + .5
|
||||
h, b = histogram(a, range=[1, 9], weights=w, density=True)
|
||||
assert_equal((h * np.diff(b)).sum(), 1)
|
||||
|
||||
h, b = histogram(a, bins=8, range=[1, 9], weights=w)
|
||||
assert_equal(h, w[1:-1])
|
||||
|
||||
def test_arr_weights_mismatch(self):
|
||||
a = np.arange(10) + .5
|
||||
w = np.arange(11) + .5
|
||||
with assert_raises_regex(ValueError, "same shape as"):
|
||||
h, b = histogram(a, range=[1, 9], weights=w, density=True)
|
||||
|
||||
|
||||
def test_type(self):
|
||||
# Check the type of the returned histogram
|
||||
a = np.arange(10) + .5
|
||||
h, b = histogram(a)
|
||||
assert_(np.issubdtype(h.dtype, np.integer))
|
||||
|
||||
h, b = histogram(a, density=True)
|
||||
assert_(np.issubdtype(h.dtype, np.floating))
|
||||
|
||||
h, b = histogram(a, weights=np.ones(10, int))
|
||||
assert_(np.issubdtype(h.dtype, np.integer))
|
||||
|
||||
h, b = histogram(a, weights=np.ones(10, float))
|
||||
assert_(np.issubdtype(h.dtype, np.floating))
|
||||
|
||||
def test_f32_rounding(self):
|
||||
# gh-4799, check that the rounding of the edges works with float32
|
||||
x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
|
||||
y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
|
||||
counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
|
||||
assert_equal(counts_hist.sum(), 3.)
|
||||
|
||||
    def test_bool_conversion(self):
        # gh-12107
        # Reference integer histogram
        a = np.array([1, 1, 0], dtype=np.uint8)
        int_hist, int_edges = np.histogram(a)

        # Should raise an warning on booleans
        # Ensure that the histograms are equivalent, need to suppress
        # the warnings to get the actual outputs
        with suppress_warnings() as sup:
            rec = sup.record(RuntimeWarning, 'Converting input from .*')
            hist, edges = np.histogram([True, True, False])
            # A warning should be issued
            assert_equal(len(rec), 1)
            # The boolean input must produce the same result as uint8.
            assert_array_equal(hist, int_hist)
            assert_array_equal(edges, int_edges)
|
||||
|
||||
def test_weights(self):
|
||||
v = np.random.rand(100)
|
||||
w = np.ones(100) * 5
|
||||
a, b = histogram(v)
|
||||
na, nb = histogram(v, density=True)
|
||||
wa, wb = histogram(v, weights=w)
|
||||
nwa, nwb = histogram(v, weights=w, density=True)
|
||||
assert_array_almost_equal(a * 5, wa)
|
||||
assert_array_almost_equal(na, nwa)
|
||||
|
||||
# Check weights are properly applied.
|
||||
v = np.linspace(0, 10, 10)
|
||||
w = np.concatenate((np.zeros(5), np.ones(5)))
|
||||
wa, wb = histogram(v, bins=np.arange(11), weights=w)
|
||||
assert_array_almost_equal(wa, w)
|
||||
|
||||
# Check with integer weights
|
||||
wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
|
||||
assert_array_equal(wa, [4, 5, 0, 1])
|
||||
wa, wb = histogram(
|
||||
[1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)
|
||||
assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
|
||||
|
||||
# Check weights with non-uniform bin widths
|
||||
a, b = histogram(
|
||||
np.arange(9), [0, 1, 3, 6, 10],
|
||||
weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
|
||||
assert_almost_equal(a, [.2, .1, .1, .075])
|
||||
|
||||
def test_exotic_weights(self):
|
||||
|
||||
# Test the use of weights that are not integer or floats, but e.g.
|
||||
# complex numbers or object types.
|
||||
|
||||
# Complex weights
|
||||
values = np.array([1.3, 2.5, 2.3])
|
||||
weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])
|
||||
|
||||
# Check with custom bins
|
||||
wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
|
||||
assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
|
||||
|
||||
# Check with even bins
|
||||
wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
|
||||
assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
|
||||
|
||||
# Decimal weights
|
||||
from decimal import Decimal
|
||||
values = np.array([1.3, 2.5, 2.3])
|
||||
weights = np.array([Decimal(1), Decimal(2), Decimal(3)])
|
||||
|
||||
# Check with custom bins
|
||||
wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
|
||||
assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
|
||||
|
||||
# Check with even bins
|
||||
wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
|
||||
assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
|
||||
|
||||
def test_no_side_effects(self):
|
||||
# This is a regression test that ensures that values passed to
|
||||
# ``histogram`` are unchanged.
|
||||
values = np.array([1.3, 2.5, 2.3])
|
||||
np.histogram(values, range=[-10, 10], bins=100)
|
||||
assert_array_almost_equal(values, [1.3, 2.5, 2.3])
|
||||
|
||||
def test_empty(self):
|
||||
a, b = histogram([], bins=([0, 1]))
|
||||
assert_array_equal(a, np.array([0]))
|
||||
assert_array_equal(b, np.array([0, 1]))
|
||||
|
||||
def test_error_binnum_type (self):
|
||||
# Tests if right Error is raised if bins argument is float
|
||||
vals = np.linspace(0.0, 1.0, num=100)
|
||||
histogram(vals, 5)
|
||||
assert_raises(TypeError, histogram, vals, 2.4)
|
||||
|
||||
def test_finite_range(self):
|
||||
# Normal ranges should be fine
|
||||
vals = np.linspace(0.0, 1.0, num=100)
|
||||
histogram(vals, range=[0.25,0.75])
|
||||
assert_raises(ValueError, histogram, vals, range=[np.nan,0.75])
|
||||
assert_raises(ValueError, histogram, vals, range=[0.25,np.inf])
|
||||
|
||||
def test_invalid_range(self):
|
||||
# start of range must be < end of range
|
||||
vals = np.linspace(0.0, 1.0, num=100)
|
||||
with assert_raises_regex(ValueError, "max must be larger than"):
|
||||
np.histogram(vals, range=[0.1, 0.01])
|
||||
|
||||
def test_bin_edge_cases(self):
|
||||
# Ensure that floating-point computations correctly place edge cases.
|
||||
arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
|
||||
hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))
|
||||
mask = hist > 0
|
||||
left_edges = edges[:-1][mask]
|
||||
right_edges = edges[1:][mask]
|
||||
for x, left, right in zip(arr, left_edges, right_edges):
|
||||
assert_(x >= left)
|
||||
assert_(x < right)
|
||||
|
||||
def test_last_bin_inclusive_range(self):
|
||||
arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
|
||||
hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
|
||||
assert_equal(hist[-1], 1)
|
||||
|
||||
def test_bin_array_dims(self):
|
||||
# gracefully handle bins object > 1 dimension
|
||||
vals = np.linspace(0.0, 1.0, num=100)
|
||||
bins = np.array([[0, 0.5], [0.6, 1.0]])
|
||||
with assert_raises_regex(ValueError, "must be 1d"):
|
||||
np.histogram(vals, bins=bins)
|
||||
|
||||
def test_unsigned_monotonicity_check(self):
|
||||
# Ensures ValueError is raised if bins not increasing monotonically
|
||||
# when bins contain unsigned values (see #9222)
|
||||
arr = np.array([2])
|
||||
bins = np.array([1, 3, 1], dtype='uint64')
|
||||
with assert_raises(ValueError):
|
||||
hist, edges = np.histogram(arr, bins=bins)
|
||||
|
||||
def test_object_array_of_0d(self):
|
||||
# gh-7864
|
||||
assert_raises(ValueError,
|
||||
histogram, [np.array(0.4) for i in range(10)] + [-np.inf])
|
||||
assert_raises(ValueError,
|
||||
histogram, [np.array(0.4) for i in range(10)] + [np.inf])
|
||||
|
||||
# these should not crash
|
||||
np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001])
|
||||
np.histogram([np.array(0.5) for i in range(10)] + [.5])
|
||||
|
||||
def test_some_nan_values(self):
|
||||
# gh-7503
|
||||
one_nan = np.array([0, 1, np.nan])
|
||||
all_nan = np.array([np.nan, np.nan])
|
||||
|
||||
# the internal comparisons with NaN give warnings
|
||||
sup = suppress_warnings()
|
||||
sup.filter(RuntimeWarning)
|
||||
with sup:
|
||||
# can't infer range with nan
|
||||
assert_raises(ValueError, histogram, one_nan, bins='auto')
|
||||
assert_raises(ValueError, histogram, all_nan, bins='auto')
|
||||
|
||||
# explicit range solves the problem
|
||||
h, b = histogram(one_nan, bins='auto', range=(0, 1))
|
||||
assert_equal(h.sum(), 2) # nan is not counted
|
||||
h, b = histogram(all_nan, bins='auto', range=(0, 1))
|
||||
assert_equal(h.sum(), 0) # nan is not counted
|
||||
|
||||
# as does an explicit set of bins
|
||||
h, b = histogram(one_nan, bins=[0, 1])
|
||||
assert_equal(h.sum(), 2) # nan is not counted
|
||||
h, b = histogram(all_nan, bins=[0, 1])
|
||||
assert_equal(h.sum(), 0) # nan is not counted
|
||||
|
||||
def test_datetime(self):
|
||||
begin = np.datetime64('2000-01-01', 'D')
|
||||
offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20])
|
||||
bins = np.array([0, 2, 7, 20])
|
||||
dates = begin + offsets
|
||||
date_bins = begin + bins
|
||||
|
||||
td = np.dtype('timedelta64[D]')
|
||||
|
||||
# Results should be the same for integer offsets or datetime values.
|
||||
# For now, only explicit bins are supported, since linspace does not
|
||||
# work on datetimes or timedeltas
|
||||
d_count, d_edge = histogram(dates, bins=date_bins)
|
||||
t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td))
|
||||
i_count, i_edge = histogram(offsets, bins=bins)
|
||||
|
||||
assert_equal(d_count, i_count)
|
||||
assert_equal(t_count, i_count)
|
||||
|
||||
assert_equal((d_edge - begin).astype(int), i_edge)
|
||||
assert_equal(t_edge.astype(int), i_edge)
|
||||
|
||||
assert_equal(d_edge.dtype, dates.dtype)
|
||||
assert_equal(t_edge.dtype, td)
|
||||
|
||||
def do_signed_overflow_bounds(self, dtype):
|
||||
exponent = 8 * np.dtype(dtype).itemsize - 1
|
||||
arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype)
|
||||
hist, e = histogram(arr, bins=2)
|
||||
assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4])
|
||||
assert_equal(hist, [1, 1])
|
||||
|
||||
def test_signed_overflow_bounds(self):
|
||||
self.do_signed_overflow_bounds(np.byte)
|
||||
self.do_signed_overflow_bounds(np.short)
|
||||
self.do_signed_overflow_bounds(np.intc)
|
||||
self.do_signed_overflow_bounds(np.int_)
|
||||
self.do_signed_overflow_bounds(np.longlong)
|
||||
|
||||
def do_precision_lower_bound(self, float_small, float_large):
|
||||
eps = np.finfo(float_large).eps
|
||||
|
||||
arr = np.array([1.0], float_small)
|
||||
range = np.array([1.0 + eps, 2.0], float_large)
|
||||
|
||||
# test is looking for behavior when the bounds change between dtypes
|
||||
if range.astype(float_small)[0] != 1:
|
||||
return
|
||||
|
||||
# previously crashed
|
||||
count, x_loc = np.histogram(arr, bins=1, range=range)
|
||||
assert_equal(count, [1])
|
||||
|
||||
# gh-10322 means that the type comes from arr - this may change
|
||||
assert_equal(x_loc.dtype, float_small)
|
||||
|
||||
def do_precision_upper_bound(self, float_small, float_large):
|
||||
eps = np.finfo(float_large).eps
|
||||
|
||||
arr = np.array([1.0], float_small)
|
||||
range = np.array([0.0, 1.0 - eps], float_large)
|
||||
|
||||
# test is looking for behavior when the bounds change between dtypes
|
||||
if range.astype(float_small)[-1] != 1:
|
||||
return
|
||||
|
||||
# previously crashed
|
||||
count, x_loc = np.histogram(arr, bins=1, range=range)
|
||||
assert_equal(count, [1])
|
||||
|
||||
# gh-10322 means that the type comes from arr - this may change
|
||||
assert_equal(x_loc.dtype, float_small)
|
||||
|
||||
def do_precision(self, float_small, float_large):
|
||||
self.do_precision_lower_bound(float_small, float_large)
|
||||
self.do_precision_upper_bound(float_small, float_large)
|
||||
|
||||
def test_precision(self):
|
||||
# not looping results in a useful stack trace upon failure
|
||||
self.do_precision(np.half, np.single)
|
||||
self.do_precision(np.half, np.double)
|
||||
self.do_precision(np.half, np.longdouble)
|
||||
self.do_precision(np.single, np.double)
|
||||
self.do_precision(np.single, np.longdouble)
|
||||
self.do_precision(np.double, np.longdouble)
|
||||
|
||||
def test_histogram_bin_edges(self):
|
||||
hist, e = histogram([1, 2, 3, 4], [1, 2])
|
||||
edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
|
||||
assert_array_equal(edges, e)
|
||||
|
||||
arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
|
||||
hist, e = histogram(arr, bins=30, range=(-0.5, 5))
|
||||
edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5))
|
||||
assert_array_equal(edges, e)
|
||||
|
||||
hist, e = histogram(arr, bins='auto', range=(0, 1))
|
||||
edges = histogram_bin_edges(arr, bins='auto', range=(0, 1))
|
||||
assert_array_equal(edges, e)
|
||||
|
||||
|
||||
class TestHistogramOptimBinNums:
    """
    Provide test coverage when using provided estimators for optimal number of
    bins
    """

    def test_empty(self):
        """Every named estimator must cope with an empty sample."""
        estimator_list = ['fd', 'scott', 'rice', 'sturges',
                          'doane', 'sqrt', 'auto', 'stone']
        # check it can deal with empty data
        for estimator in estimator_list:
            a, b = histogram([], bins=estimator)
            assert_array_equal(a, np.array([0]))
            assert_array_equal(b, np.array([0, 1]))

    def test_simple(self):
        """
        Straightforward testing with a mixture of linspace data (for
        consistency). All test values have been precomputed and the values
        shouldn't change
        """
        # Some basic sanity checking, with some fixed data.
        # Checking for the correct number of bins
        basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
                           'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},
                      500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
                            'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},
                      5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
                             'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}

        for testlen, expectedResults in basic_test.items():
            # Create some sort of non uniform data to test with
            # (2 peak uniform mixture)
            x1 = np.linspace(-10, -1, testlen // 5 * 2)
            x2 = np.linspace(1, 10, testlen // 5 * 3)
            x = np.concatenate((x1, x2))
            for estimator, numbins in expectedResults.items():
                a, b = np.histogram(x, estimator)
                assert_equal(len(a), numbins, err_msg="For the {0} estimator "
                             "with datasize of {1}".format(estimator, testlen))

    def test_small(self):
        """
        Smaller datasets have the potential to cause issues with the data
        adaptive methods, especially the FD method. All bin numbers have been
        precalculated.
        """
        small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
                         'doane': 1, 'sqrt': 1, 'stone': 1},
                     2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
                         'doane': 1, 'sqrt': 2, 'stone': 1},
                     3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
                         'doane': 3, 'sqrt': 2, 'stone': 1}}

        for testlen, expectedResults in small_dat.items():
            testdat = np.arange(testlen)
            for estimator, expbins in expectedResults.items():
                a, b = np.histogram(testdat, estimator)
                assert_equal(len(a), expbins, err_msg="For the {0} estimator "
                             "with datasize of {1}".format(estimator, testlen))

    def test_incorrect_methods(self):
        """
        Check a Value Error is thrown when an unknown string is passed in
        """
        check_list = ['mad', 'freeman', 'histograms', 'IQR']
        for estimator in check_list:
            assert_raises(ValueError, histogram, [1, 2, 3], estimator)

    def test_novariance(self):
        """
        Check that methods handle no variance in data
        Primarily for Scott and FD as the SD and IQR are both 0 in this case
        """
        novar_dataset = np.ones(100)
        novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
                            'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}

        for estimator, numbins in novar_resultdict.items():
            a, b = np.histogram(novar_dataset, estimator)
            assert_equal(len(a), numbins, err_msg="{0} estimator, "
                         "No Variance test".format(estimator))

    def test_limited_variance(self):
        """
        Check when IQR is 0, but variance exists, we return the sturges value
        and not the fd value.
        """
        lim_var_data = np.ones(1000)
        lim_var_data[:3] = 0
        lim_var_data[-4:] = 100

        edges_auto = histogram_bin_edges(lim_var_data, 'auto')
        assert_equal(edges_auto, np.linspace(0, 100, 12))

        edges_fd = histogram_bin_edges(lim_var_data, 'fd')
        assert_equal(edges_fd, np.array([0, 100]))

        edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')
        assert_equal(edges_sturges, np.linspace(0, 100, 12))

    def test_outlier(self):
        """
        Check the FD, Scott and Doane with outliers.

        The FD estimates a smaller binwidth since it's less affected by
        outliers. Since the range is so (artificially) large, this means more
        bins, most of which will be empty, but the data of interest usually is
        unaffected. The Scott estimator is more affected and returns fewer bins,
        despite most of the variance being in one area of the data. The Doane
        estimator lies somewhere between the other two.
        """
        xcenter = np.linspace(-10, 10, 50)
        outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))

        outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6}

        for estimator, numbins in outlier_resultdict.items():
            a, b = np.histogram(outlier_dataset, estimator)
            assert_equal(len(a), numbins)

    def test_scott_vs_stone(self):
        """Verify that Scott's rule and Stone's rule converge for normally distributed data"""

        def nbins_ratio(seed, size):
            # Fraction of total bins that the 'stone' estimator produces;
            # 0.5 means the two rules agree exactly.
            rng = np.random.RandomState(seed)
            x = rng.normal(loc=0, scale=2, size=size)
            a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0])
            return a / (a + b)

        ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
              for seed in range(10)]

        # the average difference between the two methods decreases as the dataset size increases.
        avg = abs(np.mean(ll, axis=0) - 0.5)
        assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)

    def test_simple_range(self):
        """
        Straightforward testing with a mixture of linspace data (for
        consistency). Adding in a 3rd mixture that will then be
        completely ignored. All test values have been precomputed and
        they shouldn't change.
        """
        # some basic sanity checking, with some fixed data.
        # Checking for the correct number of bins
        basic_test = {
                50:   {'fd': 8,  'scott': 8,  'rice': 15,
                       'sturges': 14, 'auto': 14, 'stone': 8},
                500:  {'fd': 15, 'scott': 16, 'rice': 32,
                       'sturges': 20, 'auto': 20, 'stone': 80},
                5000: {'fd': 33, 'scott': 33, 'rice': 69,
                       'sturges': 27, 'auto': 33, 'stone': 80}
        }

        for testlen, expectedResults in basic_test.items():
            # create some sort of non uniform data to test with
            # (3 peak uniform mixture)
            x1 = np.linspace(-10, -1, testlen // 5 * 2)
            x2 = np.linspace(1, 10, testlen // 5 * 3)
            x3 = np.linspace(-100, -50, testlen)
            x = np.hstack((x1, x2, x3))
            for estimator, numbins in expectedResults.items():
                a, b = np.histogram(x, estimator, range = (-20, 20))
                msg = "For the {0} estimator".format(estimator)
                msg += " with datasize of {0}".format(testlen)
                assert_equal(len(a), numbins, err_msg=msg)

    @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
                                      'stone', 'rice', 'sturges'])
    def test_signed_integer_data(self, bins):
        # Regression test for gh-14379.
        a = np.array([-2, 0, 127], dtype=np.int8)
        hist, edges = np.histogram(a, bins=bins)
        hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins)
        assert_array_equal(hist, hist32)
        assert_array_equal(edges, edges32)

    def test_simple_weighted(self):
        """
        Check that weighted data raises a TypeError
        """
        estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
        for estimator in estimator_list:
            assert_raises(TypeError, histogram, [1, 2, 3],
                          estimator, weights=[1, 2, 3])
class TestHistogramdd:
    """Tests for np.histogramdd: shapes, weights, edges, and density modes."""

    def test_simple(self):
        """Basic counting, normalization, and accepted input layouts."""
        x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
                      [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
        H, edges = histogramdd(x, (2, 3, 3),
                               range=[[-1, 1], [0, 3], [0, 3]])
        answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
                           [[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
        assert_array_equal(H, answer)

        # Check normalization
        ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
        H, edges = histogramdd(x, bins=ed, density=True)
        assert_(np.all(H == answer / 12.))

        # Check that H has the correct shape.
        H, edges = histogramdd(x, (2, 3, 4),
                               range=[[-1, 1], [0, 3], [0, 4]],
                               density=True)
        answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
                           [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
        assert_array_almost_equal(H, answer / 6., 4)
        # Check that a sequence of arrays is accepted and H has the correct
        # shape.
        z = [np.squeeze(y) for y in np.split(x, 3, axis=1)]
        H, edges = histogramdd(
            z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
        answer = np.array([[[0, 0], [0, 0], [0, 0]],
                           [[0, 1], [0, 0], [1, 0]],
                           [[0, 1], [0, 0], [0, 0]],
                           [[0, 0], [0, 0], [0, 0]]])
        assert_array_equal(H, answer)

        Z = np.zeros((5, 5, 5))
        Z[list(range(5)), list(range(5)), list(range(5))] = 1.
        H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
        assert_array_equal(H, Z)

    def test_shape_3d(self):
        # All possible permutations for bins of different lengths in 3D.
        bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
                (4, 5, 6))
        r = np.random.rand(10, 3)
        for b in bins:
            H, edges = histogramdd(r, b)
            assert_(H.shape == b)

    def test_shape_4d(self):
        # All possible permutations for bins of different lengths in 4D.
        bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
                (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
                (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
                (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
                (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
                (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))

        r = np.random.rand(10, 4)
        for b in bins:
            H, edges = histogramdd(r, b)
            assert_(H.shape == b)

    def test_weights(self):
        """Uniform weights must reproduce the unweighted result (scaled)."""
        v = np.random.rand(100, 2)
        hist, edges = histogramdd(v)
        n_hist, edges = histogramdd(v, density=True)
        w_hist, edges = histogramdd(v, weights=np.ones(100))
        assert_array_equal(w_hist, hist)
        w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True)
        assert_array_equal(w_hist, n_hist)
        w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
        assert_array_equal(w_hist, 2 * hist)

    def test_identical_samples(self):
        """Degenerate (all-equal) samples still get a sensible edge range."""
        x = np.zeros((10, 2), int)
        hist, edges = histogramdd(x, bins=2)
        assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))

    def test_empty(self):
        """Empty input yields an all-zero histogram of the requested shape."""
        a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
        assert_array_max_ulp(a, np.array([[0.]]))
        a, b = np.histogramdd([[], [], []], bins=2)
        assert_array_max_ulp(a, np.zeros((2, 2, 2)))

    def test_bins_errors(self):
        # There are two ways to specify bins. Check for the right errors
        # when mixing those.
        x = np.arange(8).reshape(2, 4)
        assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
        assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
        assert_raises(
            ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
        assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))

    def test_inf_edges(self):
        # Test using +/-inf bin edges works. See #1788.
        with np.errstate(invalid='ignore'):
            x = np.arange(6).reshape(3, 2)
            expected = np.array([[1, 0], [0, 1], [0, 1]])
            h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
            assert_allclose(h, expected)
            h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
            assert_allclose(h, expected)
            h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
            assert_allclose(h, expected)

    def test_rightmost_binedge(self):
        # Test event very close to rightmost binedge. See Github issue #4266
        x = [0.9999999995]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 1.)
        # exactly on the closed rightmost edge: counted in the last bin
        x = [1.0]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 1.)
        # beyond the rightmost edge: dropped entirely
        x = [1.0000000001]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 0.0)
        x = [1.0001]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 0.0)

    def test_finite_range(self):
        """Non-finite range entries (inf/nan) must raise ValueError."""
        vals = np.random.random((100, 3))
        histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
        assert_raises(ValueError, histogramdd, vals,
                      range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
        assert_raises(ValueError, histogramdd, vals,
                      range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])

    def test_equal_edges(self):
        """ Test that adjacent entries in an edge array can be equal """
        x = np.array([0, 1, 2])
        y = np.array([0, 1, 2])
        x_edges = np.array([0, 2, 2])
        y_edges = 1
        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))

        hist_expected = np.array([
            [2.],
            [1.],  # x == 2 falls in the final bin
        ])
        assert_equal(hist, hist_expected)

    def test_edge_dtype(self):
        """ Test that if an edge array is input, its type is preserved """
        x = np.array([0, 10, 20])
        y = x / 10
        x_edges = np.array([0, 5, 15, 20])
        y_edges = x_edges / 10
        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))

        assert_equal(edges[0].dtype, x_edges.dtype)
        assert_equal(edges[1].dtype, y_edges.dtype)

    def test_large_integers(self):
        big = 2**60  # Too large to represent with a full precision float

        x = np.array([0], np.int64)
        x_edges = np.array([-1, +1], np.int64)
        y = big + x
        y_edges = big + x_edges

        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))

        assert_equal(hist[0, 0], 1)

    def test_density_non_uniform_2d(self):
        # Defines the following grid:
        #
        #    0 2     8
        #   0+-+-----+
        #    + |     +
        #    + |     +
        #   6+-+-----+
        #   8+-+-----+
        x_edges = np.array([0, 2, 8])
        y_edges = np.array([0, 6, 8])
        relative_areas = np.array([
            [3, 9],
            [1, 3]])

        # ensure the number of points in each region is proportional to its area
        x = np.array([1] + [1]*3 + [7]*3 + [7]*9)
        y = np.array([7] + [1]*3 + [7]*3 + [1]*9)

        # sanity check that the above worked as intended
        hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
        assert_equal(hist, relative_areas)

        # resulting histogram should be uniform, since counts and areas are proportional
        hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)
        assert_equal(hist, 1 / (8*8))

    def test_density_non_uniform_1d(self):
        # compare to histogram to show the results are the same
        v = np.arange(10)
        bins = np.array([0, 1, 3, 6, 10])
        hist, edges = histogram(v, bins, density=True)
        hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
        assert_equal(hist, hist_dd)
        assert_equal(edges, edges_dd[0])

    def test_density_via_normed(self):
        # normed should simply alias to density argument
        v = np.arange(10)
        bins = np.array([0, 1, 3, 6, 10])
        hist, edges = histogram(v, bins, density=True)
        hist_dd, edges_dd = histogramdd((v,), (bins,), normed=True)
        assert_equal(hist, hist_dd)
        assert_equal(edges, edges_dd[0])

    def test_density_normed_redundancy(self):
        # passing both density and normed must be rejected
        v = np.arange(10)
        bins = np.array([0, 1, 3, 6, 10])
        with assert_raises_regex(TypeError, "Cannot specify both"):
            hist_dd, edges_dd = histogramdd((v,), (bins,),
                                            density=True,
                                            normed=True)
498
venv/Lib/site-packages/numpy/lib/tests/test_index_tricks.py
Normal file
498
venv/Lib/site-packages/numpy/lib/tests/test_index_tricks.py
Normal file
|
|
@ -0,0 +1,498 @@
|
|||
import pytest
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (
|
||||
assert_, assert_equal, assert_array_equal, assert_almost_equal,
|
||||
assert_array_almost_equal, assert_raises, assert_raises_regex,
|
||||
assert_warns
|
||||
)
|
||||
from numpy.lib.index_tricks import (
|
||||
mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
|
||||
index_exp, ndindex, r_, s_, ix_
|
||||
)
|
||||
|
||||
|
||||
class TestRavelUnravelIndex:
    """Tests for np.ravel_multi_index / np.unravel_index round-tripping."""

    def test_basic(self):
        assert_equal(np.unravel_index(2, (2, 2)), (1, 0))

        # test backwards compatibility with older dims
        # keyword argument; see Issue #10586
        with assert_warns(DeprecationWarning):
            # we should achieve the correct result
            # AND raise the appropriate warning
            # when using older "dims" kw argument
            assert_equal(np.unravel_index(indices=2,
                                          dims=(2, 2)),
                         (1, 0))

        # test that new shape argument works properly
        assert_equal(np.unravel_index(indices=2,
                                      shape=(2, 2)),
                     (1, 0))

        # test that an invalid second keyword argument
        # is properly handled, including the fact that the
        # misspellings below ("hape", "ims") are deliberate
        with assert_raises(TypeError):
            np.unravel_index(indices=2, hape=(2, 2))

        with assert_raises(TypeError):
            np.unravel_index(2, hape=(2, 2))

        with assert_raises(TypeError):
            np.unravel_index(254, ims=(17, 94))

        assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
        assert_equal(np.unravel_index(254, (17, 94)), (2, 66))
        assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254)
        assert_raises(ValueError, np.unravel_index, -1, (2, 2))
        assert_raises(TypeError, np.unravel_index, 0.5, (2, 2))
        assert_raises(ValueError, np.unravel_index, 4, (2, 2))
        assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2))
        assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2))
        assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2))
        assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2))
        assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2))

        assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4])
        assert_equal(
            np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4)

        arr = np.array([[3, 6, 6], [4, 5, 1]])
        assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37])
        assert_equal(
            np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13])
        assert_equal(
            np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19])
        assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')),
                     [12, 13, 13])
        assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621)

        assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)),
                     [[3, 6, 6], [4, 5, 1]])
        assert_equal(
            np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'),
            [[3, 6, 6], [4, 5, 1]])
        assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1])

    def test_empty_indices(self):
        """Empty index containers need an integral dtype to be accepted."""
        msg1 = 'indices must be integral: the provided empty sequence was'
        msg2 = 'only int indices permitted'
        assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5))
        assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5))
        assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]),
                            (10, 3, 5))
        assert_equal(np.unravel_index(np.array([],dtype=int), (10, 3, 5)),
                     [[], [], []])
        assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []),
                            (10, 3))
        assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']),
                            (10, 3))
        assert_raises_regex(TypeError, msg2, np.ravel_multi_index,
                            (np.array([]), np.array([])), (5, 3))
        assert_equal(np.ravel_multi_index(
                (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), [])
        assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int),
                                          (5, 3)), [])

    def test_big_indices(self):
        # ravel_multi_index for big indices (issue #7546)
        if np.intp == np.int64:
            arr = ([1, 29], [3, 5], [3, 117], [19, 2],
                   [2379, 1284], [2, 2], [0, 1])
            assert_equal(
                np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)),
                [5627771580, 117259570957])

        # test unravel_index for big indices (issue #9538)
        assert_raises(ValueError, np.unravel_index, 1, (2**32-1, 2**31+1))

        # test overflow checking for too big array (issue #7546)
        dummy_arr = ([0],[0])
        half_max = np.iinfo(np.intp).max // 2
        assert_equal(
            np.ravel_multi_index(dummy_arr, (half_max, 2)), [0])
        assert_raises(ValueError,
            np.ravel_multi_index, dummy_arr, (half_max+1, 2))
        assert_equal(
            np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0])
        assert_raises(ValueError,
            np.ravel_multi_index, dummy_arr, (half_max+1, 2), order='F')

    def test_dtypes(self):
        # Test with different data types
        for dtype in [np.int16, np.uint16, np.int32,
                      np.uint32, np.int64, np.uint64]:
            coords = np.array(
                [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype)
            shape = (5, 8)
            uncoords = 8*coords[0]+coords[1]
            assert_equal(np.ravel_multi_index(coords, shape), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape))
            uncoords = coords[0]+5*coords[1]
            assert_equal(
                np.ravel_multi_index(coords, shape, order='F'), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))

            coords = np.array(
                [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]],
                dtype=dtype)
            shape = (5, 8, 10)
            uncoords = 10*(8*coords[0]+coords[1])+coords[2]
            assert_equal(np.ravel_multi_index(coords, shape), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape))
            uncoords = coords[0]+5*(coords[1]+8*coords[2])
            assert_equal(
                np.ravel_multi_index(coords, shape, order='F'), uncoords)
            assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))

    def test_clipmodes(self):
        # Test clipmodes
        assert_equal(
            np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'),
            np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12)))
        assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12),
                                          mode=(
                                              'wrap', 'raise', 'clip', 'raise')),
                     np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12)))
        assert_raises(
            ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12))

    def test_writeability(self):
        # See gh-7269
        x, y = np.unravel_index([1, 2, 3], (4, 5))
        assert_(x.flags.writeable)
        assert_(y.flags.writeable)

    def test_0d(self):
        # gh-580
        x = np.unravel_index(0, ())
        assert_equal(x, ())

        assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ())
        assert_raises_regex(
            ValueError, "out of bounds", np.unravel_index, [1], ())

    @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"])
    def test_empty_array_ravel(self, mode):
        res = np.ravel_multi_index(
                    np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode)
        assert(res.shape == (0,))

        with assert_raises(ValueError):
            np.ravel_multi_index(
                    np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode)

    def test_empty_array_unravel(self):
        res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0))
        # res is a tuple of three empty arrays
        assert(len(res) == 3)
        assert(all(a.shape == (0,) for a in res))

        with assert_raises(ValueError):
            np.unravel_index([1], (2, 1, 0))
class TestGrid:
    """Tests for mgrid/ogrid slice semantics (real step vs complex count)."""

    def test_basic(self):
        # complex step 10j -> 10 points inclusive of both endpoints;
        # real step 0.1 -> half-open range like arange
        a = mgrid[-1:1:10j]
        b = mgrid[-1:1:0.1]
        assert_(a.shape == (10,))
        assert_(b.shape == (20,))
        assert_(a[0] == -1)
        assert_almost_equal(a[-1], 1)
        assert_(b[0] == -1)
        assert_almost_equal(b[1]-b[0], 0.1, 11)
        assert_almost_equal(b[-1], b[0]+19*0.1, 11)
        assert_almost_equal(a[1]-a[0], 2.0/9.0, 11)

    def test_linspace_equivalence(self):
        """mgrid with a complex step matches np.linspace output."""
        y, st = np.linspace(2, 10, retstep=True)
        assert_almost_equal(st, 8/49.0)
        assert_array_almost_equal(y, mgrid[2:10:50j], 13)

    def test_nd(self):
        c = mgrid[-1:1:10j, -2:2:10j]
        d = mgrid[-1:1:0.1, -2:2:0.2]
        assert_(c.shape == (2, 10, 10))
        assert_(d.shape == (2, 20, 20))
        assert_array_equal(c[0][0, :], -np.ones(10, 'd'))
        assert_array_equal(c[1][:, 0], -2*np.ones(10, 'd'))
        assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11)
        assert_array_almost_equal(c[1][:, -1], 2*np.ones(10, 'd'), 11)
        assert_array_almost_equal(d[0, 1, :] - d[0, 0, :],
                                  0.1*np.ones(20, 'd'), 11)
        assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],
                                  0.2*np.ones(20, 'd'), 11)

    def test_sparse(self):
        grid_full = mgrid[-1:1:10j, -2:2:10j]
        grid_sparse = ogrid[-1:1:10j, -2:2:10j]

        # sparse grids can be made dense by broadcasting
        grid_broadcast = np.broadcast_arrays(*grid_sparse)
        for f, b in zip(grid_full, grid_broadcast):
            assert_equal(f, b)

    @pytest.mark.parametrize("start, stop, step, expected", [
        (None, 10, 10j, (200, 10)),
        (-10, 20, None, (1800, 30)),
        ])
    def test_mgrid_size_none_handling(self, start, stop, step, expected):
        # regression test None value handling for
        # start and step values used by mgrid;
        # internally, this aims to cover previously
        # unexplored code paths in nd_grid()
        grid = mgrid[start:stop:step, start:stop:step]
        # need a smaller grid to explore one of the
        # untested code paths
        grid_small = mgrid[start:stop:step]
        assert_equal(grid.size, expected[0])
        assert_equal(grid_small.size, expected[1])
class TestConcatenator:
    """Tests for the r_ concatenation helper."""

    def test_1d(self):
        assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
        b = np.ones(5)
        c = r_[b, 0, 0, b]
        assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])

    def test_mixed_type(self):
        # mixing a float scalar with an int range upcasts to float64
        g = r_[10.1, 1:10]
        assert_(g.dtype == 'f8')

    def test_more_mixed_type(self):
        g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0]
        assert_(g.dtype == 'f8')

    def test_complex_step(self):
        # Regression test for #12262
        g = r_[0:36:100j]
        assert_(g.shape == (100,))

    def test_2d(self):
        b = np.random.rand(5, 5)
        c = np.random.rand(5, 5)
        d = r_['1', b, c]  # append columns
        assert_(d.shape == (5, 10))
        assert_array_equal(d[:, :5], b)
        assert_array_equal(d[:, 5:], c)
        d = r_[b, c]
        assert_(d.shape == (10, 5))
        assert_array_equal(d[:5, :], b)
        assert_array_equal(d[5:, :], c)

    def test_0d(self):
        # 0-d arrays are accepted alongside scalars and lists
        assert_equal(r_[0, np.array(1), 2], [0, 1, 2])
        assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3])
        assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3])
class TestNdenumerate:
    def test_basic(self):
        """ndenumerate yields ((index tuple), value) pairs in C order."""
        arr = np.array([[1, 2], [3, 4]])
        expected = [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]
        assert_equal(list(ndenumerate(arr)), expected)
class TestIndexExpression:
    def test_regression_1(self):
        """ticket #1196: s_/index_exp objects index exactly like slices."""
        a = np.arange(2)
        for expr in (s_[:-1], index_exp[:-1]):
            assert_equal(a[:-1], a[expr])

    def test_simple_1(self):
        """Mixed basic/fancy expressions round-trip through index_exp/s_."""
        a = np.random.rand(4, 5, 6)
        expected = a[:, :3, [1, 2]]
        for expr in (index_exp[:, :3, [1, 2]], s_[:, :3, [1, 2]]):
            assert_equal(expected, a[expr])
class TestIx_:
    """Tests for np.ix_ open-mesh index construction."""

    def test_regression_1(self):
        # Test empty untyped inputs create outputs of indexing type, gh-5804
        a, = np.ix_(range(0))
        assert_equal(a.dtype, np.intp)

        a, = np.ix_([])
        assert_equal(a.dtype, np.intp)

        # but if the type is specified, don't change it
        a, = np.ix_(np.array([], dtype=np.float32))
        assert_equal(a.dtype, np.float32)

    def test_shape_and_dtype(self):
        """Each output has size along its own axis and 1 along the others."""
        sizes = (4, 5, 3, 2)
        # Test both lists and arrays
        for func in (range, np.arange):
            arrays = np.ix_(*[func(sz) for sz in sizes])
            for k, (a, sz) in enumerate(zip(arrays, sizes)):
                assert_equal(a.shape[k], sz)
                assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))
                assert_(np.issubdtype(a.dtype, np.integer))

    def test_bool(self):
        # boolean inputs are converted to the indices of their True entries
        bool_a = [True, False, True, True]
        int_a, = np.nonzero(bool_a)
        assert_equal(np.ix_(bool_a)[0], int_a)

    def test_1d_only(self):
        idx2d = [[1, 2, 3], [4, 5, 6]]
        assert_raises(ValueError, np.ix_, idx2d)

    def test_repeated_input(self):
        length_of_vector = 5
        x = np.arange(length_of_vector)
        out = ix_(x, x)
        assert_equal(out[0].shape, (length_of_vector, 1))
        assert_equal(out[1].shape, (1, length_of_vector))
        # check that input shape is not modified
        assert_equal(x.shape, (length_of_vector,))
def test_c_():
    """np.c_ concatenates along the second axis, broadcasting scalars."""
    stacked = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]
    assert_equal(stacked, [[1, 2, 3, 0, 0, 4, 5, 6]])
class TestFillDiagonal:
    """Tests for np.fill_diagonal on square, tall, wide and n-d arrays."""

    def test_basic(self):
        a = np.zeros((3, 3), int)
        fill_diagonal(a, 5)
        assert_array_equal(
            a, np.array([[5, 0, 0],
                         [0, 5, 0],
                         [0, 0, 5]])
            )

    def test_tall_matrix(self):
        # without wrap, the diagonal stops at the last column
        a = np.zeros((10, 3), int)
        fill_diagonal(a, 5)
        assert_array_equal(
            a, np.array([[5, 0, 0],
                         [0, 5, 0],
                         [0, 0, 5],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0],
                         [0, 0, 0]])
            )

    def test_tall_matrix_wrap(self):
        # wrap=True restarts the diagonal after each full block of rows
        a = np.zeros((10, 3), int)
        fill_diagonal(a, 5, True)
        assert_array_equal(
            a, np.array([[5, 0, 0],
                         [0, 5, 0],
                         [0, 0, 5],
                         [0, 0, 0],
                         [5, 0, 0],
                         [0, 5, 0],
                         [0, 0, 5],
                         [0, 0, 0],
                         [5, 0, 0],
                         [0, 5, 0]])
            )

    def test_wide_matrix(self):
        a = np.zeros((3, 10), int)
        fill_diagonal(a, 5)
        assert_array_equal(
            a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
                         [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])
            )

    def test_operate_4d_array(self):
        # fills only the (i, i, i, i) "hyper-diagonal"
        a = np.zeros((3, 3, 3, 3), int)
        fill_diagonal(a, 4)
        i = np.array([0, 1, 2])
        assert_equal(np.where(a != 0), (i, i, i, i))

    def test_low_dim_handling(self):
        # raise error with low dimensionality
        a = np.zeros(3, int)
        with assert_raises_regex(ValueError, "at least 2-d"):
            fill_diagonal(a, 5)

    def test_hetero_shape_handling(self):
        # raise error with high dimensionality and
        # shape mismatch
        a = np.zeros((3,3,7,3), int)
        with assert_raises_regex(ValueError, "equal length"):
            fill_diagonal(a, 2)
def test_diag_indices():
|
||||
di = diag_indices(4)
|
||||
a = np.array([[1, 2, 3, 4],
|
||||
[5, 6, 7, 8],
|
||||
[9, 10, 11, 12],
|
||||
[13, 14, 15, 16]])
|
||||
a[di] = 100
|
||||
assert_array_equal(
|
||||
a, np.array([[100, 2, 3, 4],
|
||||
[5, 100, 7, 8],
|
||||
[9, 10, 100, 12],
|
||||
[13, 14, 15, 100]])
|
||||
)
|
||||
|
||||
# Now, we create indices to manipulate a 3-d array:
|
||||
d3 = diag_indices(2, 3)
|
||||
|
||||
# And use it to set the diagonal of a zeros array to 1:
|
||||
a = np.zeros((2, 2, 2), int)
|
||||
a[d3] = 1
|
||||
assert_array_equal(
|
||||
a, np.array([[[1, 0],
|
||||
[0, 0]],
|
||||
[[0, 0],
|
||||
[0, 1]]])
|
||||
)
|
||||
|
||||
|
||||
class TestDiagIndicesFrom:
|
||||
|
||||
def test_diag_indices_from(self):
|
||||
x = np.random.random((4, 4))
|
||||
r, c = diag_indices_from(x)
|
||||
assert_array_equal(r, np.arange(4))
|
||||
assert_array_equal(c, np.arange(4))
|
||||
|
||||
def test_error_small_input(self):
|
||||
x = np.ones(7)
|
||||
with assert_raises_regex(ValueError, "at least 2-d"):
|
||||
diag_indices_from(x)
|
||||
|
||||
def test_error_shape_mismatch(self):
|
||||
x = np.zeros((3, 3, 2, 3), int)
|
||||
with assert_raises_regex(ValueError, "equal length"):
|
||||
diag_indices_from(x)
|
||||
|
||||
|
||||
def test_ndindex():
|
||||
x = list(ndindex(1, 2, 3))
|
||||
expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))]
|
||||
assert_array_equal(x, expected)
|
||||
|
||||
x = list(ndindex((1, 2, 3)))
|
||||
assert_array_equal(x, expected)
|
||||
|
||||
# Test use of scalars and tuples
|
||||
x = list(ndindex((3,)))
|
||||
assert_array_equal(x, list(ndindex(3)))
|
||||
|
||||
# Make sure size argument is optional
|
||||
x = list(ndindex())
|
||||
assert_equal(x, [()])
|
||||
|
||||
x = list(ndindex(()))
|
||||
assert_equal(x, [()])
|
||||
|
||||
# Make sure 0-sized ndindex works correctly
|
||||
x = list(ndindex(*[0]))
|
||||
assert_equal(x, [])
|
||||
2614
venv/Lib/site-packages/numpy/lib/tests/test_io.py
Normal file
2614
venv/Lib/site-packages/numpy/lib/tests/test_io.py
Normal file
File diff suppressed because it is too large
Load diff
216
venv/Lib/site-packages/numpy/lib/tests/test_mixins.py
Normal file
216
venv/Lib/site-packages/numpy/lib/tests/test_mixins.py
Normal file
|
|
@ -0,0 +1,216 @@
|
|||
import numbers
|
||||
import operator
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import assert_, assert_equal, assert_raises
|
||||
|
||||
|
||||
# NOTE: This class should be kept as an exact copy of the example from the
|
||||
# docstring for NDArrayOperatorsMixin.
|
||||
|
||||
class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
|
||||
def __init__(self, value):
|
||||
self.value = np.asarray(value)
|
||||
|
||||
# One might also consider adding the built-in list type to this
|
||||
# list, to support operations like np.add(array_like, list)
|
||||
_HANDLED_TYPES = (np.ndarray, numbers.Number)
|
||||
|
||||
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
|
||||
out = kwargs.get('out', ())
|
||||
for x in inputs + out:
|
||||
# Only support operations with instances of _HANDLED_TYPES.
|
||||
# Use ArrayLike instead of type(self) for isinstance to
|
||||
# allow subclasses that don't override __array_ufunc__ to
|
||||
# handle ArrayLike objects.
|
||||
if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
|
||||
return NotImplemented
|
||||
|
||||
# Defer to the implementation of the ufunc on unwrapped values.
|
||||
inputs = tuple(x.value if isinstance(x, ArrayLike) else x
|
||||
for x in inputs)
|
||||
if out:
|
||||
kwargs['out'] = tuple(
|
||||
x.value if isinstance(x, ArrayLike) else x
|
||||
for x in out)
|
||||
result = getattr(ufunc, method)(*inputs, **kwargs)
|
||||
|
||||
if type(result) is tuple:
|
||||
# multiple return values
|
||||
return tuple(type(self)(x) for x in result)
|
||||
elif method == 'at':
|
||||
# no return value
|
||||
return None
|
||||
else:
|
||||
# one return value
|
||||
return type(self)(result)
|
||||
|
||||
def __repr__(self):
|
||||
return '%s(%r)' % (type(self).__name__, self.value)
|
||||
|
||||
|
||||
def wrap_array_like(result):
|
||||
if type(result) is tuple:
|
||||
return tuple(ArrayLike(r) for r in result)
|
||||
else:
|
||||
return ArrayLike(result)
|
||||
|
||||
|
||||
def _assert_equal_type_and_value(result, expected, err_msg=None):
|
||||
assert_equal(type(result), type(expected), err_msg=err_msg)
|
||||
if isinstance(result, tuple):
|
||||
assert_equal(len(result), len(expected), err_msg=err_msg)
|
||||
for result_item, expected_item in zip(result, expected):
|
||||
_assert_equal_type_and_value(result_item, expected_item, err_msg)
|
||||
else:
|
||||
assert_equal(result.value, expected.value, err_msg=err_msg)
|
||||
assert_equal(getattr(result.value, 'dtype', None),
|
||||
getattr(expected.value, 'dtype', None), err_msg=err_msg)
|
||||
|
||||
|
||||
_ALL_BINARY_OPERATORS = [
|
||||
operator.lt,
|
||||
operator.le,
|
||||
operator.eq,
|
||||
operator.ne,
|
||||
operator.gt,
|
||||
operator.ge,
|
||||
operator.add,
|
||||
operator.sub,
|
||||
operator.mul,
|
||||
operator.truediv,
|
||||
operator.floordiv,
|
||||
operator.mod,
|
||||
divmod,
|
||||
pow,
|
||||
operator.lshift,
|
||||
operator.rshift,
|
||||
operator.and_,
|
||||
operator.xor,
|
||||
operator.or_,
|
||||
]
|
||||
|
||||
|
||||
class TestNDArrayOperatorsMixin:
|
||||
|
||||
def test_array_like_add(self):
|
||||
|
||||
def check(result):
|
||||
_assert_equal_type_and_value(result, ArrayLike(0))
|
||||
|
||||
check(ArrayLike(0) + 0)
|
||||
check(0 + ArrayLike(0))
|
||||
|
||||
check(ArrayLike(0) + np.array(0))
|
||||
check(np.array(0) + ArrayLike(0))
|
||||
|
||||
check(ArrayLike(np.array(0)) + 0)
|
||||
check(0 + ArrayLike(np.array(0)))
|
||||
|
||||
check(ArrayLike(np.array(0)) + np.array(0))
|
||||
check(np.array(0) + ArrayLike(np.array(0)))
|
||||
|
||||
def test_inplace(self):
|
||||
array_like = ArrayLike(np.array([0]))
|
||||
array_like += 1
|
||||
_assert_equal_type_and_value(array_like, ArrayLike(np.array([1])))
|
||||
|
||||
array = np.array([0])
|
||||
array += ArrayLike(1)
|
||||
_assert_equal_type_and_value(array, ArrayLike(np.array([1])))
|
||||
|
||||
def test_opt_out(self):
|
||||
|
||||
class OptOut:
|
||||
"""Object that opts out of __array_ufunc__."""
|
||||
__array_ufunc__ = None
|
||||
|
||||
def __add__(self, other):
|
||||
return self
|
||||
|
||||
def __radd__(self, other):
|
||||
return self
|
||||
|
||||
array_like = ArrayLike(1)
|
||||
opt_out = OptOut()
|
||||
|
||||
# supported operations
|
||||
assert_(array_like + opt_out is opt_out)
|
||||
assert_(opt_out + array_like is opt_out)
|
||||
|
||||
# not supported
|
||||
with assert_raises(TypeError):
|
||||
# don't use the Python default, array_like = array_like + opt_out
|
||||
array_like += opt_out
|
||||
with assert_raises(TypeError):
|
||||
array_like - opt_out
|
||||
with assert_raises(TypeError):
|
||||
opt_out - array_like
|
||||
|
||||
def test_subclass(self):
|
||||
|
||||
class SubArrayLike(ArrayLike):
|
||||
"""Should take precedence over ArrayLike."""
|
||||
|
||||
x = ArrayLike(0)
|
||||
y = SubArrayLike(1)
|
||||
_assert_equal_type_and_value(x + y, y)
|
||||
_assert_equal_type_and_value(y + x, y)
|
||||
|
||||
def test_object(self):
|
||||
x = ArrayLike(0)
|
||||
obj = object()
|
||||
with assert_raises(TypeError):
|
||||
x + obj
|
||||
with assert_raises(TypeError):
|
||||
obj + x
|
||||
with assert_raises(TypeError):
|
||||
x += obj
|
||||
|
||||
def test_unary_methods(self):
|
||||
array = np.array([-1, 0, 1, 2])
|
||||
array_like = ArrayLike(array)
|
||||
for op in [operator.neg,
|
||||
operator.pos,
|
||||
abs,
|
||||
operator.invert]:
|
||||
_assert_equal_type_and_value(op(array_like), ArrayLike(op(array)))
|
||||
|
||||
def test_forward_binary_methods(self):
|
||||
array = np.array([-1, 0, 1, 2])
|
||||
array_like = ArrayLike(array)
|
||||
for op in _ALL_BINARY_OPERATORS:
|
||||
expected = wrap_array_like(op(array, 1))
|
||||
actual = op(array_like, 1)
|
||||
err_msg = 'failed for operator {}'.format(op)
|
||||
_assert_equal_type_and_value(expected, actual, err_msg=err_msg)
|
||||
|
||||
def test_reflected_binary_methods(self):
|
||||
for op in _ALL_BINARY_OPERATORS:
|
||||
expected = wrap_array_like(op(2, 1))
|
||||
actual = op(2, ArrayLike(1))
|
||||
err_msg = 'failed for operator {}'.format(op)
|
||||
_assert_equal_type_and_value(expected, actual, err_msg=err_msg)
|
||||
|
||||
def test_matmul(self):
|
||||
array = np.array([1, 2], dtype=np.float64)
|
||||
array_like = ArrayLike(array)
|
||||
expected = ArrayLike(np.float64(5))
|
||||
_assert_equal_type_and_value(expected, np.matmul(array_like, array))
|
||||
_assert_equal_type_and_value(
|
||||
expected, operator.matmul(array_like, array))
|
||||
_assert_equal_type_and_value(
|
||||
expected, operator.matmul(array, array_like))
|
||||
|
||||
def test_ufunc_at(self):
|
||||
array = ArrayLike(np.array([1, 2, 3, 4]))
|
||||
assert_(np.negative.at(array, np.array([0, 1])) is None)
|
||||
_assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4]))
|
||||
|
||||
def test_ufunc_two_outputs(self):
|
||||
mantissa, exponent = np.frexp(2 ** -3)
|
||||
expected = (ArrayLike(mantissa), ArrayLike(exponent))
|
||||
_assert_equal_type_and_value(
|
||||
np.frexp(ArrayLike(2 ** -3)), expected)
|
||||
_assert_equal_type_and_value(
|
||||
np.frexp(ArrayLike(np.array(2 ** -3))), expected)
|
||||
980
venv/Lib/site-packages/numpy/lib/tests/test_nanfunctions.py
Normal file
980
venv/Lib/site-packages/numpy/lib/tests/test_nanfunctions.py
Normal file
|
|
@ -0,0 +1,980 @@
|
|||
import warnings
|
||||
import pytest
|
||||
|
||||
import numpy as np
|
||||
from numpy.lib.nanfunctions import _nan_mask, _replace_nan
|
||||
from numpy.testing import (
|
||||
assert_, assert_equal, assert_almost_equal, assert_no_warnings,
|
||||
assert_raises, assert_array_equal, suppress_warnings
|
||||
)
|
||||
|
||||
|
||||
# Test data
|
||||
_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
|
||||
[0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
|
||||
[np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
|
||||
[0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
|
||||
|
||||
|
||||
# Rows of _ndat with nans removed
|
||||
_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
|
||||
np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
|
||||
np.array([0.1042, -0.5954]),
|
||||
np.array([0.1610, 0.1859, 0.3146])]
|
||||
|
||||
# Rows of _ndat with nans converted to ones
|
||||
_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170],
|
||||
[0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833],
|
||||
[1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954],
|
||||
[0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]])
|
||||
|
||||
# Rows of _ndat with nans converted to zeros
|
||||
_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
|
||||
[0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833],
|
||||
[0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954],
|
||||
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
|
||||
|
||||
|
||||
class TestNanFunctions_MinMax:
|
||||
|
||||
nanfuncs = [np.nanmin, np.nanmax]
|
||||
stdfuncs = [np.min, np.max]
|
||||
|
||||
def test_mutation(self):
|
||||
# Check that passed array is not modified.
|
||||
ndat = _ndat.copy()
|
||||
for f in self.nanfuncs:
|
||||
f(ndat)
|
||||
assert_equal(ndat, _ndat)
|
||||
|
||||
def test_keepdims(self):
|
||||
mat = np.eye(3)
|
||||
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
|
||||
for axis in [None, 0, 1]:
|
||||
tgt = rf(mat, axis=axis, keepdims=True)
|
||||
res = nf(mat, axis=axis, keepdims=True)
|
||||
assert_(res.ndim == tgt.ndim)
|
||||
|
||||
def test_out(self):
|
||||
mat = np.eye(3)
|
||||
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
|
||||
resout = np.zeros(3)
|
||||
tgt = rf(mat, axis=1)
|
||||
res = nf(mat, axis=1, out=resout)
|
||||
assert_almost_equal(res, resout)
|
||||
assert_almost_equal(res, tgt)
|
||||
|
||||
def test_dtype_from_input(self):
|
||||
codes = 'efdgFDG'
|
||||
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
|
||||
for c in codes:
|
||||
mat = np.eye(3, dtype=c)
|
||||
tgt = rf(mat, axis=1).dtype.type
|
||||
res = nf(mat, axis=1).dtype.type
|
||||
assert_(res is tgt)
|
||||
# scalar case
|
||||
tgt = rf(mat, axis=None).dtype.type
|
||||
res = nf(mat, axis=None).dtype.type
|
||||
assert_(res is tgt)
|
||||
|
||||
def test_result_values(self):
|
||||
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
|
||||
tgt = [rf(d) for d in _rdat]
|
||||
res = nf(_ndat, axis=1)
|
||||
assert_almost_equal(res, tgt)
|
||||
|
||||
def test_allnans(self):
|
||||
mat = np.array([np.nan]*9).reshape(3, 3)
|
||||
for f in self.nanfuncs:
|
||||
for axis in [None, 0, 1]:
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
assert_(np.isnan(f(mat, axis=axis)).all())
|
||||
assert_(len(w) == 1, 'no warning raised')
|
||||
assert_(issubclass(w[0].category, RuntimeWarning))
|
||||
# Check scalars
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
assert_(np.isnan(f(np.nan)))
|
||||
assert_(len(w) == 1, 'no warning raised')
|
||||
assert_(issubclass(w[0].category, RuntimeWarning))
|
||||
|
||||
def test_masked(self):
|
||||
mat = np.ma.fix_invalid(_ndat)
|
||||
msk = mat._mask.copy()
|
||||
for f in [np.nanmin]:
|
||||
res = f(mat, axis=1)
|
||||
tgt = f(_ndat, axis=1)
|
||||
assert_equal(res, tgt)
|
||||
assert_equal(mat._mask, msk)
|
||||
assert_(not np.isinf(mat).any())
|
||||
|
||||
def test_scalar(self):
|
||||
for f in self.nanfuncs:
|
||||
assert_(f(0.) == 0.)
|
||||
|
||||
def test_subclass(self):
|
||||
class MyNDArray(np.ndarray):
|
||||
pass
|
||||
|
||||
# Check that it works and that type and
|
||||
# shape are preserved
|
||||
mine = np.eye(3).view(MyNDArray)
|
||||
for f in self.nanfuncs:
|
||||
res = f(mine, axis=0)
|
||||
assert_(isinstance(res, MyNDArray))
|
||||
assert_(res.shape == (3,))
|
||||
res = f(mine, axis=1)
|
||||
assert_(isinstance(res, MyNDArray))
|
||||
assert_(res.shape == (3,))
|
||||
res = f(mine)
|
||||
assert_(res.shape == ())
|
||||
|
||||
# check that rows of nan are dealt with for subclasses (#4628)
|
||||
mine[1] = np.nan
|
||||
for f in self.nanfuncs:
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
res = f(mine, axis=0)
|
||||
assert_(isinstance(res, MyNDArray))
|
||||
assert_(not np.any(np.isnan(res)))
|
||||
assert_(len(w) == 0)
|
||||
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
res = f(mine, axis=1)
|
||||
assert_(isinstance(res, MyNDArray))
|
||||
assert_(np.isnan(res[1]) and not np.isnan(res[0])
|
||||
and not np.isnan(res[2]))
|
||||
assert_(len(w) == 1, 'no warning raised')
|
||||
assert_(issubclass(w[0].category, RuntimeWarning))
|
||||
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
res = f(mine)
|
||||
assert_(res.shape == ())
|
||||
assert_(res != np.nan)
|
||||
assert_(len(w) == 0)
|
||||
|
||||
def test_object_array(self):
|
||||
arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object)
|
||||
assert_equal(np.nanmin(arr), 1.0)
|
||||
assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0])
|
||||
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
# assert_equal does not work on object arrays of nan
|
||||
assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan])
|
||||
assert_(len(w) == 1, 'no warning raised')
|
||||
assert_(issubclass(w[0].category, RuntimeWarning))
|
||||
|
||||
|
||||
class TestNanFunctions_ArgminArgmax:
|
||||
|
||||
nanfuncs = [np.nanargmin, np.nanargmax]
|
||||
|
||||
def test_mutation(self):
|
||||
# Check that passed array is not modified.
|
||||
ndat = _ndat.copy()
|
||||
for f in self.nanfuncs:
|
||||
f(ndat)
|
||||
assert_equal(ndat, _ndat)
|
||||
|
||||
def test_result_values(self):
|
||||
for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
|
||||
for row in _ndat:
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(RuntimeWarning, "invalid value encountered in")
|
||||
ind = f(row)
|
||||
val = row[ind]
|
||||
# comparing with NaN is tricky as the result
|
||||
# is always false except for NaN != NaN
|
||||
assert_(not np.isnan(val))
|
||||
assert_(not fcmp(val, row).any())
|
||||
assert_(not np.equal(val, row[:ind]).any())
|
||||
|
||||
def test_allnans(self):
|
||||
mat = np.array([np.nan]*9).reshape(3, 3)
|
||||
for f in self.nanfuncs:
|
||||
for axis in [None, 0, 1]:
|
||||
assert_raises(ValueError, f, mat, axis=axis)
|
||||
assert_raises(ValueError, f, np.nan)
|
||||
|
||||
def test_empty(self):
|
||||
mat = np.zeros((0, 3))
|
||||
for f in self.nanfuncs:
|
||||
for axis in [0, None]:
|
||||
assert_raises(ValueError, f, mat, axis=axis)
|
||||
for axis in [1]:
|
||||
res = f(mat, axis=axis)
|
||||
assert_equal(res, np.zeros(0))
|
||||
|
||||
def test_scalar(self):
|
||||
for f in self.nanfuncs:
|
||||
assert_(f(0.) == 0.)
|
||||
|
||||
def test_subclass(self):
|
||||
class MyNDArray(np.ndarray):
|
||||
pass
|
||||
|
||||
# Check that it works and that type and
|
||||
# shape are preserved
|
||||
mine = np.eye(3).view(MyNDArray)
|
||||
for f in self.nanfuncs:
|
||||
res = f(mine, axis=0)
|
||||
assert_(isinstance(res, MyNDArray))
|
||||
assert_(res.shape == (3,))
|
||||
res = f(mine, axis=1)
|
||||
assert_(isinstance(res, MyNDArray))
|
||||
assert_(res.shape == (3,))
|
||||
res = f(mine)
|
||||
assert_(res.shape == ())
|
||||
|
||||
|
||||
class TestNanFunctions_IntTypes:
|
||||
|
||||
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
|
||||
np.uint16, np.uint32, np.uint64)
|
||||
|
||||
mat = np.array([127, 39, 93, 87, 46])
|
||||
|
||||
def integer_arrays(self):
|
||||
for dtype in self.int_types:
|
||||
yield self.mat.astype(dtype)
|
||||
|
||||
def test_nanmin(self):
|
||||
tgt = np.min(self.mat)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nanmin(mat), tgt)
|
||||
|
||||
def test_nanmax(self):
|
||||
tgt = np.max(self.mat)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nanmax(mat), tgt)
|
||||
|
||||
def test_nanargmin(self):
|
||||
tgt = np.argmin(self.mat)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nanargmin(mat), tgt)
|
||||
|
||||
def test_nanargmax(self):
|
||||
tgt = np.argmax(self.mat)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nanargmax(mat), tgt)
|
||||
|
||||
def test_nansum(self):
|
||||
tgt = np.sum(self.mat)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nansum(mat), tgt)
|
||||
|
||||
def test_nanprod(self):
|
||||
tgt = np.prod(self.mat)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nanprod(mat), tgt)
|
||||
|
||||
def test_nancumsum(self):
|
||||
tgt = np.cumsum(self.mat)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nancumsum(mat), tgt)
|
||||
|
||||
def test_nancumprod(self):
|
||||
tgt = np.cumprod(self.mat)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nancumprod(mat), tgt)
|
||||
|
||||
def test_nanmean(self):
|
||||
tgt = np.mean(self.mat)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nanmean(mat), tgt)
|
||||
|
||||
def test_nanvar(self):
|
||||
tgt = np.var(self.mat)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nanvar(mat), tgt)
|
||||
|
||||
tgt = np.var(mat, ddof=1)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nanvar(mat, ddof=1), tgt)
|
||||
|
||||
def test_nanstd(self):
|
||||
tgt = np.std(self.mat)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nanstd(mat), tgt)
|
||||
|
||||
tgt = np.std(self.mat, ddof=1)
|
||||
for mat in self.integer_arrays():
|
||||
assert_equal(np.nanstd(mat, ddof=1), tgt)
|
||||
|
||||
|
||||
class SharedNanFunctionsTestsMixin:
|
||||
def test_mutation(self):
|
||||
# Check that passed array is not modified.
|
||||
ndat = _ndat.copy()
|
||||
for f in self.nanfuncs:
|
||||
f(ndat)
|
||||
assert_equal(ndat, _ndat)
|
||||
|
||||
def test_keepdims(self):
|
||||
mat = np.eye(3)
|
||||
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
|
||||
for axis in [None, 0, 1]:
|
||||
tgt = rf(mat, axis=axis, keepdims=True)
|
||||
res = nf(mat, axis=axis, keepdims=True)
|
||||
assert_(res.ndim == tgt.ndim)
|
||||
|
||||
def test_out(self):
|
||||
mat = np.eye(3)
|
||||
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
|
||||
resout = np.zeros(3)
|
||||
tgt = rf(mat, axis=1)
|
||||
res = nf(mat, axis=1, out=resout)
|
||||
assert_almost_equal(res, resout)
|
||||
assert_almost_equal(res, tgt)
|
||||
|
||||
def test_dtype_from_dtype(self):
|
||||
mat = np.eye(3)
|
||||
codes = 'efdgFDG'
|
||||
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
|
||||
for c in codes:
|
||||
with suppress_warnings() as sup:
|
||||
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
|
||||
# Giving the warning is a small bug, see gh-8000
|
||||
sup.filter(np.ComplexWarning)
|
||||
tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
|
||||
res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
|
||||
assert_(res is tgt)
|
||||
# scalar case
|
||||
tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
|
||||
res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
|
||||
assert_(res is tgt)
|
||||
|
||||
def test_dtype_from_char(self):
|
||||
mat = np.eye(3)
|
||||
codes = 'efdgFDG'
|
||||
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
|
||||
for c in codes:
|
||||
with suppress_warnings() as sup:
|
||||
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
|
||||
# Giving the warning is a small bug, see gh-8000
|
||||
sup.filter(np.ComplexWarning)
|
||||
tgt = rf(mat, dtype=c, axis=1).dtype.type
|
||||
res = nf(mat, dtype=c, axis=1).dtype.type
|
||||
assert_(res is tgt)
|
||||
# scalar case
|
||||
tgt = rf(mat, dtype=c, axis=None).dtype.type
|
||||
res = nf(mat, dtype=c, axis=None).dtype.type
|
||||
assert_(res is tgt)
|
||||
|
||||
def test_dtype_from_input(self):
|
||||
codes = 'efdgFDG'
|
||||
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
|
||||
for c in codes:
|
||||
mat = np.eye(3, dtype=c)
|
||||
tgt = rf(mat, axis=1).dtype.type
|
||||
res = nf(mat, axis=1).dtype.type
|
||||
assert_(res is tgt, "res %s, tgt %s" % (res, tgt))
|
||||
# scalar case
|
||||
tgt = rf(mat, axis=None).dtype.type
|
||||
res = nf(mat, axis=None).dtype.type
|
||||
assert_(res is tgt)
|
||||
|
||||
def test_result_values(self):
|
||||
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
|
||||
tgt = [rf(d) for d in _rdat]
|
||||
res = nf(_ndat, axis=1)
|
||||
assert_almost_equal(res, tgt)
|
||||
|
||||
def test_scalar(self):
|
||||
for f in self.nanfuncs:
|
||||
assert_(f(0.) == 0.)
|
||||
|
||||
def test_subclass(self):
|
||||
class MyNDArray(np.ndarray):
|
||||
pass
|
||||
|
||||
# Check that it works and that type and
|
||||
# shape are preserved
|
||||
array = np.eye(3)
|
||||
mine = array.view(MyNDArray)
|
||||
for f in self.nanfuncs:
|
||||
expected_shape = f(array, axis=0).shape
|
||||
res = f(mine, axis=0)
|
||||
assert_(isinstance(res, MyNDArray))
|
||||
assert_(res.shape == expected_shape)
|
||||
expected_shape = f(array, axis=1).shape
|
||||
res = f(mine, axis=1)
|
||||
assert_(isinstance(res, MyNDArray))
|
||||
assert_(res.shape == expected_shape)
|
||||
expected_shape = f(array).shape
|
||||
res = f(mine)
|
||||
assert_(isinstance(res, MyNDArray))
|
||||
assert_(res.shape == expected_shape)
|
||||
|
||||
|
||||
class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):
|
||||
|
||||
nanfuncs = [np.nansum, np.nanprod]
|
||||
stdfuncs = [np.sum, np.prod]
|
||||
|
||||
def test_allnans(self):
|
||||
# Check for FutureWarning
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
res = np.nansum([np.nan]*3, axis=None)
|
||||
assert_(res == 0, 'result is not 0')
|
||||
assert_(len(w) == 0, 'warning raised')
|
||||
# Check scalar
|
||||
res = np.nansum(np.nan)
|
||||
assert_(res == 0, 'result is not 0')
|
||||
assert_(len(w) == 0, 'warning raised')
|
||||
# Check there is no warning for not all-nan
|
||||
np.nansum([0]*3, axis=None)
|
||||
assert_(len(w) == 0, 'unwanted warning raised')
|
||||
|
||||
def test_empty(self):
|
||||
for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]):
|
||||
mat = np.zeros((0, 3))
|
||||
tgt = [tgt_value]*3
|
||||
res = f(mat, axis=0)
|
||||
assert_equal(res, tgt)
|
||||
tgt = []
|
||||
res = f(mat, axis=1)
|
||||
assert_equal(res, tgt)
|
||||
tgt = tgt_value
|
||||
res = f(mat, axis=None)
|
||||
assert_equal(res, tgt)
|
||||
|
||||
|
||||
class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin):
|
||||
|
||||
nanfuncs = [np.nancumsum, np.nancumprod]
|
||||
stdfuncs = [np.cumsum, np.cumprod]
|
||||
|
||||
def test_allnans(self):
|
||||
for f, tgt_value in zip(self.nanfuncs, [0, 1]):
|
||||
# Unlike other nan-functions, sum/prod/cumsum/cumprod don't warn on all nan input
|
||||
with assert_no_warnings():
|
||||
res = f([np.nan]*3, axis=None)
|
||||
tgt = tgt_value*np.ones((3))
|
||||
assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((3))' % (tgt_value))
|
||||
# Check scalar
|
||||
res = f(np.nan)
|
||||
tgt = tgt_value*np.ones((1))
|
||||
assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((1))' % (tgt_value))
|
||||
# Check there is no warning for not all-nan
|
||||
f([0]*3, axis=None)
|
||||
|
||||
def test_empty(self):
|
||||
for f, tgt_value in zip(self.nanfuncs, [0, 1]):
|
||||
mat = np.zeros((0, 3))
|
||||
tgt = tgt_value*np.ones((0, 3))
|
||||
res = f(mat, axis=0)
|
||||
assert_equal(res, tgt)
|
||||
tgt = mat
|
||||
res = f(mat, axis=1)
|
||||
assert_equal(res, tgt)
|
||||
tgt = np.zeros((0))
|
||||
res = f(mat, axis=None)
|
||||
assert_equal(res, tgt)
|
||||
|
||||
def test_keepdims(self):
|
||||
for f, g in zip(self.nanfuncs, self.stdfuncs):
|
||||
mat = np.eye(3)
|
||||
for axis in [None, 0, 1]:
|
||||
tgt = f(mat, axis=axis, out=None)
|
||||
res = g(mat, axis=axis, out=None)
|
||||
assert_(res.ndim == tgt.ndim)
|
||||
|
||||
for f in self.nanfuncs:
|
||||
d = np.ones((3, 5, 7, 11))
|
||||
# Randomly set some elements to NaN:
|
||||
rs = np.random.RandomState(0)
|
||||
d[rs.rand(*d.shape) < 0.5] = np.nan
|
||||
res = f(d, axis=None)
|
||||
assert_equal(res.shape, (1155,))
|
||||
for axis in np.arange(4):
|
||||
res = f(d, axis=axis)
|
||||
assert_equal(res.shape, (3, 5, 7, 11))
|
||||
|
||||
def test_result_values(self):
|
||||
for axis in (-2, -1, 0, 1, None):
|
||||
tgt = np.cumprod(_ndat_ones, axis=axis)
|
||||
res = np.nancumprod(_ndat, axis=axis)
|
||||
assert_almost_equal(res, tgt)
|
||||
tgt = np.cumsum(_ndat_zeros,axis=axis)
|
||||
res = np.nancumsum(_ndat, axis=axis)
|
||||
assert_almost_equal(res, tgt)
|
||||
|
||||
def test_out(self):
|
||||
mat = np.eye(3)
|
||||
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
|
||||
resout = np.eye(3)
|
||||
for axis in (-2, -1, 0, 1):
|
||||
tgt = rf(mat, axis=axis)
|
||||
res = nf(mat, axis=axis, out=resout)
|
||||
assert_almost_equal(res, resout)
|
||||
assert_almost_equal(res, tgt)
|
||||
|
||||
|
||||
class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin):
|
||||
|
||||
nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
|
||||
stdfuncs = [np.mean, np.var, np.std]
|
||||
|
||||
def test_dtype_error(self):
|
||||
for f in self.nanfuncs:
|
||||
for dtype in [np.bool_, np.int_, np.object_]:
|
||||
assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
|
||||
|
||||
def test_out_dtype_error(self):
|
||||
for f in self.nanfuncs:
|
||||
for dtype in [np.bool_, np.int_, np.object_]:
|
||||
out = np.empty(_ndat.shape[0], dtype=dtype)
|
||||
assert_raises(TypeError, f, _ndat, axis=1, out=out)
|
||||
|
||||
def test_ddof(self):
|
||||
nanfuncs = [np.nanvar, np.nanstd]
|
||||
stdfuncs = [np.var, np.std]
|
||||
for nf, rf in zip(nanfuncs, stdfuncs):
|
||||
for ddof in [0, 1]:
|
||||
tgt = [rf(d, ddof=ddof) for d in _rdat]
|
||||
res = nf(_ndat, axis=1, ddof=ddof)
|
||||
assert_almost_equal(res, tgt)
|
||||
|
||||
def test_ddof_too_big(self):
|
||||
nanfuncs = [np.nanvar, np.nanstd]
|
||||
stdfuncs = [np.var, np.std]
|
||||
dsize = [len(d) for d in _rdat]
|
||||
for nf, rf in zip(nanfuncs, stdfuncs):
|
||||
for ddof in range(5):
|
||||
with suppress_warnings() as sup:
|
||||
sup.record(RuntimeWarning)
|
||||
sup.filter(np.ComplexWarning)
|
||||
tgt = [ddof >= d for d in dsize]
|
||||
res = nf(_ndat, axis=1, ddof=ddof)
|
||||
assert_equal(np.isnan(res), tgt)
|
||||
if any(tgt):
|
||||
assert_(len(sup.log) == 1)
|
||||
else:
|
||||
assert_(len(sup.log) == 0)
|
||||
|
||||
def test_allnans(self):
|
||||
mat = np.array([np.nan]*9).reshape(3, 3)
|
||||
for f in self.nanfuncs:
|
||||
for axis in [None, 0, 1]:
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
assert_(np.isnan(f(mat, axis=axis)).all())
|
||||
assert_(len(w) == 1)
|
||||
assert_(issubclass(w[0].category, RuntimeWarning))
|
||||
# Check scalar
|
||||
assert_(np.isnan(f(np.nan)))
|
||||
assert_(len(w) == 2)
|
||||
assert_(issubclass(w[0].category, RuntimeWarning))
|
||||
|
||||
def test_empty(self):
|
||||
mat = np.zeros((0, 3))
|
||||
for f in self.nanfuncs:
|
||||
for axis in [0, None]:
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
assert_(np.isnan(f(mat, axis=axis)).all())
|
||||
assert_(len(w) == 1)
|
||||
assert_(issubclass(w[0].category, RuntimeWarning))
|
||||
for axis in [1]:
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
assert_equal(f(mat, axis=axis), np.zeros([]))
|
||||
assert_(len(w) == 0)
|
||||
|
||||
|
||||
class TestNanFunctions_Median:
|
||||
|
||||
def test_mutation(self):
|
||||
# Check that passed array is not modified.
|
||||
ndat = _ndat.copy()
|
||||
np.nanmedian(ndat)
|
||||
assert_equal(ndat, _ndat)
|
||||
|
||||
def test_keepdims(self):
|
||||
mat = np.eye(3)
|
||||
for axis in [None, 0, 1]:
|
||||
tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)
|
||||
res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)
|
||||
assert_(res.ndim == tgt.ndim)
|
||||
|
||||
d = np.ones((3, 5, 7, 11))
|
||||
# Randomly set some elements to NaN:
|
||||
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
|
||||
w = w.astype(np.intp)
|
||||
d[tuple(w)] = np.nan
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(RuntimeWarning)
|
||||
res = np.nanmedian(d, axis=None, keepdims=True)
|
||||
assert_equal(res.shape, (1, 1, 1, 1))
|
||||
res = np.nanmedian(d, axis=(0, 1), keepdims=True)
|
||||
assert_equal(res.shape, (1, 1, 7, 11))
|
||||
res = np.nanmedian(d, axis=(0, 3), keepdims=True)
|
||||
assert_equal(res.shape, (1, 5, 7, 1))
|
||||
res = np.nanmedian(d, axis=(1,), keepdims=True)
|
||||
assert_equal(res.shape, (3, 1, 7, 11))
|
||||
res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)
|
||||
assert_equal(res.shape, (1, 1, 1, 1))
|
||||
res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
|
||||
assert_equal(res.shape, (1, 1, 7, 1))
|
||||
|
||||
def test_out(self):
|
||||
mat = np.random.rand(3, 3)
|
||||
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
|
||||
resout = np.zeros(3)
|
||||
tgt = np.median(mat, axis=1)
|
||||
res = np.nanmedian(nan_mat, axis=1, out=resout)
|
||||
assert_almost_equal(res, resout)
|
||||
assert_almost_equal(res, tgt)
|
||||
# 0-d output:
|
||||
resout = np.zeros(())
|
||||
tgt = np.median(mat, axis=None)
|
||||
res = np.nanmedian(nan_mat, axis=None, out=resout)
|
||||
assert_almost_equal(res, resout)
|
||||
assert_almost_equal(res, tgt)
|
||||
res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
|
||||
assert_almost_equal(res, resout)
|
||||
assert_almost_equal(res, tgt)
|
||||
|
||||
def test_small_large(self):
|
||||
# test the small and large code paths, current cutoff 400 elements
|
||||
for s in [5, 20, 51, 200, 1000]:
|
||||
d = np.random.randn(4, s)
|
||||
# Randomly set some elements to NaN:
|
||||
w = np.random.randint(0, d.size, size=d.size // 5)
|
||||
d.ravel()[w] = np.nan
|
||||
d[:,0] = 1. # ensure at least one good value
|
||||
# use normal median without nans to compare
|
||||
tgt = []
|
||||
for x in d:
|
||||
nonan = np.compress(~np.isnan(x), x)
|
||||
tgt.append(np.median(nonan, overwrite_input=True))
|
||||
|
||||
assert_array_equal(np.nanmedian(d, axis=-1), tgt)
|
||||
|
||||
def test_result_values(self):
|
||||
tgt = [np.median(d) for d in _rdat]
|
||||
res = np.nanmedian(_ndat, axis=1)
|
||||
assert_almost_equal(res, tgt)
|
||||
|
||||
def test_allnans(self):
|
||||
mat = np.array([np.nan]*9).reshape(3, 3)
|
||||
for axis in [None, 0, 1]:
|
||||
with suppress_warnings() as sup:
|
||||
sup.record(RuntimeWarning)
|
||||
|
||||
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
|
||||
if axis is None:
|
||||
assert_(len(sup.log) == 1)
|
||||
else:
|
||||
assert_(len(sup.log) == 3)
|
||||
# Check scalar
|
||||
assert_(np.isnan(np.nanmedian(np.nan)))
|
||||
if axis is None:
|
||||
assert_(len(sup.log) == 2)
|
||||
else:
|
||||
assert_(len(sup.log) == 4)
|
||||
|
||||
def test_empty(self):
|
||||
mat = np.zeros((0, 3))
|
||||
for axis in [0, None]:
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
|
||||
assert_(len(w) == 1)
|
||||
assert_(issubclass(w[0].category, RuntimeWarning))
|
||||
for axis in [1]:
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
assert_equal(np.nanmedian(mat, axis=axis), np.zeros([]))
|
||||
assert_(len(w) == 0)
|
||||
|
||||
def test_scalar(self):
|
||||
assert_(np.nanmedian(0.) == 0.)
|
||||
|
||||
def test_extended_axis_invalid(self):
|
||||
d = np.ones((3, 5, 7, 11))
|
||||
assert_raises(np.AxisError, np.nanmedian, d, axis=-5)
|
||||
assert_raises(np.AxisError, np.nanmedian, d, axis=(0, -5))
|
||||
assert_raises(np.AxisError, np.nanmedian, d, axis=4)
|
||||
assert_raises(np.AxisError, np.nanmedian, d, axis=(0, 4))
|
||||
assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))
|
||||
|
||||
    def test_float_special(self):
        """nanmedian on arrays mixing +/-inf and NaN.

        The implementation masks NaNs with a fill value internally; these
        cases check that infinite entries survive that masking and are
        never replaced or mis-ordered by the fill.
        """
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            # Run every case for both signs of infinity.
            for inf in [np.inf, -np.inf]:
                # Single inf among NaNs: the inf is the median of its slice,
                # all-NaN slices stay NaN.
                a = np.array([[inf, np.nan], [np.nan, np.nan]])
                assert_equal(np.nanmedian(a, axis=0), [inf, np.nan])
                assert_equal(np.nanmedian(a, axis=1), [inf, np.nan])
                assert_equal(np.nanmedian(a), inf)

                # minimum fill value check
                a = np.array([[np.nan, np.nan, inf],
                              [np.nan, np.nan, inf]])
                assert_equal(np.nanmedian(a), inf)
                assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf])
                assert_equal(np.nanmedian(a, axis=1), inf)

                # no mask path (no NaNs at all): trivially inf everywhere.
                a = np.array([[inf, inf], [inf, inf]])
                assert_equal(np.nanmedian(a, axis=1), inf)

                # Mixed finite/inf/NaN float32 data; expectations differ by
                # the sign of `inf` because it changes the sort order.
                a = np.array([[inf, 7, -inf, -9],
                              [-10, np.nan, np.nan, 5],
                              [4, np.nan, np.nan, inf]],
                              dtype=np.float32)
                if inf > 0:
                    assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.])
                    assert_equal(np.nanmedian(a), 4.5)
                else:
                    assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.])
                    assert_equal(np.nanmedian(a), -2.5)
                assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf])

                # Every combination of i leading NaNs and j trailing infs:
                # the median must remain +/-inf, never the NaN fill value.
                for i in range(0, 10):
                    for j in range(1, 10):
                        a = np.array([([np.nan] * i) + ([inf] * j)] * 2)
                        assert_equal(np.nanmedian(a), inf)
                        assert_equal(np.nanmedian(a, axis=1), inf)
                        assert_equal(np.nanmedian(a, axis=0),
                                     ([np.nan] * i) + [inf] * j)

                        a = np.array([([np.nan] * i) + ([-inf] * j)] * 2)
                        assert_equal(np.nanmedian(a), -inf)
                        assert_equal(np.nanmedian(a, axis=1), -inf)
                        assert_equal(np.nanmedian(a, axis=0),
                                     ([np.nan] * i) + [-inf] * j)
|
||||
|
||||
|
||||
class TestNanFunctions_Percentile:
    """Tests for np.nanpercentile, mirroring the nanmedian suite above.

    Relies on module-level fixtures ``_ndat`` (data with NaNs) and
    ``_rdat`` (the same rows with NaNs stripped) defined earlier in
    this file.
    """

    def test_mutation(self):
        # Check that passed array is not modified.
        ndat = _ndat.copy()
        np.nanpercentile(ndat, 30)
        assert_equal(ndat, _ndat)

    def test_keepdims(self):
        # keepdims output must have the same rank as np.percentile's, with
        # every reduced axis collapsed to length 1.
        mat = np.eye(3)
        for axis in [None, 0, 1]:
            tgt = np.percentile(mat, 70, axis=axis, out=None,
                                overwrite_input=False)
            res = np.nanpercentile(mat, 70, axis=axis, out=None,
                                   overwrite_input=False)
            assert_(res.ndim == tgt.ndim)

        d = np.ones((3, 5, 7, 11))
        # Randomly set some elements to NaN:
        w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
        w = w.astype(np.intp)
        d[tuple(w)] = np.nan
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            res = np.nanpercentile(d, 90, axis=None, keepdims=True)
            assert_equal(res.shape, (1, 1, 1, 1))
            res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)
            assert_equal(res.shape, (1, 1, 7, 11))
            res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)
            assert_equal(res.shape, (1, 5, 7, 1))
            res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)
            assert_equal(res.shape, (3, 1, 7, 11))
            res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)
            assert_equal(res.shape, (1, 1, 1, 1))
            res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
            assert_equal(res.shape, (1, 1, 7, 1))

    def test_out(self):
        # The returned value must also be written into the out= buffer.
        mat = np.random.rand(3, 3)
        nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
        resout = np.zeros(3)
        tgt = np.percentile(mat, 42, axis=1)
        res = np.nanpercentile(nan_mat, 42, axis=1, out=resout)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)
        # 0-d output:
        resout = np.zeros(())
        tgt = np.percentile(mat, 42, axis=None)
        res = np.nanpercentile(nan_mat, 42, axis=None, out=resout)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)
        # axis as a tuple naming all axes behaves like axis=None.
        res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)

    def test_result_values(self):
        # nanpercentile over NaN-bearing rows must equal percentile over the
        # corresponding NaN-free rows.
        tgt = [np.percentile(d, 28) for d in _rdat]
        res = np.nanpercentile(_ndat, 28, axis=1)
        assert_almost_equal(res, tgt)
        # Transpose the array to fit the output convention of numpy.percentile
        tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat])
        res = np.nanpercentile(_ndat, (28, 98), axis=1)
        assert_almost_equal(res, tgt)

    def test_allnans(self):
        # All-NaN input: NaN result, one RuntimeWarning per reduced slice
        # (1 for a full reduction, 3 per-axis), plus one more for the
        # scalar-NaN call made inside the same catch block.
        mat = np.array([np.nan]*9).reshape(3, 3)
        for axis in [None, 0, 1]:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all())
                if axis is None:
                    assert_(len(w) == 1)
                else:
                    assert_(len(w) == 3)
                assert_(issubclass(w[0].category, RuntimeWarning))
                # Check scalar
                assert_(np.isnan(np.nanpercentile(np.nan, 60)))
                if axis is None:
                    assert_(len(w) == 2)
                else:
                    assert_(len(w) == 4)
                assert_(issubclass(w[0].category, RuntimeWarning))

    def test_empty(self):
        mat = np.zeros((0, 3))
        # Reducing over an empty slice warns once and yields NaN...
        for axis in [0, None]:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all())
                assert_(len(w) == 1)
                assert_(issubclass(w[0].category, RuntimeWarning))
        # ...while the non-empty axis reduces silently to an empty result.
        for axis in [1]:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([]))
                assert_(len(w) == 0)

    def test_scalar(self):
        # 0-d input passes through; a scalar axis reduction of a 1-d array
        # returns a true scalar, not a 0-d array.
        assert_equal(np.nanpercentile(0., 100), 0.)
        a = np.arange(6)
        r = np.nanpercentile(a, 50, axis=0)
        assert_equal(r, 2.5)
        assert_(np.isscalar(r))

    def test_extended_axis_invalid(self):
        # Out-of-range axes (valid range -4..3 here) raise AxisError;
        # a repeated axis raises ValueError instead.
        d = np.ones((3, 5, 7, 11))
        assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=-5)
        assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, -5))
        assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=4)
        assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, 4))
        assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1))

    def test_multiple_percentiles(self):
        # A sequence of percentiles prepends an axis of length len(q)
        # to the result, for every axis/keepdims combination.
        perc = [50, 100]
        mat = np.ones((4, 3))
        nan_mat = np.nan * mat
        # For checking consistency in higher dimensional case
        large_mat = np.ones((3, 4, 5))
        large_mat[:, 0:2:4, :] = 0
        large_mat[:, :, 3:] *= 2
        for axis in [None, 0, 1]:
            for keepdim in [False, True]:
                with suppress_warnings() as sup:
                    sup.filter(RuntimeWarning, "All-NaN slice encountered")
                    # All-NaN data: only shapes can be compared to the
                    # NaN-free reference.
                    val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
                    nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
                                               keepdims=keepdim)
                    assert_equal(nan_val.shape, val.shape)

                    # NaN-free data: values must agree exactly.
                    val = np.percentile(large_mat, perc, axis=axis,
                                        keepdims=keepdim)
                    nan_val = np.nanpercentile(large_mat, perc, axis=axis,
                                               keepdims=keepdim)
                    assert_equal(nan_val, val)

        megamat = np.ones((3, 4, 5, 6))
        assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
|
||||
|
||||
|
||||
class TestNanFunctions_Quantile:
    # Most quantile behaviour is already covered by the percentile tests
    # above; these only pin the q <-> 100*q correspondence, basic values,
    # and that the probability argument is never mutated.

    def test_regression(self):
        """nanquantile(q, ...) must agree with nanpercentile(100*q, ...)."""
        ar = np.arange(24).reshape(2, 3, 4).astype(float)
        ar[0][1] = np.nan

        assert_equal(np.nanquantile(ar, q=0.5),
                     np.nanpercentile(ar, q=50))
        # Same correspondence for each single axis.
        for ax in (0, 1):
            assert_equal(np.nanquantile(ar, q=0.5, axis=ax),
                         np.nanpercentile(ar, q=50, axis=ax))
        # And for list-valued q (single and multiple entries).
        assert_equal(np.nanquantile(ar, q=[0.5], axis=1),
                     np.nanpercentile(ar, q=[50], axis=1))
        assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1),
                     np.nanpercentile(ar, q=[25, 50, 75], axis=1))

    def test_basic(self):
        """Endpoints and midpoint of an evenly spaced sample."""
        samples = np.arange(8) * 0.5
        for q, expected in ((0, 0.), (1, 3.5), (0.5, 1.75)):
            assert_equal(np.nanquantile(samples, q), expected)

    def test_no_p_overwrite(self):
        """nanquantile must not modify its probability argument in place.

        Worth retesting here because quantile does not make a copy.
        """
        reference = np.array([0, 0.75, 0.25, 0.5, 1.0])
        probs = reference.copy()
        np.nanquantile(np.arange(100.), probs, interpolation="midpoint")
        assert_array_equal(probs, reference)

        # Same check with plain Python lists.
        reference = reference.tolist()
        probs = probs.tolist()
        np.nanquantile(np.arange(100.), probs, interpolation="midpoint")
        assert_array_equal(probs, reference)
|
||||
|
||||
@pytest.mark.parametrize("arr, expected", [
    # array of floats with some nans
    (np.array([np.nan, 5.0, np.nan, np.inf]),
     np.array([False, True, False, True])),
    # int64 array that can't possibly have nans
    (np.array([1, 5, 7, 9], dtype=np.int64),
     True),
    # bool array that can't possibly have nans
    (np.array([False, True, False, True]),
     True),
    # 2-D complex array with nans
    (np.array([[np.nan, 5.0],
               [np.nan, np.inf]], dtype=np.complex64),
     np.array([[False, True],
               [False, True]])),
    ])
def test__nan_mask(arr, expected):
    """_nan_mask flags the non-NaN entries, honouring an ``out=`` buffer."""
    for buffer in (None, np.empty(arr.shape, dtype=np.bool_)):
        result = _nan_mask(arr, out=buffer)
        assert_equal(result, expected)
        # assert_equal can't distinguish the scalar True from an array of
        # True values; dtypes that cannot hold NaN must yield the scalar.
        if type(expected) is not np.ndarray:
            assert result is True
|
||||
|
||||
|
||||
def test__replace_nan():
    """ Test that _replace_nan returns the original array if there are no
    NaNs, not a copy, and otherwise copies before filling.
    """
    # np.bool_ rather than np.bool: the np.bool alias was deprecated in
    # NumPy 1.20 and removed in 1.24, so this test would crash there.
    for dtype in [np.bool_, np.int32, np.int64]:
        arr = np.array([0, 1], dtype=dtype)
        result, mask = _replace_nan(arr, 0)
        assert mask is None
        # do not make a copy if there are no nans
        assert result is arr

    for dtype in [np.float32, np.float64]:
        arr = np.array([0, 1], dtype=dtype)
        result, mask = _replace_nan(arr, 2)
        assert (mask == False).all()
        # mask is not None, so we make a copy
        assert result is not arr
        assert_equal(result, arr)

        arr_nan = np.array([0, 1, np.nan], dtype=dtype)
        result_nan, mask_nan = _replace_nan(arr_nan, 2)
        assert_equal(mask_nan, np.array([False, False, True]))
        assert result_nan is not arr_nan
        # NaN entries are replaced by the fill value (2) in the copy...
        assert_equal(result_nan, np.array([0, 1, 2]))
        # ...while the original array is left untouched.
        assert np.isnan(arr_nan[-1])
|
||||
376
venv/Lib/site-packages/numpy/lib/tests/test_packbits.py
Normal file
376
venv/Lib/site-packages/numpy/lib/tests/test_packbits.py
Normal file
|
|
@ -0,0 +1,376 @@
|
|||
import numpy as np
|
||||
from numpy.testing import assert_array_equal, assert_equal, assert_raises
|
||||
import pytest
|
||||
from itertools import chain
|
||||
|
||||
def test_packbits():
    """packbits packs boolean-like values into uint8 bytes, MSB first
    (docstring example), and rejects float input."""
    bits = [[[1, 0, 1], [0, 1, 0]],
            [[1, 1, 0], [0, 0, 1]]]
    expected = np.array([[[160], [64]], [[192], [32]]])
    # Every integer-ish dtype packs identically.
    for dt in '?bBhHiIlLqQ':
        packed = np.packbits(np.array(bits, dtype=dt), axis=-1)
        assert_equal(packed.dtype, np.uint8)
        assert_array_equal(packed, expected)

    # Floating point input is a TypeError.
    assert_raises(TypeError, np.packbits, np.array(bits, dtype=float))
|
||||
|
||||
|
||||
def test_packbits_empty():
    """Packing any empty array (no axis) flattens to an empty uint8 array."""
    shapes = [
        (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0),
        (0, 0, 20), (0, 0, 0),
    ]
    for shape in shapes:
        for dt in '?bBhHiIlLqQ':
            packed = np.packbits(np.empty(shape, dtype=dt))
            assert_equal(packed.dtype, np.uint8)
            assert_equal(packed.shape, (0,))
|
||||
|
||||
|
||||
def test_packbits_empty_with_axis():
    """Axis-wise packing of empty arrays: the packed axis shrinks to
    ceil(n/8) bytes while empty axes stay empty."""
    # (input shape, packed shapes for axis 0, 1, 2 in order)
    cases = [
        ((0,), [(0,)]),
        ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]),
        ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]),
        ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]),
        ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]),
        ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]),
        ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]),
        ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]),
    ]
    for dt in '?bBhHiIlLqQ':
        for in_shape, out_shapes in cases:
            for ax, out_shape in enumerate(out_shapes):
                packed = np.packbits(np.empty(in_shape, dtype=dt), axis=ax)
                assert_equal(packed.dtype, np.uint8)
                assert_equal(packed.shape, out_shape)
|
||||
|
||||
@pytest.mark.parametrize('bitorder', ('little', 'big'))
def test_packbits_large(bitorder):
    """Pack/unpack on data large enough to hit the 16-byte SIMD path.

    Expected byte values below were precomputed for the 'big' bit order;
    the unpack round-trip is checked for both orders.
    """
    # test data large enough for 16 byte vectorization
    a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,
                  0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,
                  1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,
                  1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1,
                  1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1,
                  1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1,
                  1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1,
                  0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1,
                  1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0,
                  1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1,
                  1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0,
                  0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1,
                  1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0,
                  1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0,
                  1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0])
    # 300 bits repeated 3x -> 900 bits (not a multiple of 8 on purpose).
    a = a.repeat(3)
    for dtype in '?bBhHiIlLqQ':
        arr = np.array(a, dtype=dtype)
        b = np.packbits(arr, axis=None, bitorder=bitorder)
        assert_equal(b.dtype, np.uint8)
        # Precomputed big-endian packed bytes of `a`.
        r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252,
             113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255,
             227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63,
             224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112,
             63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1,
             255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15,
             199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227,
             129, 248, 227, 129, 199, 31, 128]
        if bitorder == 'big':
            assert_array_equal(b, r)
        # equal for size being multiple of 8
        assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a)

        # check last byte of different remainders (16 byte vectorization)
        b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)]
        assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199,
                               198, 196, 192])

    # Axis-wise packing of the same data reshaped to 36x25 (note: `arr`
    # carries over from the last loop iteration, dtype 'Q').
    arr = arr.reshape(36, 25)
    b = np.packbits(arr, axis=0)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195,
                            199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105,
                            107, 75, 74, 88],
                           [72, 216, 248, 241, 227, 195, 202, 90, 90, 83,
                            83, 119, 127, 109, 73, 64, 208, 244, 189, 45,
                            41, 104, 122, 90, 18],
                           [113, 120, 248, 216, 152, 24, 60, 52, 182, 150,
                            150, 150, 146, 210, 210, 246, 255, 255, 223,
                            151, 21, 17, 17, 131, 163],
                           [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92,
                            92, 78, 110, 39, 181, 149, 220, 222, 218, 218,
                            202, 234, 170, 168],
                           [0, 128, 128, 192, 80, 112, 48, 160, 160, 224,
                            240, 208, 144, 128, 160, 224, 240, 208, 144,
                            144, 176, 240, 224, 192, 128]])

    b = np.packbits(arr, axis=1)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, [[252, 127, 192, 0],
                           [ 7, 252, 15, 128],
                           [240, 0, 28, 0],
                           [255, 128, 0, 128],
                           [192, 31, 255, 128],
                           [142, 63, 0, 0],
                           [255, 240, 7, 0],
                           [ 7, 224, 14, 0],
                           [126, 0, 224, 0],
                           [255, 255, 199, 0],
                           [ 56, 28, 126, 0],
                           [113, 248, 227, 128],
                           [227, 142, 63, 0],
                           [ 0, 28, 112, 0],
                           [ 15, 248, 3, 128],
                           [ 28, 126, 56, 0],
                           [ 56, 255, 241, 128],
                           [240, 7, 224, 0],
                           [227, 129, 192, 128],
                           [255, 255, 254, 0],
                           [126, 0, 224, 0],
                           [ 3, 241, 248, 0],
                           [ 0, 255, 241, 128],
                           [128, 0, 255, 128],
                           [224, 1, 255, 128],
                           [248, 252, 126, 0],
                           [ 0, 7, 3, 128],
                           [224, 113, 248, 0],
                           [ 0, 252, 127, 128],
                           [142, 63, 224, 0],
                           [224, 14, 63, 0],
                           [ 7, 3, 128, 0],
                           [113, 255, 255, 128],
                           [ 28, 113, 199, 0],
                           [ 7, 227, 142, 0],
                           [ 14, 56, 252, 0]])

    # Same two axis checks on the transposed (25x36) copy.
    arr = arr.T.copy()
    b = np.packbits(arr, axis=0)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255,
                            56, 113, 227, 0, 15, 28, 56, 240, 227, 255,
                            126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224,
                            7, 113, 28, 7, 14],
                           [127, 252, 0, 128, 31, 63, 240, 224, 0, 255,
                            28, 248, 142, 28, 248, 126, 255, 7, 129, 255,
                            0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14,
                            3, 255, 113, 227, 56],
                           [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126,
                            227, 63, 112, 3, 56, 241, 224, 192, 254, 224,
                            248, 241, 255, 255, 126, 3, 248, 127, 224, 63,
                            128, 255, 199, 142, 252],
                           [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0,
                            0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128,
                            128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]])

    b = np.packbits(arr, axis=1)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, [[190, 72, 113, 214, 0],
                           [186, 216, 120, 210, 128],
                           [178, 248, 248, 210, 128],
                           [178, 241, 216, 64, 192],
                           [150, 227, 152, 68, 80],
                           [215, 195, 24, 5, 112],
                           [ 87, 202, 60, 5, 48],
                           [ 83, 90, 52, 1, 160],
                           [ 83, 90, 182, 72, 160],
                           [195, 83, 150, 88, 224],
                           [199, 83, 150, 92, 240],
                           [206, 119, 150, 92, 208],
                           [204, 127, 146, 78, 144],
                           [204, 109, 210, 110, 128],
                           [140, 73, 210, 39, 160],
                           [140, 64, 246, 181, 224],
                           [136, 208, 255, 149, 240],
                           [136, 244, 255, 220, 208],
                           [ 8, 189, 223, 222, 144],
                           [ 40, 45, 151, 218, 144],
                           [105, 41, 21, 218, 176],
                           [107, 104, 17, 202, 240],
                           [ 75, 122, 17, 234, 224],
                           [ 74, 90, 131, 170, 192],
                           [ 88, 18, 163, 168, 128]])

    # result is the same if input is multiplied with a nonzero value
    for dtype in 'bBhHiIlLqQ':
        arr = np.array(a, dtype=dtype)
        rnd = np.random.randint(low=np.iinfo(dtype).min,
                                high=np.iinfo(dtype).max, size=arr.size,
                                dtype=dtype)
        rnd[rnd == 0] = 1
        arr *= rnd.astype(dtype)
        b = np.packbits(arr, axis=-1)
        assert_array_equal(np.unpackbits(b)[:-4], a)

    assert_raises(TypeError, np.packbits, np.array(a, dtype=float))
|
||||
|
||||
|
||||
def test_packbits_very_large():
    """Regression test for gh-8637: packbits crashed on large inputs.

    The code paths are covered by smaller tests above; a larger array just
    makes the crash more likely, so only successful completion matters.
    """
    # Note: the original version also looped over the dtype codes
    # '?bBhHiIlLqQ', but the array was always created with dtype=bool and
    # the loop variable was never used — it only repeated identical work
    # ten times, so it has been removed.
    for s in range(950, 1050):
        x = np.ones((200, s), dtype=bool)
        np.packbits(x, axis=1)
|
||||
|
||||
|
||||
def test_unpackbits():
    """unpackbits expands each uint8 into 8 bits, MSB first by default
    (docstring example)."""
    packed = np.array([[2], [7], [23]], dtype=np.uint8)
    unpacked = np.unpackbits(packed, axis=1)
    assert_equal(unpacked.dtype, np.uint8)
    expected = np.array([[0, 0, 0, 0, 0, 0, 1, 0],
                         [0, 0, 0, 0, 0, 1, 1, 1],
                         [0, 0, 0, 1, 0, 1, 1, 1]])
    assert_array_equal(unpacked, expected)
|
||||
|
||||
def test_pack_unpack_order():
    """'little' bit order is the per-byte reversal of the default 'big',
    and packing round-trips under either order."""
    a = np.array([[2], [7], [23]], dtype=np.uint8)
    b = np.unpackbits(a, axis=1)
    assert_equal(b.dtype, np.uint8)
    b_little = np.unpackbits(a, axis=1, bitorder='little')
    b_big = np.unpackbits(a, axis=1, bitorder='big')
    # The default order is 'big'; 'little' reverses the bits in each byte.
    assert_array_equal(b, b_big)
    assert_array_equal(b[:, ::-1], b_little)
    # Both orders round-trip back to the original packed bytes.
    assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little'))
    assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big'))
    # Invalid bitorder values: bad string -> ValueError, non-string -> TypeError.
    assert_raises(ValueError, np.unpackbits, a, bitorder='r')
    assert_raises(TypeError, np.unpackbits, a, bitorder=10)
|
||||
|
||||
|
||||
|
||||
def test_unpackbits_empty():
    """Unpacking an empty byte array yields an empty uint8 array."""
    unpacked = np.unpackbits(np.empty((0,), dtype=np.uint8))
    assert_equal(unpacked.dtype, np.uint8)
    assert_array_equal(unpacked, np.empty((0,)))
|
||||
|
||||
|
||||
def test_unpackbits_empty_with_axis():
    """Axis-wise unpacking of empty arrays: the unpacked axis grows 8x
    while empty axes remain empty."""
    # ([packed shape for axis 0, 1, 2], unpacked shape)
    cases = [
        ([(0,)], (0,)),
        ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)),
        ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)),
        ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)),
        ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)),
        ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)),
        ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)),
        ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)),
    ]
    for in_shapes, out_shape in cases:
        for ax, in_shape in enumerate(in_shapes):
            unpacked = np.unpackbits(np.empty(in_shape, dtype=np.uint8),
                                     axis=ax)
            assert_equal(unpacked.dtype, np.uint8)
            assert_equal(unpacked.shape, out_shape)
|
||||
|
||||
|
||||
def test_unpackbits_large():
    """pack(unpack(x)) round-trips over all byte values, for contiguous
    and strided 1-D input and along both axes of a 2-D array."""
    data = np.arange(277, dtype=np.uint8)
    assert_array_equal(np.packbits(np.unpackbits(data)), data)
    # Strided input exercises the non-contiguous path.
    assert_array_equal(np.packbits(np.unpackbits(data[::2])), data[::2])
    data = np.tile(data, (3, 1))
    assert_array_equal(np.packbits(np.unpackbits(data, axis=1), axis=1), data)
    data = data.T.copy()
    assert_array_equal(np.packbits(np.unpackbits(data, axis=0), axis=0), data)
|
||||
|
||||
|
||||
class TestCount():
    """Tests for the ``count`` parameter of np.unpackbits.

    ``count`` limits how many bits are produced; a negative count trims
    that many bits (plus implicit padding) from the end.
    """

    # 7x7 bit matrix shared by every test below (49 bits -> 7 packed bytes
    # -> 56 unpacked bits when no count is given).
    x = np.array([
        [1, 0, 1, 0, 0, 1, 0],
        [0, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 0, 0, 1, 1],
        [1, 1, 0, 0, 0, 1, 1],
        [1, 0, 1, 0, 1, 0, 1],
        [0, 0, 1, 1, 1, 0, 0],
        [0, 1, 0, 1, 0, 1, 0],
        ], dtype=np.uint8)
    # Flat x zero-padded to 57 entries: 56 bits of unpacked data plus one
    # extra slot so count=-1 slicing (cutoff = count - 1) stays in range.
    padded1 = np.zeros(57, dtype=np.uint8)
    padded1[:49] = x.ravel()
    # Row-reversed variant of padded1 (companion fixture; not used by the
    # tests visible here).
    padded1b = np.zeros(57, dtype=np.uint8)
    padded1b[:49] = x[::-1].copy().ravel()
    # x zero-padded to 9x9 for the per-axis tests: each packed axis unpacks
    # to 8 bits, plus one extra row/column for negative-count slicing.
    padded2 = np.zeros((9, 9), dtype=np.uint8)
    padded2[:7, :7] = x

    @pytest.mark.parametrize('bitorder', ('little', 'big'))
    @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1)))
    def test_roundtrip(self, bitorder, count):
        if count < 0:
            # one extra zero of padding
            cutoff = count - 1
        else:
            cutoff = count
        # test complete invertibility of packbits and unpackbits with count
        packed = np.packbits(self.x, bitorder=bitorder)
        unpacked = np.unpackbits(packed, count=count, bitorder=bitorder)
        assert_equal(unpacked.dtype, np.uint8)
        assert_array_equal(unpacked, self.padded1[:cutoff])

    @pytest.mark.parametrize('kwargs', [
                    {}, {'count': None},
                    ])
    def test_count(self, kwargs):
        # Omitting count (or passing None) unpacks every bit: 56 of them.
        packed = np.packbits(self.x)
        unpacked = np.unpackbits(packed, **kwargs)
        assert_equal(unpacked.dtype, np.uint8)
        assert_array_equal(unpacked, self.padded1[:-1])

    @pytest.mark.parametrize('bitorder', ('little', 'big'))
    # delta==-1 when count<0 because one extra zero of padding
    @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1)))
    def test_roundtrip_axis(self, bitorder, count):
        if count < 0:
            # one extra zero of padding
            cutoff = count - 1
        else:
            cutoff = count
        # count applies along the unpacked axis only; the other axis keeps
        # the original extent.
        packed0 = np.packbits(self.x, axis=0, bitorder=bitorder)
        unpacked0 = np.unpackbits(packed0, axis=0, count=count,
                                  bitorder=bitorder)
        assert_equal(unpacked0.dtype, np.uint8)
        assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]])

        packed1 = np.packbits(self.x, axis=1, bitorder=bitorder)
        unpacked1 = np.unpackbits(packed1, axis=1, count=count,
                                  bitorder=bitorder)
        assert_equal(unpacked1.dtype, np.uint8)
        assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff])

    @pytest.mark.parametrize('kwargs', [
                    {}, {'count': None},
                    {'bitorder' : 'little'},
                    {'bitorder': 'little', 'count': None},
                    {'bitorder' : 'big'},
                    {'bitorder': 'big', 'count': None},
                    ])
    def test_axis_count(self, kwargs):
        # Default count along an axis unpacks all 8 bits per byte; with
        # 'little' order the bits come out reversed along that axis.
        packed0 = np.packbits(self.x, axis=0)
        unpacked0 = np.unpackbits(packed0, axis=0, **kwargs)
        assert_equal(unpacked0.dtype, np.uint8)
        if kwargs.get('bitorder', 'big') == 'big':
            assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]])
        else:
            assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]])

        packed1 = np.packbits(self.x, axis=1)
        unpacked1 = np.unpackbits(packed1, axis=1, **kwargs)
        assert_equal(unpacked1.dtype, np.uint8)
        if kwargs.get('bitorder', 'big') == 'big':
            assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1])
        else:
            assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1])

    def test_bad_count(self):
        # A negative count larger than the number of available bits
        # (8 per axis here, 56 flat) must raise ValueError.
        packed0 = np.packbits(self.x, axis=0)
        assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
        packed1 = np.packbits(self.x, axis=1)
        assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
        packed = np.packbits(self.x)
        assert_raises(ValueError, np.unpackbits, packed, count=-57)
|
||||
259
venv/Lib/site-packages/numpy/lib/tests/test_polynomial.py
Normal file
259
venv/Lib/site-packages/numpy/lib/tests/test_polynomial.py
Normal file
|
|
@ -0,0 +1,259 @@
|
|||
import numpy as np
|
||||
from numpy.testing import (
|
||||
assert_, assert_equal, assert_array_equal, assert_almost_equal,
|
||||
assert_array_almost_equal, assert_raises, assert_allclose
|
||||
)
|
||||
|
||||
|
||||
class TestPolynomial:
|
||||
    def test_poly1d_str_and_repr(self):
        """repr() shows the coefficient array; str() renders the polynomial
        in two-line exponent-over-terms notation.

        NOTE(review): the internal spacing of the expected first lines
        (e.g. ' 2\\n') looks collapsed by whatever produced this copy of
        the file — the exponent is normally aligned over its term; verify
        against upstream numpy before relying on these literals.
        """
        p = np.poly1d([1., 2, 3])
        assert_equal(repr(p), 'poly1d([1., 2., 3.])')
        assert_equal(str(p),
                     ' 2\n'
                     '1 x + 2 x + 3')

        q = np.poly1d([3., 2, 1])
        assert_equal(repr(q), 'poly1d([3., 2., 1.])')
        assert_equal(str(q),
                     ' 2\n'
                     '3 x + 2 x + 1')

        # Complex coefficients are parenthesised and rounded for display.
        r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j])
        assert_equal(str(r),
                     ' 3 2\n'
                     '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)')

        # Negative coefficients render with a leading minus, not "+ -".
        assert_equal(str(np.poly1d([-3, -2, -1])),
                     ' 2\n'
                     '-3 x - 2 x - 1')
|
||||
|
||||
def test_poly1d_resolution(self):
|
||||
p = np.poly1d([1., 2, 3])
|
||||
q = np.poly1d([3., 2, 1])
|
||||
assert_equal(p(0), 3.0)
|
||||
assert_equal(p(5), 38.0)
|
||||
assert_equal(q(0), 1.0)
|
||||
assert_equal(q(5), 86.0)
|
||||
|
||||
def test_poly1d_math(self):
|
||||
# here we use some simple coeffs to make calculations easier
|
||||
p = np.poly1d([1., 2, 4])
|
||||
q = np.poly1d([4., 2, 1])
|
||||
assert_equal(p/q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75])))
|
||||
assert_equal(p.integ(), np.poly1d([1/3, 1., 4., 0.]))
|
||||
assert_equal(p.integ(1), np.poly1d([1/3, 1., 4., 0.]))
|
||||
|
||||
p = np.poly1d([1., 2, 3])
|
||||
q = np.poly1d([3., 2, 1])
|
||||
assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.]))
|
||||
assert_equal(p + q, np.poly1d([4., 4., 4.]))
|
||||
assert_equal(p - q, np.poly1d([-2., 0., 2.]))
|
||||
assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.]))
|
||||
assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.]))
|
||||
assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.]))
|
||||
assert_equal(p.deriv(), np.poly1d([2., 2.]))
|
||||
assert_equal(p.deriv(2), np.poly1d([2.]))
|
||||
assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])),
|
||||
(np.poly1d([1., -1.]), np.poly1d([0.])))
|
||||
|
||||
def test_poly1d_misc(self):
|
||||
p = np.poly1d([1., 2, 3])
|
||||
assert_equal(np.asarray(p), np.array([1., 2., 3.]))
|
||||
assert_equal(len(p), 2)
|
||||
assert_equal((p[0], p[1], p[2], p[3]), (3.0, 2.0, 1.0, 0))
|
||||
|
||||
    def test_poly1d_variable_arg(self):
        """The ``variable`` constructor argument replaces 'x' in str().

        NOTE(review): as in test_poly1d_str_and_repr, the expected
        first-line spacing (' 2\\n') appears collapsed by extraction —
        verify against upstream numpy.
        """
        q = np.poly1d([1., 2, 3], variable='y')
        assert_equal(str(q),
                     ' 2\n'
                     '1 y + 2 y + 3')
        # Multi-character variable names are supported too.
        q = np.poly1d([1., 2, 3], variable='lambda')
        assert_equal(str(q),
                     ' 2\n'
                     '1 lambda + 2 lambda + 3')
|
||||
|
||||
def test_poly(self):
|
||||
assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]),
|
||||
[1, -3, -2, 6])
|
||||
|
||||
# From matlab docs
|
||||
A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]
|
||||
assert_array_almost_equal(np.poly(A), [1, -6, -72, -27])
|
||||
|
||||
# Should produce real output for perfect conjugates
|
||||
assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j])))
|
||||
assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j,
|
||||
1-2j, 1.+3.5j, 1-3.5j])))
|
||||
assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j])))
|
||||
assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j])))
|
||||
assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j])))
|
||||
assert_(np.isrealobj(np.poly([1j, -1j])))
|
||||
assert_(np.isrealobj(np.poly([1, -1])))
|
||||
|
||||
assert_(np.iscomplexobj(np.poly([1j, -1.0000001j])))
|
||||
|
||||
np.random.seed(42)
|
||||
a = np.random.randn(100) + 1j*np.random.randn(100)
|
||||
assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a))))))
|
||||
|
||||
def test_roots(self):
|
||||
assert_array_equal(np.roots([1, 0, 0]), [0, 0])
|
||||
|
||||
def test_str_leading_zeros(self):
|
||||
p = np.poly1d([4, 3, 2, 1])
|
||||
p[3] = 0
|
||||
assert_equal(str(p),
|
||||
" 2\n"
|
||||
"3 x + 2 x + 1")
|
||||
|
||||
p = np.poly1d([1, 2])
|
||||
p[0] = 0
|
||||
p[1] = 0
|
||||
assert_equal(str(p), " \n0")
|
||||
|
||||
def test_polyfit(self):
|
||||
c = np.array([3., 2., 1.])
|
||||
x = np.linspace(0, 2, 7)
|
||||
y = np.polyval(c, x)
|
||||
err = [1, -1, 1, -1, 1, -1, 1]
|
||||
weights = np.arange(8, 1, -1)**2/7.0
|
||||
|
||||
# Check exception when too few points for variance estimate. Note that
|
||||
# the estimate requires the number of data points to exceed
|
||||
# degree + 1
|
||||
assert_raises(ValueError, np.polyfit,
|
||||
[1], [1], deg=0, cov=True)
|
||||
|
||||
# check 1D case
|
||||
m, cov = np.polyfit(x, y+err, 2, cov=True)
|
||||
est = [3.8571, 0.2857, 1.619]
|
||||
assert_almost_equal(est, m, decimal=4)
|
||||
val0 = [[ 1.4694, -2.9388, 0.8163],
|
||||
[-2.9388, 6.3673, -2.1224],
|
||||
[ 0.8163, -2.1224, 1.161 ]]
|
||||
assert_almost_equal(val0, cov, decimal=4)
|
||||
|
||||
m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True)
|
||||
assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
|
||||
val = [[ 4.3964, -5.0052, 0.4878],
|
||||
[-5.0052, 6.8067, -0.9089],
|
||||
[ 0.4878, -0.9089, 0.3337]]
|
||||
assert_almost_equal(val, cov2, decimal=4)
|
||||
|
||||
m3, cov3 = np.polyfit(x, y+err, 2, w=weights, cov="unscaled")
|
||||
assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4)
|
||||
val = [[ 0.1473, -0.1677, 0.0163],
|
||||
[-0.1677, 0.228 , -0.0304],
|
||||
[ 0.0163, -0.0304, 0.0112]]
|
||||
assert_almost_equal(val, cov3, decimal=4)
|
||||
|
||||
# check 2D (n,1) case
|
||||
y = y[:, np.newaxis]
|
||||
c = c[:, np.newaxis]
|
||||
assert_almost_equal(c, np.polyfit(x, y, 2))
|
||||
# check 2D (n,2) case
|
||||
yy = np.concatenate((y, y), axis=1)
|
||||
cc = np.concatenate((c, c), axis=1)
|
||||
assert_almost_equal(cc, np.polyfit(x, yy, 2))
|
||||
|
||||
m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True)
|
||||
assert_almost_equal(est, m[:, 0], decimal=4)
|
||||
assert_almost_equal(est, m[:, 1], decimal=4)
|
||||
assert_almost_equal(val0, cov[:, :, 0], decimal=4)
|
||||
assert_almost_equal(val0, cov[:, :, 1], decimal=4)
|
||||
|
||||
# check order 1 (deg=0) case, were the analytic results are simple
|
||||
np.random.seed(123)
|
||||
y = np.random.normal(size=(4, 10000))
|
||||
mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True)
|
||||
# Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5.
|
||||
assert_allclose(mean.std(), 0.5, atol=0.01)
|
||||
assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
|
||||
# Without scaling, since reduced chi2 is 1, the result should be the same.
|
||||
mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]),
|
||||
deg=0, cov="unscaled")
|
||||
assert_allclose(mean.std(), 0.5, atol=0.01)
|
||||
assert_almost_equal(np.sqrt(cov.mean()), 0.5)
|
||||
# If we estimate our errors wrong, no change with scaling:
|
||||
w = np.full(y.shape[0], 1./0.5)
|
||||
mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True)
|
||||
assert_allclose(mean.std(), 0.5, atol=0.01)
|
||||
assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
|
||||
# But if we do not scale, our estimate for the error in the mean will
|
||||
# differ.
|
||||
mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled")
|
||||
assert_allclose(mean.std(), 0.5, atol=0.01)
|
||||
assert_almost_equal(np.sqrt(cov.mean()), 0.25)
|
||||
|
||||
def test_objects(self):
|
||||
from decimal import Decimal
|
||||
p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')])
|
||||
p2 = p * Decimal('1.333333333333333')
|
||||
assert_(p2[1] == Decimal("3.9999999999999990"))
|
||||
p2 = p.deriv()
|
||||
assert_(p2[1] == Decimal('8.0'))
|
||||
p2 = p.integ()
|
||||
assert_(p2[3] == Decimal("1.333333333333333333333333333"))
|
||||
assert_(p2[2] == Decimal('1.5'))
|
||||
assert_(np.issubdtype(p2.coeffs.dtype, np.object_))
|
||||
p = np.poly([Decimal(1), Decimal(2)])
|
||||
assert_equal(np.poly([Decimal(1), Decimal(2)]),
|
||||
[1, Decimal(-3), Decimal(2)])
|
||||
|
||||
def test_complex(self):
|
||||
p = np.poly1d([3j, 2j, 1j])
|
||||
p2 = p.integ()
|
||||
assert_((p2.coeffs == [1j, 1j, 1j, 0]).all())
|
||||
p2 = p.deriv()
|
||||
assert_((p2.coeffs == [6j, 2j]).all())
|
||||
|
||||
def test_integ_coeffs(self):
|
||||
p = np.poly1d([3, 2, 1])
|
||||
p2 = p.integ(3, k=[9, 7, 6])
|
||||
assert_(
|
||||
(p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all())
|
||||
|
||||
def test_zero_dims(self):
|
||||
try:
|
||||
np.poly(np.zeros((0, 0)))
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def test_poly_int_overflow(self):
|
||||
"""
|
||||
Regression test for gh-5096.
|
||||
"""
|
||||
v = np.arange(1, 21)
|
||||
assert_almost_equal(np.poly(v), np.poly(np.diag(v)))
|
||||
|
||||
def test_poly_eq(self):
|
||||
p = np.poly1d([1, 2, 3])
|
||||
p2 = np.poly1d([1, 2, 4])
|
||||
assert_equal(p == None, False)
|
||||
assert_equal(p != None, True)
|
||||
assert_equal(p == p, True)
|
||||
assert_equal(p == p2, False)
|
||||
assert_equal(p != p2, True)
|
||||
|
||||
def test_polydiv(self):
|
||||
b = np.poly1d([2, 6, 6, 1])
|
||||
a = np.poly1d([-1j, (1+2j), -(2+1j), 1])
|
||||
q, r = np.polydiv(b, a)
|
||||
assert_equal(q.coeffs.dtype, np.complex128)
|
||||
assert_equal(r.coeffs.dtype, np.complex128)
|
||||
assert_equal(q*a + r, b)
|
||||
|
||||
def test_poly_coeffs_mutable(self):
|
||||
""" Coefficients should be modifiable """
|
||||
p = np.poly1d([1, 2, 3])
|
||||
|
||||
p.coeffs += 1
|
||||
assert_equal(p.coeffs, [2, 3, 4])
|
||||
|
||||
p.coeffs[2] += 10
|
||||
assert_equal(p.coeffs, [2, 3, 14])
|
||||
|
||||
# this never used to be allowed - let's not add features to deprecated
|
||||
# APIs
|
||||
assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1))
|
||||
979
venv/Lib/site-packages/numpy/lib/tests/test_recfunctions.py
Normal file
979
venv/Lib/site-packages/numpy/lib/tests/test_recfunctions.py
Normal file
|
|
@ -0,0 +1,979 @@
|
|||
import pytest
|
||||
|
||||
import numpy as np
|
||||
import numpy.ma as ma
|
||||
from numpy.ma.mrecords import MaskedRecords
|
||||
from numpy.ma.testutils import assert_equal
|
||||
from numpy.testing import assert_, assert_raises
|
||||
from numpy.lib.recfunctions import (
|
||||
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
|
||||
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
|
||||
repack_fields, unstructured_to_structured, structured_to_unstructured,
|
||||
apply_along_fields, require_fields, assign_fields_by_name)
|
||||
get_fieldspec = np.lib.recfunctions._get_fieldspec
|
||||
get_names = np.lib.recfunctions.get_names
|
||||
get_names_flat = np.lib.recfunctions.get_names_flat
|
||||
zip_descr = np.lib.recfunctions._zip_descr
|
||||
zip_dtype = np.lib.recfunctions._zip_dtype
|
||||
|
||||
|
||||
class TestRecFunctions:
|
||||
# Misc tests
|
||||
|
||||
def setup(self):
|
||||
x = np.array([1, 2, ])
|
||||
y = np.array([10, 20, 30])
|
||||
z = np.array([('A', 1.), ('B', 2.)],
|
||||
dtype=[('A', '|S3'), ('B', float)])
|
||||
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
|
||||
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
|
||||
self.data = (w, x, y, z)
|
||||
|
||||
def test_zip_descr(self):
|
||||
# Test zip_descr
|
||||
(w, x, y, z) = self.data
|
||||
|
||||
# Std array
|
||||
test = zip_descr((x, x), flatten=True)
|
||||
assert_equal(test,
|
||||
np.dtype([('', int), ('', int)]))
|
||||
test = zip_descr((x, x), flatten=False)
|
||||
assert_equal(test,
|
||||
np.dtype([('', int), ('', int)]))
|
||||
|
||||
# Std & flexible-dtype
|
||||
test = zip_descr((x, z), flatten=True)
|
||||
assert_equal(test,
|
||||
np.dtype([('', int), ('A', '|S3'), ('B', float)]))
|
||||
test = zip_descr((x, z), flatten=False)
|
||||
assert_equal(test,
|
||||
np.dtype([('', int),
|
||||
('', [('A', '|S3'), ('B', float)])]))
|
||||
|
||||
# Standard & nested dtype
|
||||
test = zip_descr((x, w), flatten=True)
|
||||
assert_equal(test,
|
||||
np.dtype([('', int),
|
||||
('a', int),
|
||||
('ba', float), ('bb', int)]))
|
||||
test = zip_descr((x, w), flatten=False)
|
||||
assert_equal(test,
|
||||
np.dtype([('', int),
|
||||
('', [('a', int),
|
||||
('b', [('ba', float), ('bb', int)])])]))
|
||||
|
||||
def test_drop_fields(self):
|
||||
# Test drop_fields
|
||||
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
|
||||
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
|
||||
|
||||
# A basic field
|
||||
test = drop_fields(a, 'a')
|
||||
control = np.array([((2, 3.0),), ((5, 6.0),)],
|
||||
dtype=[('b', [('ba', float), ('bb', int)])])
|
||||
assert_equal(test, control)
|
||||
|
||||
# Another basic field (but nesting two fields)
|
||||
test = drop_fields(a, 'b')
|
||||
control = np.array([(1,), (4,)], dtype=[('a', int)])
|
||||
assert_equal(test, control)
|
||||
|
||||
# A nested sub-field
|
||||
test = drop_fields(a, ['ba', ])
|
||||
control = np.array([(1, (3.0,)), (4, (6.0,))],
|
||||
dtype=[('a', int), ('b', [('bb', int)])])
|
||||
assert_equal(test, control)
|
||||
|
||||
# All the nested sub-field from a field: zap that field
|
||||
test = drop_fields(a, ['ba', 'bb'])
|
||||
control = np.array([(1,), (4,)], dtype=[('a', int)])
|
||||
assert_equal(test, control)
|
||||
|
||||
# dropping all fields results in an array with no fields
|
||||
test = drop_fields(a, ['a', 'b'])
|
||||
control = np.array([(), ()], dtype=[])
|
||||
assert_equal(test, control)
|
||||
|
||||
def test_rename_fields(self):
|
||||
# Test rename fields
|
||||
a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
|
||||
dtype=[('a', int),
|
||||
('b', [('ba', float), ('bb', (float, 2))])])
|
||||
test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
|
||||
newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
|
||||
control = a.view(newdtype)
|
||||
assert_equal(test.dtype, newdtype)
|
||||
assert_equal(test, control)
|
||||
|
||||
def test_get_names(self):
|
||||
# Test get_names
|
||||
ndtype = np.dtype([('A', '|S3'), ('B', float)])
|
||||
test = get_names(ndtype)
|
||||
assert_equal(test, ('A', 'B'))
|
||||
|
||||
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
|
||||
test = get_names(ndtype)
|
||||
assert_equal(test, ('a', ('b', ('ba', 'bb'))))
|
||||
|
||||
ndtype = np.dtype([('a', int), ('b', [])])
|
||||
test = get_names(ndtype)
|
||||
assert_equal(test, ('a', ('b', ())))
|
||||
|
||||
ndtype = np.dtype([])
|
||||
test = get_names(ndtype)
|
||||
assert_equal(test, ())
|
||||
|
||||
def test_get_names_flat(self):
|
||||
# Test get_names_flat
|
||||
ndtype = np.dtype([('A', '|S3'), ('B', float)])
|
||||
test = get_names_flat(ndtype)
|
||||
assert_equal(test, ('A', 'B'))
|
||||
|
||||
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
|
||||
test = get_names_flat(ndtype)
|
||||
assert_equal(test, ('a', 'b', 'ba', 'bb'))
|
||||
|
||||
ndtype = np.dtype([('a', int), ('b', [])])
|
||||
test = get_names_flat(ndtype)
|
||||
assert_equal(test, ('a', 'b'))
|
||||
|
||||
ndtype = np.dtype([])
|
||||
test = get_names_flat(ndtype)
|
||||
assert_equal(test, ())
|
||||
|
||||
def test_get_fieldstructure(self):
|
||||
# Test get_fieldstructure
|
||||
|
||||
# No nested fields
|
||||
ndtype = np.dtype([('A', '|S3'), ('B', float)])
|
||||
test = get_fieldstructure(ndtype)
|
||||
assert_equal(test, {'A': [], 'B': []})
|
||||
|
||||
# One 1-nested field
|
||||
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
|
||||
test = get_fieldstructure(ndtype)
|
||||
assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})
|
||||
|
||||
# One 2-nested fields
|
||||
ndtype = np.dtype([('A', int),
|
||||
('B', [('BA', int),
|
||||
('BB', [('BBA', int), ('BBB', int)])])])
|
||||
test = get_fieldstructure(ndtype)
|
||||
control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
|
||||
'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
|
||||
assert_equal(test, control)
|
||||
|
||||
# 0 fields
|
||||
ndtype = np.dtype([])
|
||||
test = get_fieldstructure(ndtype)
|
||||
assert_equal(test, {})
|
||||
|
||||
def test_find_duplicates(self):
|
||||
# Test find_duplicates
|
||||
a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
|
||||
(1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
|
||||
mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
|
||||
(0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
|
||||
dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
|
||||
|
||||
test = find_duplicates(a, ignoremask=False, return_index=True)
|
||||
control = [0, 2]
|
||||
assert_equal(sorted(test[-1]), control)
|
||||
assert_equal(test[0], a[test[-1]])
|
||||
|
||||
test = find_duplicates(a, key='A', return_index=True)
|
||||
control = [0, 1, 2, 3, 5]
|
||||
assert_equal(sorted(test[-1]), control)
|
||||
assert_equal(test[0], a[test[-1]])
|
||||
|
||||
test = find_duplicates(a, key='B', return_index=True)
|
||||
control = [0, 1, 2, 4]
|
||||
assert_equal(sorted(test[-1]), control)
|
||||
assert_equal(test[0], a[test[-1]])
|
||||
|
||||
test = find_duplicates(a, key='BA', return_index=True)
|
||||
control = [0, 1, 2, 4]
|
||||
assert_equal(sorted(test[-1]), control)
|
||||
assert_equal(test[0], a[test[-1]])
|
||||
|
||||
test = find_duplicates(a, key='BB', return_index=True)
|
||||
control = [0, 1, 2, 3, 4]
|
||||
assert_equal(sorted(test[-1]), control)
|
||||
assert_equal(test[0], a[test[-1]])
|
||||
|
||||
def test_find_duplicates_ignoremask(self):
|
||||
# Test the ignoremask option of find_duplicates
|
||||
ndtype = [('a', int)]
|
||||
a = ma.array([1, 1, 1, 2, 2, 3, 3],
|
||||
mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
|
||||
test = find_duplicates(a, ignoremask=True, return_index=True)
|
||||
control = [0, 1, 3, 4]
|
||||
assert_equal(sorted(test[-1]), control)
|
||||
assert_equal(test[0], a[test[-1]])
|
||||
|
||||
test = find_duplicates(a, ignoremask=False, return_index=True)
|
||||
control = [0, 1, 2, 3, 4, 6]
|
||||
assert_equal(sorted(test[-1]), control)
|
||||
assert_equal(test[0], a[test[-1]])
|
||||
|
||||
def test_repack_fields(self):
|
||||
dt = np.dtype('u1,f4,i8', align=True)
|
||||
a = np.zeros(2, dtype=dt)
|
||||
|
||||
assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
|
||||
assert_equal(repack_fields(a).itemsize, 13)
|
||||
assert_equal(repack_fields(repack_fields(dt), align=True), dt)
|
||||
|
||||
# make sure type is preserved
|
||||
dt = np.dtype((np.record, dt))
|
||||
assert_(repack_fields(dt).type is np.record)
|
||||
|
||||
def test_structured_to_unstructured(self):
|
||||
a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
|
||||
out = structured_to_unstructured(a)
|
||||
assert_equal(out, np.zeros((4,5), dtype='f8'))
|
||||
|
||||
b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
|
||||
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
|
||||
out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
|
||||
assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ]))
|
||||
out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
|
||||
assert_equal(out, np.array([ 1. , 4. , 7. , 10. ]))
|
||||
|
||||
c = np.arange(20).reshape((4,5))
|
||||
out = unstructured_to_structured(c, a.dtype)
|
||||
want = np.array([( 0, ( 1., 2), [ 3., 4.]),
|
||||
( 5, ( 6., 7), [ 8., 9.]),
|
||||
(10, (11., 12), [13., 14.]),
|
||||
(15, (16., 17), [18., 19.])],
|
||||
dtype=[('a', 'i4'),
|
||||
('b', [('f0', 'f4'), ('f1', 'u2')]),
|
||||
('c', 'f4', (2,))])
|
||||
assert_equal(out, want)
|
||||
|
||||
d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
|
||||
dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
|
||||
assert_equal(apply_along_fields(np.mean, d),
|
||||
np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ]))
|
||||
assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
|
||||
np.array([ 3. , 5.5, 9. , 11. ]))
|
||||
|
||||
# check that for uniform field dtypes we get a view, not a copy:
|
||||
d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
|
||||
dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
|
||||
dd = structured_to_unstructured(d)
|
||||
ddd = unstructured_to_structured(dd, d.dtype)
|
||||
assert_(dd.base is d)
|
||||
assert_(ddd.base is d)
|
||||
|
||||
# including uniform fields with subarrays unpacked
|
||||
d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
|
||||
(8, [9, 10], [[11, 12], [13, 14]])],
|
||||
dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
|
||||
('x2', ('i4', (2, 2)))])
|
||||
dd = structured_to_unstructured(d)
|
||||
ddd = unstructured_to_structured(dd, d.dtype)
|
||||
assert_(dd.base is d)
|
||||
assert_(ddd.base is d)
|
||||
|
||||
# test that nested fields with identical names don't break anything
|
||||
point = np.dtype([('x', int), ('y', int)])
|
||||
triangle = np.dtype([('a', point), ('b', point), ('c', point)])
|
||||
arr = np.zeros(10, triangle)
|
||||
res = structured_to_unstructured(arr, dtype=int)
|
||||
assert_equal(res, np.zeros((10, 6), dtype=int))
|
||||
|
||||
|
||||
# test nested combinations of subarrays and structured arrays, gh-13333
|
||||
def subarray(dt, shape):
|
||||
return np.dtype((dt, shape))
|
||||
|
||||
def structured(*dts):
|
||||
return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)])
|
||||
|
||||
def inspect(dt, dtype=None):
|
||||
arr = np.zeros((), dt)
|
||||
ret = structured_to_unstructured(arr, dtype=dtype)
|
||||
backarr = unstructured_to_structured(ret, dt)
|
||||
return ret.shape, ret.dtype, backarr.dtype
|
||||
|
||||
dt = structured(subarray(structured(np.int32, np.int32), 3))
|
||||
assert_equal(inspect(dt), ((6,), np.int32, dt))
|
||||
|
||||
dt = structured(subarray(subarray(np.int32, 2), 2))
|
||||
assert_equal(inspect(dt), ((4,), np.int32, dt))
|
||||
|
||||
dt = structured(np.int32)
|
||||
assert_equal(inspect(dt), ((1,), np.int32, dt))
|
||||
|
||||
dt = structured(np.int32, subarray(subarray(np.int32, 2), 2))
|
||||
assert_equal(inspect(dt), ((5,), np.int32, dt))
|
||||
|
||||
dt = structured()
|
||||
assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt))
|
||||
|
||||
# these currently don't work, but we may make it work in the future
|
||||
assert_raises(NotImplementedError, structured_to_unstructured,
|
||||
np.zeros(3, dt), dtype=np.int32)
|
||||
assert_raises(NotImplementedError, unstructured_to_structured,
|
||||
np.zeros((3,0), dtype=np.int32))
|
||||
|
||||
def test_field_assignment_by_name(self):
|
||||
a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
|
||||
newdt = [('b', 'f4'), ('c', 'u1')]
|
||||
|
||||
assert_equal(require_fields(a, newdt), np.ones(2, newdt))
|
||||
|
||||
b = np.array([(1,2), (3,4)], dtype=newdt)
|
||||
assign_fields_by_name(a, b, zero_unassigned=False)
|
||||
assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype))
|
||||
assign_fields_by_name(a, b)
|
||||
assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype))
|
||||
|
||||
# test nested fields
|
||||
a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
|
||||
newdt = [('a', [('c', 'u1')])]
|
||||
assert_equal(require_fields(a, newdt), np.ones(2, newdt))
|
||||
b = np.array([((2,),), ((3,),)], dtype=newdt)
|
||||
assign_fields_by_name(a, b, zero_unassigned=False)
|
||||
assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype))
|
||||
assign_fields_by_name(a, b)
|
||||
assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype))
|
||||
|
||||
# test unstructured code path for 0d arrays
|
||||
a, b = np.array(3), np.array(0)
|
||||
assign_fields_by_name(b, a)
|
||||
assert_equal(b[()], 3)
|
||||
|
||||
|
||||
class TestRecursiveFillFields:
    # recursive_fill_fields: copy a structured array into a larger one,
    # field by field; destination entries beyond the source keep their
    # existing values.

    def test_simple_flexible(self):
        # Plain flexible array.
        src = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
        dst = np.zeros((3,), dtype=src.dtype)
        result = recursive_fill_fields(src, dst)
        expected = np.array([(1, 10.), (2, 20.), (0, 0.)],
                            dtype=[('A', int), ('B', float)])
        assert_equal(result, expected)

    def test_masked_flexible(self):
        # Masked flexible array: the mask is carried along.
        src = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
                       dtype=[('A', int), ('B', float)])
        dst = ma.zeros((3,), dtype=src.dtype)
        result = recursive_fill_fields(src, dst)
        expected = ma.array([(1, 10.), (2, 20.), (0, 0.)],
                            mask=[(0, 1), (1, 0), (0, 0)],
                            dtype=[('A', int), ('B', float)])
        assert_equal(result, expected)
||||
|
||||
class TestMergeArrays:
    # merge_arrays: zip several arrays field-wise into one structured
    # array, masking positions missing from shorter inputs.

    def setup(self):
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array(
            [(1, (2, 3.0, ())), (4, (5, 6.0, ()))],
            dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])])
        self.data = (w, x, y, z)

    def test_solo(self):
        # A single array, bare or in a 1-tuple.
        (_, x, _, z) = self.data

        result = merge_arrays(x)
        expected = np.array([(1,), (2,)], dtype=[('f0', int)])
        assert_equal(result, expected)
        result = merge_arrays((x,))
        assert_equal(result, expected)

        # A flexible array passes through, flattened or not.
        result = merge_arrays(z, flatten=False)
        assert_equal(result, z)
        result = merge_arrays(z, flatten=True)
        assert_equal(result, z)

    def test_solo_w_flatten(self):
        # A single nested array with and without flattening.
        w = self.data[0]
        result = merge_arrays(w, flatten=False)
        assert_equal(result, w)

        result = merge_arrays(w, flatten=True)
        expected = np.array([(1, 2, 3.0), (4, 5, 6.0)],
                            dtype=[('a', int), ('ba', float), ('bb', int)])
        assert_equal(result, expected)

    def test_standard(self):
        # Two plain arrays of different lengths.
        (_, x, y, _) = self.data
        result = merge_arrays((x, y), usemask=False)
        expected = np.array([(1, 10), (2, 20), (-1, 30)],
                            dtype=[('f0', int), ('f1', int)])
        assert_equal(result, expected)

        # With usemask, the padding of the shorter input is masked.
        result = merge_arrays((x, y), usemask=True)
        expected = ma.array([(1, 10), (2, 20), (-1, 30)],
                            mask=[(0, 0), (0, 0), (1, 0)],
                            dtype=[('f0', int), ('f1', int)])
        assert_equal(result, expected)
        assert_equal(result.mask, expected.mask)

    def test_flatten(self):
        # Plain & flexible, flattened or kept nested.
        (_, x, _, z) = self.data
        result = merge_arrays((x, z), flatten=True)
        expected = np.array([(1, 'A', 1.), (2, 'B', 2.)],
                            dtype=[('f0', int), ('A', '|S3'), ('B', float)])
        assert_equal(result, expected)

        result = merge_arrays((x, z), flatten=False)
        expected = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
                            dtype=[('f0', int),
                                   ('f1', [('A', '|S3'), ('B', float)])])
        assert_equal(result, expected)

    def test_flatten_wflexible(self):
        # Plain & nested, flattened or kept nested.
        (w, x, _, _) = self.data
        result = merge_arrays((x, w), flatten=True)
        expected = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
                            dtype=[('f0', int),
                                   ('a', int), ('ba', float), ('bb', int)])
        assert_equal(result, expected)

        result = merge_arrays((x, w), flatten=False)
        expected_dtype = [('f0', int),
                          ('f1', [('a', int),
                                  ('b', [('ba', float), ('bb', int),
                                         ('bc', [])])])]
        expected = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))],
                            dtype=expected_dtype)
        assert_equal(result, expected)

    def test_wmasked_arrays(self):
        # A masked input keeps its mask in the merged result.
        (_, x, _, _) = self.data
        mx = ma.array([1, 2, 3], mask=[1, 0, 0])
        result = merge_arrays((x, mx), usemask=True)
        expected = ma.array([(1, 1), (2, 2), (-1, 3)],
                            mask=[(0, 1), (0, 0), (1, 0)],
                            dtype=[('f0', int), ('f1', int)])
        assert_equal(result, expected)
        result = merge_arrays((x, mx), usemask=True, asrecarray=True)
        assert_equal(result, expected)
        assert_(isinstance(result, MaskedRecords))

    def test_w_singlefield(self):
        # One single-field flexible array plus one plain array.
        result = merge_arrays((np.array([1, 2]).view([('a', int)]),
                               np.array([10., 20., 30.])),)
        expected = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
                            mask=[(0, 0), (0, 0), (1, 0)],
                            dtype=[('a', int), ('f1', float)])
        assert_equal(result, expected)

    def test_w_shorter_flex(self):
        # merge_arrays with a shorter flexible ndarray.
        z = self.data[-1]

        # Fixme, this test looks incomplete and broken
        #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
        #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
        #                   dtype=[('A', '|S3'), ('B', float), ('C', int)])
        #assert_equal(test, control)

        # Keep the calls so nothing regresses silently (and to quiet
        # pyflakes about unused variables).
        merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
        np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
                 dtype=[('A', '|S3'), ('B', float), ('C', int)])

    def test_singlerecord(self):
        # Merging scalar records from each input.
        (_, x, y, z) = self.data
        result = merge_arrays((x[0], y[0], z[0]), usemask=False)
        expected = np.array([(1, 10, ('A', 1))],
                            dtype=[('f0', int),
                                   ('f1', int),
                                   ('f2', [('A', '|S3'), ('B', float)])])
        assert_equal(result, expected)
||||
|
||||
class TestAppendFields:
    # append_fields: add new columns to an existing structured array,
    # masking rows the new data does not cover (and vice versa).

    def setup(self):
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)

    def test_append_single(self):
        # Append one field; the shorter base array is padded & masked.
        (_, x, _, _) = self.data
        result = append_fields(x, 'A', data=[10, 20, 30])
        expected = ma.array([(1, 10), (2, 20), (-1, 30)],
                            mask=[(0, 0), (0, 0), (1, 0)],
                            dtype=[('f0', int), ('A', int)],)
        assert_equal(result, expected)

    def test_append_double(self):
        # Append two fields of different lengths at once.
        (_, x, _, _) = self.data
        result = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
        expected = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
                            mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
                            dtype=[('f0', int), ('A', int), ('B', int)],)
        assert_equal(result, expected)

    def test_append_on_flex(self):
        # Append to a flexible-type base array.
        z = self.data[-1]
        result = append_fields(z, 'C', data=[10, 20, 30])
        expected = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
                            mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
                            dtype=[('A', '|S3'), ('B', float), ('C', int)],)
        assert_equal(result, expected)

    def test_append_on_nested(self):
        # Append to a base array with nested fields.
        w = self.data[0]
        result = append_fields(w, 'C', data=[10, 20, 30])
        expected = ma.array([(1, (2, 3.0), 10),
                             (4, (5, 6.0), 20),
                             (-1, (-1, -1.), 30)],
                            mask=[(
                                0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],
                            dtype=[('a', int),
                                   ('b', [('ba', float), ('bb', int)]),
                                   ('C', int)],)
        assert_equal(result, expected)
||||
|
||||
class TestStackArrays:
    # stack_arrays: concatenate structured arrays field-wise, masking
    # the entries an input does not supply.

    def setup(self):
        # Shared fixtures: two plain arrays, a flexible array, and a
        # nested one.
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)
||||
def test_solo(self):
|
||||
# Test stack_arrays on single arrays
|
||||
(_, x, _, _) = self.data
|
||||
test = stack_arrays((x,))
|
||||
assert_equal(test, x)
|
||||
assert_(test is x)
|
||||
|
||||
test = stack_arrays(x)
|
||||
assert_equal(test, x)
|
||||
assert_(test is x)
|
||||
|
||||
def test_unnamed_fields(self):
|
||||
# Tests combinations of arrays w/o named fields
|
||||
(_, x, y, _) = self.data
|
||||
|
||||
test = stack_arrays((x, x), usemask=False)
|
||||
control = np.array([1, 2, 1, 2])
|
||||
assert_equal(test, control)
|
||||
|
||||
test = stack_arrays((x, y), usemask=False)
|
||||
control = np.array([1, 2, 10, 20, 30])
|
||||
assert_equal(test, control)
|
||||
|
||||
test = stack_arrays((y, x), usemask=False)
|
||||
control = np.array([10, 20, 30, 1, 2])
|
||||
assert_equal(test, control)
|
||||
|
||||
def test_unnamed_and_named_fields(self):
|
||||
# Test combination of arrays w/ & w/o named fields
|
||||
(_, x, _, z) = self.data
|
||||
|
||||
test = stack_arrays((x, z))
|
||||
control = ma.array([(1, -1, -1), (2, -1, -1),
|
||||
(-1, 'A', 1), (-1, 'B', 2)],
|
||||
mask=[(0, 1, 1), (0, 1, 1),
|
||||
(1, 0, 0), (1, 0, 0)],
|
||||
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
|
||||
assert_equal(test, control)
|
||||
assert_equal(test.mask, control.mask)
|
||||
|
||||
test = stack_arrays((z, x))
|
||||
control = ma.array([('A', 1, -1), ('B', 2, -1),
|
||||
(-1, -1, 1), (-1, -1, 2), ],
|
||||
mask=[(0, 0, 1), (0, 0, 1),
|
||||
(1, 1, 0), (1, 1, 0)],
|
||||
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
|
||||
assert_equal(test, control)
|
||||
assert_equal(test.mask, control.mask)
|
||||
|
||||
test = stack_arrays((z, z, x))
|
||||
control = ma.array([('A', 1, -1), ('B', 2, -1),
|
||||
('A', 1, -1), ('B', 2, -1),
|
||||
(-1, -1, 1), (-1, -1, 2), ],
|
||||
mask=[(0, 0, 1), (0, 0, 1),
|
||||
(0, 0, 1), (0, 0, 1),
|
||||
(1, 1, 0), (1, 1, 0)],
|
||||
dtype=[('A', '|S3'), ('B', float), ('f2', int)])
|
||||
assert_equal(test, control)
|
||||
|
||||
def test_matching_named_fields(self):
|
||||
# Test combination of arrays w/ matching field names
|
||||
(_, x, _, z) = self.data
|
||||
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
|
||||
dtype=[('A', '|S3'), ('B', float), ('C', float)])
|
||||
test = stack_arrays((z, zz))
|
||||
control = ma.array([('A', 1, -1), ('B', 2, -1),
|
||||
(
|
||||
'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
|
||||
dtype=[('A', '|S3'), ('B', float), ('C', float)],
|
||||
mask=[(0, 0, 1), (0, 0, 1),
|
||||
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
|
||||
assert_equal(test, control)
|
||||
assert_equal(test.mask, control.mask)
|
||||
|
||||
test = stack_arrays((z, zz, x))
|
||||
ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
|
||||
control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
|
||||
('a', 10., 100., -1), ('b', 20., 200., -1),
|
||||
('c', 30., 300., -1),
|
||||
(-1, -1, -1, 1), (-1, -1, -1, 2)],
|
||||
dtype=ndtype,
|
||||
mask=[(0, 0, 1, 1), (0, 0, 1, 1),
|
||||
(0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
|
||||
(1, 1, 1, 0), (1, 1, 1, 0)])
|
||||
assert_equal(test, control)
|
||||
assert_equal(test.mask, control.mask)
|
||||
|
||||
def test_defaults(self):
|
||||
# Test defaults: no exception raised if keys of defaults are not fields.
|
||||
(_, _, _, z) = self.data
|
||||
zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
|
||||
dtype=[('A', '|S3'), ('B', float), ('C', float)])
|
||||
defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}
|
||||
test = stack_arrays((z, zz), defaults=defaults)
|
||||
control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
|
||||
(
|
||||
'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
|
||||
dtype=[('A', '|S3'), ('B', float), ('C', float)],
|
||||
mask=[(0, 0, 1), (0, 0, 1),
|
||||
(0, 0, 0), (0, 0, 0), (0, 0, 0)])
|
||||
assert_equal(test, control)
|
||||
assert_equal(test.data, control.data)
|
||||
assert_equal(test.mask, control.mask)
|
||||
|
||||
def test_autoconversion(self):
|
||||
# Tests autoconversion
|
||||
adtype = [('A', int), ('B', bool), ('C', float)]
|
||||
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
|
||||
bdtype = [('A', int), ('B', float), ('C', float)]
|
||||
b = ma.array([(4, 5, 6)], dtype=bdtype)
|
||||
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
|
||||
dtype=bdtype)
|
||||
test = stack_arrays((a, b), autoconvert=True)
|
||||
assert_equal(test, control)
|
||||
assert_equal(test.mask, control.mask)
|
||||
with assert_raises(TypeError):
|
||||
stack_arrays((a, b), autoconvert=False)
|
||||
|
||||
def test_checktitles(self):
|
||||
# Test using titles in the field names
|
||||
adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
|
||||
a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
|
||||
bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
|
||||
b = ma.array([(4, 5, 6)], dtype=bdtype)
|
||||
test = stack_arrays((a, b))
|
||||
control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
|
||||
dtype=bdtype)
|
||||
assert_equal(test, control)
|
||||
assert_equal(test.mask, control.mask)
|
||||
|
||||
def test_subdtype(self):
|
||||
z = np.array([
|
||||
('A', 1), ('B', 2)
|
||||
], dtype=[('A', '|S3'), ('B', float, (1,))])
|
||||
zz = np.array([
|
||||
('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
|
||||
], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
|
||||
|
||||
res = stack_arrays((z, zz))
|
||||
expected = ma.array(
|
||||
data=[
|
||||
(b'A', [1.0], 0),
|
||||
(b'B', [2.0], 0),
|
||||
(b'a', [10.0], 100.0),
|
||||
(b'b', [20.0], 200.0),
|
||||
(b'c', [30.0], 300.0)],
|
||||
mask=[
|
||||
(False, [False], True),
|
||||
(False, [False], True),
|
||||
(False, [False], False),
|
||||
(False, [False], False),
|
||||
(False, [False], False)
|
||||
],
|
||||
dtype=zz.dtype
|
||||
)
|
||||
assert_equal(res.dtype, expected.dtype)
|
||||
assert_equal(res, expected)
|
||||
assert_equal(res.mask, expected.mask)
|
||||
|
||||
|
||||
class TestJoinBy:
    """Tests for numpy.lib.recfunctions.join_by (inner/outer/leftouter)."""

    def setup(self):
        # Two structured arrays sharing key fields 'a' and 'b';
        # their 'a' values overlap on 5..9.
        self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
                                   np.arange(100, 110))),
                          dtype=[('a', int), ('b', int), ('c', int)])
        self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
                                   np.arange(100, 110))),
                          dtype=[('a', int), ('b', int), ('d', int)])

    def test_inner_join(self):
        # Basic test of join_by: only the overlapping keys survive, and the
        # duplicated non-key field 'b' is suffixed as b1/b2.
        a, b = self.a, self.b

        test = join_by('a', a, b, jointype='inner')
        control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
                            (7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
                            (9, 59, 69, 109, 104)],
                           dtype=[('a', int), ('b1', int), ('b2', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_join(self):
        a, b = self.a, self.b

        # Fixme, this test is broken
        #test = join_by(('a', 'b'), a, b)
        #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
        #                    (7, 57, 107, 102), (8, 58, 108, 103),
        #                    (9, 59, 109, 104)],
        #                   dtype=[('a', int), ('b', int),
        #                          ('c', int), ('d', int)])
        #assert_equal(test, control)

        # Hack to avoid pyflakes unused variable warnings
        join_by(('a', 'b'), a, b)
        np.array([(5, 55, 105, 100), (6, 56, 106, 101),
                  (7, 57, 107, 102), (8, 58, 108, 103),
                  (9, 59, 109, 104)],
                 dtype=[('a', int), ('b', int),
                        ('c', int), ('d', int)])

    def test_join_subdtype(self):
        # tests the bug in https://stackoverflow.com/q/44769632/102441
        # (subarray fields used to break the join)
        foo = np.array([(1,)],
                       dtype=[('key', int)])
        bar = np.array([(1, np.array([1, 2, 3]))],
                       dtype=[('key', int), ('value', 'uint16', 3)])
        res = join_by('key', foo, bar)
        assert_equal(res, bar.view(ma.MaskedArray))

    def test_outer_join(self):
        # Rows with no partner in the other array get masked fill values.
        a, b = self.a, self.b

        test = join_by(('a', 'b'), a, b, 'outer')
        control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
                            (2, 52, 102, -1), (3, 53, 103, -1),
                            (4, 54, 104, -1), (5, 55, 105, -1),
                            (5, 65, -1, 100), (6, 56, 106, -1),
                            (6, 66, -1, 101), (7, 57, 107, -1),
                            (7, 67, -1, 102), (8, 58, 108, -1),
                            (8, 68, -1, 103), (9, 59, 109, -1),
                            (9, 69, -1, 104), (10, 70, -1, 105),
                            (11, 71, -1, 106), (12, 72, -1, 107),
                            (13, 73, -1, 108), (14, 74, -1, 109)],
                           mask=[(0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 1, 0),
                                 (0, 0, 1, 0), (0, 0, 1, 0),
                                 (0, 0, 1, 0), (0, 0, 1, 0)],
                           dtype=[('a', int), ('b', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_leftouter_join(self):
        # Left-outer: all rows of `a` are kept, unmatched 'd' is masked.
        a, b = self.a, self.b

        test = join_by(('a', 'b'), a, b, 'leftouter')
        control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
                            (2, 52, 102, -1), (3, 53, 103, -1),
                            (4, 54, 104, -1), (5, 55, 105, -1),
                            (6, 56, 106, -1), (7, 57, 107, -1),
                            (8, 58, 108, -1), (9, 59, 109, -1)],
                           mask=[(0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1)],
                           dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_different_field_order(self):
        # gh-8940: differing field order in the inputs must not warn.
        a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
        b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
        # this should not give a FutureWarning:
        j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
        assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])

    def test_duplicate_keys(self):
        a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
        b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
        assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)

    @pytest.mark.xfail(reason="See comment at gh-9343")
    def test_same_name_different_dtypes_key(self):
        a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
        b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
        expected_dtype = np.dtype([
            ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])

        a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
        b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
        res = join_by('key', a, b)

        assert_equal(res.dtype, expected_dtype)

    def test_same_name_different_dtypes(self):
        # gh-9338: same non-key field name with different dtypes is allowed,
        # each keeps its own dtype under a suffixed name.
        a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
        b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
        expected_dtype = np.dtype([
            ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])

        a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
        b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
        res = join_by('key', a, b)

        assert_equal(res.dtype, expected_dtype)

    def test_subarray_key(self):
        # A subarray field may itself serve as the join key.
        a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
        a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)

        b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
        b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)

        expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
        expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)

        res = join_by('pos', a, b)
        assert_equal(res.dtype, expected_dtype)
        assert_equal(res, expected)

    def test_padded_dtype(self):
        dt = np.dtype('i1,f4', align=True)
        dt.names = ('k', 'v')
        # BUG FIX: the original `assert_(len(dt.descr), 3)` passed
        # unconditionally (the 3 was treated as the failure message);
        # assert_equal actually checks that alignment inserted a padding
        # field, making the descr 3 entries long.
        assert_equal(len(dt.descr), 3)  # padding field is inserted

        a = np.array([(1, 3), (3, 2)], dt)
        b = np.array([(1, 1), (2, 2)], dt)
        res = join_by('k', a, b)

        # no padding fields remain
        expected_dtype = np.dtype([
            ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
        ])

        assert_equal(res.dtype, expected_dtype)
|
||||
|
||||
|
||||
class TestJoinBy2:
    """join_by tests where every key of `a` also appears in `b`."""

    @classmethod
    def setup(cls):
        cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
                                  np.arange(100, 110))),
                         dtype=[('a', int), ('b', int), ('c', int)])
        cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
                                  np.arange(100, 110))),
                         dtype=[('a', int), ('b', int), ('d', int)])

    def test_no_r1postfix(self):
        # An empty r1postfix keeps the left duplicate field name as-is.
        a, b = self.a, self.b

        res = join_by(
            'a', a, b, r1postfix='', r2postfix='2', jointype='inner')
        expected = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                             (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                             (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                             (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                             (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                            dtype=[('a', int), ('b', int), ('b2', int),
                                   ('c', int), ('d', int)])
        assert_equal(res, expected)

    def test_no_postfix(self):
        # Both postfixes empty would create colliding names -> ValueError.
        assert_raises(ValueError, join_by, 'a', self.a, self.b,
                      r1postfix='', r2postfix='')

    def test_no_r2postfix(self):
        # Symmetric case: keep the right duplicate field name unchanged.
        a, b = self.a, self.b

        res = join_by(
            'a', a, b, r1postfix='1', r2postfix='', jointype='inner')
        expected = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                             (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                             (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                             (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                             (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                            dtype=[('a', int), ('b1', int), ('b', int),
                                   ('c', int), ('d', int)])
        assert_equal(res, expected)

    def test_two_keys_two_vars(self):
        # Join on two keys with two shared non-key fields ('b' and 'c').
        a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(50, 60), np.arange(10, 20))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])

        b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(65, 75), np.arange(0, 10))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])

        expected = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
                             (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
                             (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
                             (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
                             (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
                            dtype=[('k', int), ('a', int), ('b1', int),
                                   ('b2', int), ('c1', int), ('c2', int)])
        res = join_by(
            ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
        assert_equal(res.dtype, expected.dtype)
        assert_equal(res, expected)
|
||||
|
||||
class TestAppendFieldsObj:
    """
    Test append_fields with arrays containing objects
    """
    # https://github.com/numpy/numpy/issues/2346

    def setup(self):
        from datetime import date
        self.data = dict(obj=date(2000, 1, 1))

    def test_append_to_objects(self):
        "Test append_fields when the base array contains objects"
        obj = self.data['obj']
        base = np.array([(obj, 1.), (obj, 2.)],
                        dtype=[('A', object), ('B', float)])
        extra = np.array([10, 20], dtype=int)
        result = append_fields(base, 'C', data=extra, usemask=False)
        expected = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
                            dtype=[('A', object), ('B', float), ('C', int)])
        assert_equal(result, expected)
|
||||
247
venv/Lib/site-packages/numpy/lib/tests/test_regression.py
Normal file
247
venv/Lib/site-packages/numpy/lib/tests/test_regression.py
Normal file
|
|
@ -0,0 +1,247 @@
|
|||
import os
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (
|
||||
assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
|
||||
assert_raises, _assert_valid_refcount,
|
||||
)
|
||||
|
||||
|
||||
class TestRegression:
    """Regression tests for historical numpy tickets touching np.lib.

    Improvements over the original: the two tests that write to os.devnull
    now open it with a context manager (no leaked file handles on failure),
    and a redundant function-local ``import os`` (already imported at module
    level) was removed.
    """

    def test_poly1d(self):
        # Ticket #28
        assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
                     np.poly1d([-1, 1]))

    def test_cov_parameters(self):
        # Ticket #91: np.cov must not mutate its input array.
        x = np.random.random((3, 3))
        y = x.copy()
        np.cov(x, rowvar=True)
        np.cov(y, rowvar=False)
        assert_array_equal(x, y)

    def test_mem_digitize(self):
        # Ticket #95: repeated digitize calls must not leak memory.
        for i in range(100):
            np.digitize([1, 2, 3, 4], [1, 3])
            np.digitize([0, 1, 2, 3, 4], [1, 3])

    def test_unique_zero_sized(self):
        # Ticket #205
        assert_array_equal([], np.unique(np.array([])))

    def test_mem_vectorise(self):
        # Ticket #325
        vt = np.vectorize(lambda *args: args)
        vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))
        vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1,
           1, 2)), np.zeros((2, 2)))

    def test_mgrid_single_element(self):
        # Ticket #339
        assert_array_equal(np.mgrid[0:0:1j], [0])
        assert_array_equal(np.mgrid[0:0], [])

    def test_refcount_vectorize(self):
        # Ticket #378
        def p(x, y):
            return 123
        v = np.vectorize(p)
        _assert_valid_refcount(v)

    def test_poly1d_nan_roots(self):
        # Ticket #396: accessing roots of a NaN polynomial must raise,
        # not hang or crash.
        p = np.poly1d([np.nan, np.nan, 1], r=False)
        assert_raises(np.linalg.LinAlgError, getattr, p, "r")

    def test_mem_polymul(self):
        # Ticket #448: empty operand must not crash.
        np.polymul([], [1.])

    def test_mem_string_concat(self):
        # Ticket #469
        x = np.array([])
        np.append(x, 'asdasd\tasdasd')

    def test_poly_div(self):
        # Ticket #553: q*v + r must reconstruct u.
        u = np.poly1d([1, 2, 3])
        v = np.poly1d([1, 2, 3, 4, 5])
        q, r = np.polydiv(u, v)
        assert_equal(q*v + r, u)

    def test_poly_eq(self):
        # Ticket #554
        x = np.poly1d([1, 2, 3])
        y = np.poly1d([3, 4])
        assert_(x != y)
        assert_(x == x)

    def test_polyfit_build(self):
        # Ticket #628: polyfit on this data used to produce garbage.
        ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
               9.95368241e+00, -3.14526520e+02]
        x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
             104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
             116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
             130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
             146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
             158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
             170, 171, 172, 173, 174, 175, 176]
        y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
             6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
             13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
             7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
             6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
             6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
             8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
        tested = np.polyfit(x, y, 4)
        assert_array_almost_equal(ref, tested)

    def test_polydiv_type(self):
        # Make polydiv work for complex types
        msg = "Wrong type, should be complex"
        x = np.ones(3, dtype=complex)
        q, r = np.polydiv(x, x)
        assert_(q.dtype == complex, msg)
        msg = "Wrong type, should be float"
        x = np.ones(3, dtype=int)
        q, r = np.polydiv(x, x)
        assert_(q.dtype == float, msg)

    def test_histogramdd_too_many_bins(self):
        # Ticket 928.
        assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10)

    def test_polyint_type(self):
        # Ticket #944
        msg = "Wrong type, should be complex"
        x = np.ones(3, dtype=complex)
        assert_(np.polyint(x).dtype == complex, msg)
        msg = "Wrong type, should be float"
        x = np.ones(3, dtype=int)
        assert_(np.polyint(x).dtype == float, msg)

    def test_ndenumerate_crash(self):
        # Ticket 1140
        # Shouldn't crash:
        list(np.ndenumerate(np.array([[]])))

    def test_asfarray_none(self):
        # Test for changeset r5065
        assert_array_equal(np.array([np.nan]), np.asfarray([None]))

    def test_large_fancy_indexing(self):
        # Large enough to fail on 64-bit.
        nbits = np.dtype(np.intp).itemsize * 8
        thesize = int((2**nbits)**(1.0/5.0)+1)

        def dp():
            n = 3
            a = np.ones((n,)*5)
            i = np.random.randint(0, n, size=thesize)
            a[np.ix_(i, i, i, i, i)] = 0

        def dp2():
            n = 3
            a = np.ones((n,)*5)
            i = np.random.randint(0, n, size=thesize)
            a[np.ix_(i, i, i, i, i)]

        assert_raises(ValueError, dp)
        assert_raises(ValueError, dp2)

    def test_void_coercion(self):
        dt = np.dtype([('a', 'f4'), ('b', 'i4')])
        x = np.zeros((1,), dt)
        assert_(np.r_[x, x].dtype == dt)

    def test_who_with_0dim_array(self):
        # ticket #1243: np.who used to crash on 0-d arrays.
        import sys

        oldstdout = sys.stdout
        try:
            # context manager guarantees the devnull handle is closed even
            # if np.who raises (the original left it open in that case)
            with open(os.devnull, 'w') as devnull:
                sys.stdout = devnull
                try:
                    np.who({'foo': np.array(1)})
                except Exception:
                    raise AssertionError("ticket #1243")
        finally:
            sys.stdout = oldstdout

    def test_include_dirs(self):
        # As a sanity check, just test that get_include
        # includes something reasonable. Somewhat
        # related to ticket #1405.
        include_dirs = [np.get_include()]
        for path in include_dirs:
            assert_(isinstance(path, str))
            assert_(path != '')

    def test_polyder_return_type(self):
        # Ticket #1249
        assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d))
        assert_(isinstance(np.polyder([1], 0), np.ndarray))
        assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d))
        assert_(isinstance(np.polyder([1], 1), np.ndarray))

    def test_append_fields_dtype_list(self):
        # Ticket #1676
        from numpy.lib.recfunctions import append_fields

        base = np.array([1, 2, 3], dtype=np.int32)
        names = ['a', 'b', 'c']
        data = np.eye(3).astype(np.int32)
        dlist = [np.float64, np.int32, np.int32]
        try:
            append_fields(base, names, data, dlist)
        except Exception:
            raise AssertionError()

    def test_loadtxt_fields_subarrays(self):
        # For ticket #1936
        from io import StringIO

        dt = [("a", 'u1', 2), ("b", 'u1', 2)]
        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))

        dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])]
        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt))

        dt = [("a", 'u1', (2, 2))]
        x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
        assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt))

        dt = [("a", 'u1', (2, 3, 2))]
        x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt)
        data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)]
        assert_equal(x, np.array(data, dtype=dt))

    def test_nansum_with_boolean(self):
        # gh-2978
        a = np.zeros(2, dtype=bool)
        try:
            np.nansum(a)
        except Exception:
            raise AssertionError()

    def test_py3_compat(self):
        # gh-2561
        # Test if the oldstyle class test is bypassed in python3
        class C():
            """Old-style class in python2, normal class in python3"""
            pass

        # 'with' replaces the original try/finally close() — same behavior,
        # but the handle is also closed if open() itself partially fails
        with open(os.devnull, 'w') as out:
            try:
                np.info(C(), output=out)
            except AttributeError:
                raise AssertionError()
|
||||
717
venv/Lib/site-packages/numpy/lib/tests/test_shape_base.py
Normal file
717
venv/Lib/site-packages/numpy/lib/tests/test_shape_base.py
Normal file
|
|
@ -0,0 +1,717 @@
|
|||
import numpy as np
|
||||
import functools
|
||||
import sys
|
||||
import pytest
|
||||
|
||||
from numpy.lib.shape_base import (
|
||||
apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
|
||||
vsplit, dstack, column_stack, kron, tile, expand_dims, take_along_axis,
|
||||
put_along_axis
|
||||
)
|
||||
from numpy.testing import (
|
||||
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns
|
||||
)
|
||||
|
||||
|
||||
IS_64BIT = sys.maxsize > 2**32  # True when running on a 64-bit Python build
|
||||
|
||||
|
||||
def _add_keepdims(func):
|
||||
""" hack in keepdims behavior into a function taking an axis """
|
||||
@functools.wraps(func)
|
||||
def wrapped(a, axis, **kwargs):
|
||||
res = func(a, axis=axis, **kwargs)
|
||||
if axis is None:
|
||||
axis = 0 # res is now a scalar, so we can insert this anywhere
|
||||
return np.expand_dims(res, axis=axis)
|
||||
return wrapped
|
||||
|
||||
|
||||
class TestTakeAlongAxis:
|
||||
def test_argequivalent(self):
|
||||
""" Test it translates from arg<func> to <func> """
|
||||
from numpy.random import rand
|
||||
a = rand(3, 4, 5)
|
||||
|
||||
funcs = [
|
||||
(np.sort, np.argsort, dict()),
|
||||
(_add_keepdims(np.min), _add_keepdims(np.argmin), dict()),
|
||||
(_add_keepdims(np.max), _add_keepdims(np.argmax), dict()),
|
||||
(np.partition, np.argpartition, dict(kth=2)),
|
||||
]
|
||||
|
||||
for func, argfunc, kwargs in funcs:
|
||||
for axis in list(range(a.ndim)) + [None]:
|
||||
a_func = func(a, axis=axis, **kwargs)
|
||||
ai_func = argfunc(a, axis=axis, **kwargs)
|
||||
assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
|
||||
|
||||
def test_invalid(self):
|
||||
""" Test it errors when indices has too few dimensions """
|
||||
a = np.ones((10, 10))
|
||||
ai = np.ones((10, 2), dtype=np.intp)
|
||||
|
||||
# sanity check
|
||||
take_along_axis(a, ai, axis=1)
|
||||
|
||||
# not enough indices
|
||||
assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
|
||||
# bool arrays not allowed
|
||||
assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
|
||||
# float arrays not allowed
|
||||
assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
|
||||
# invalid axis
|
||||
assert_raises(np.AxisError, take_along_axis, a, ai, axis=10)
|
||||
|
||||
def test_empty(self):
|
||||
""" Test everything is ok with empty results, even with inserted dims """
|
||||
a = np.ones((3, 4, 5))
|
||||
ai = np.ones((3, 0, 5), dtype=np.intp)
|
||||
|
||||
actual = take_along_axis(a, ai, axis=1)
|
||||
assert_equal(actual.shape, ai.shape)
|
||||
|
||||
def test_broadcast(self):
|
||||
""" Test that non-indexing dimensions are broadcast in both directions """
|
||||
a = np.ones((3, 4, 1))
|
||||
ai = np.ones((1, 2, 5), dtype=np.intp)
|
||||
actual = take_along_axis(a, ai, axis=1)
|
||||
assert_equal(actual.shape, (3, 2, 5))
|
||||
|
||||
|
||||
class TestPutAlongAxis:
|
||||
def test_replace_max(self):
|
||||
a_base = np.array([[10, 30, 20], [60, 40, 50]])
|
||||
|
||||
for axis in list(range(a_base.ndim)) + [None]:
|
||||
# we mutate this in the loop
|
||||
a = a_base.copy()
|
||||
|
||||
# replace the max with a small value
|
||||
i_max = _add_keepdims(np.argmax)(a, axis=axis)
|
||||
put_along_axis(a, i_max, -99, axis=axis)
|
||||
|
||||
# find the new minimum, which should max
|
||||
i_min = _add_keepdims(np.argmin)(a, axis=axis)
|
||||
|
||||
assert_equal(i_min, i_max)
|
||||
|
||||
def test_broadcast(self):
|
||||
""" Test that non-indexing dimensions are broadcast in both directions """
|
||||
a = np.ones((3, 4, 1))
|
||||
ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
|
||||
put_along_axis(a, ai, 20, axis=1)
|
||||
assert_equal(take_along_axis(a, ai, axis=1), 20)
|
||||
|
||||
|
||||
class TestApplyAlongAxis:
    def test_simple(self):
        a = np.ones((20, 10), 'd')
        assert_array_equal(
            apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))

    def test_simple101(self):
        # >100 columns exercised a historical buffer-size limit
        a = np.ones((10, 101), 'd')
        assert_array_equal(
            apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))

    def test_3d(self):
        a = np.arange(27).reshape((3, 3, 3))
        assert_array_equal(apply_along_axis(np.sum, 0, a),
                           [[27, 30, 33], [36, 39, 42], [45, 48, 51]])

    def test_preserve_subclass(self):
        def double(row):
            return row * 2

        class MyNDArray(np.ndarray):
            pass

        m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
        expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)

        # the subclass must survive along either axis
        for axis in (0, 1):
            result = apply_along_axis(double, axis, m)
            assert_(isinstance(result, MyNDArray))
            assert_array_equal(result, expected)

    def test_subclass(self):
        class MinimalSubclass(np.ndarray):
            data = 1

        def minimal_function(array):
            return array.data

        a = np.zeros((6, 3)).view(MinimalSubclass)

        assert_array_equal(
            apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1])
        )

    def test_scalar_array(self, cls=np.ndarray):
        a = np.ones((6, 3)).view(cls)
        res = apply_along_axis(np.sum, 0, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([6, 6, 6]).view(cls))

    def test_0d_array(self, cls=np.ndarray):
        def sum_to_0d(x):
            """ Sum x, returning a 0d array of the same class """
            assert_equal(x.ndim, 1)
            return np.squeeze(np.sum(x, keepdims=True))
        a = np.ones((6, 3)).view(cls)
        res = apply_along_axis(sum_to_0d, 0, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([6, 6, 6]).view(cls))

        res = apply_along_axis(sum_to_0d, 1, a)
        assert_(isinstance(res, cls))
        assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls))

    def test_axis_insertion(self, cls=np.ndarray):
        def f1to2(x):
            """produces an asymmetric non-square matrix from x"""
            assert_equal(x.ndim, 1)
            return (x[::-1] * x[1:, None]).view(cls)

        a2d = np.arange(6*3).reshape((6, 3))

        # 2d insertion along first axis
        actual = apply_along_axis(f1to2, 0, a2d)
        expected = np.stack([
            f1to2(a2d[:, i]) for i in range(a2d.shape[1])
        ], axis=-1).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

        # 2d insertion along last axis
        actual = apply_along_axis(f1to2, 1, a2d)
        expected = np.stack([
            f1to2(a2d[i, :]) for i in range(a2d.shape[0])
        ], axis=0).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

        # 3d insertion along middle axis
        a3d = np.arange(6*5*3).reshape((6, 5, 3))

        actual = apply_along_axis(f1to2, 1, a3d)
        expected = np.stack([
            np.stack([
                f1to2(a3d[i, :, j]) for i in range(a3d.shape[0])
            ], axis=0)
            for j in range(a3d.shape[2])
        ], axis=-1).view(cls)
        assert_equal(type(actual), type(expected))
        assert_equal(actual, expected)

    def test_subclass_preservation(self):
        # rerun the ndarray-default tests with a minimal subclass
        class MinimalSubclass(np.ndarray):
            pass
        self.test_scalar_array(MinimalSubclass)
        self.test_0d_array(MinimalSubclass)
        self.test_axis_insertion(MinimalSubclass)

    def test_axis_insertion_ma(self):
        def f1to2(x):
            """produces an asymmetric non-square matrix from x"""
            assert_equal(x.ndim, 1)
            res = x[::-1] * x[1:, None]
            return np.ma.masked_where(res % 5 == 0, res)
        a = np.arange(6*3).reshape((6, 3))
        res = apply_along_axis(f1to2, 0, a)
        assert_(isinstance(res, np.ma.masked_array))
        assert_equal(res.ndim, 3)
        # each output slice carries the mask its column produced
        assert_array_equal(res[:, :, 0].mask, f1to2(a[:, 0]).mask)
        assert_array_equal(res[:, :, 1].mask, f1to2(a[:, 1]).mask)
        assert_array_equal(res[:, :, 2].mask, f1to2(a[:, 2]).mask)

    def test_tuple_func1d(self):
        def sample_1d(x):
            return x[1], x[0]
        res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]]))
        assert_array_equal(res, np.array([[2, 1], [4, 3]]))

    def test_empty(self):
        # can't apply_along_axis when there's no chance to call the function
        def never_call(x):
            assert_(False)  # should never be reached

        a = np.empty((0, 0))
        assert_raises(ValueError, np.apply_along_axis, never_call, 0, a)
        assert_raises(ValueError, np.apply_along_axis, never_call, 1, a)

        # but it's sometimes ok with some non-zero dimensions
        def empty_to_1(x):
            assert_(len(x) == 0)
            return 1

        a = np.empty((10, 0))
        actual = np.apply_along_axis(empty_to_1, 1, a)
        assert_equal(actual, np.ones(10))
        assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a)

    def test_with_iterable_object(self):
        # from issue 5248
        d = np.array([
            [{1, 11}, {2, 22}, {3, 33}],
            [{4, 44}, {5, 55}, {6, 66}]
        ])
        actual = np.apply_along_axis(lambda a: set.union(*a), 0, d)
        expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}])

        assert_equal(actual, expected)

        # issue 8642 - assert_equal doesn't detect this!
        for i in np.ndindex(actual.shape):
            assert_equal(type(actual[i]), type(expected[i]))
|
||||
|
||||
|
||||
class TestApplyOverAxes:
    def test_simple(self):
        # Summing over axes 0 then 2 keeps the reduced axes as length-1.
        data = np.arange(24).reshape(2, 3, 4)
        reduced = apply_over_axes(np.sum, data, [0, 2])
        assert_array_equal(reduced, np.array([[[60], [92], [124]]]))
|
||||
|
||||
|
||||
class TestExpandDims:
    def test_functionality(self):
        shape = (2, 3, 4, 5)
        a = np.empty(shape)
        # every valid axis position (negative and positive) inserts a
        # length-1 dimension that squeeze removes again
        for axis in range(-5, 4):
            b = expand_dims(a, axis)
            assert_(b.shape[axis] == 1)
            assert_(np.squeeze(b).shape == shape)

    def test_axis_tuple(self):
        a = np.empty((3, 3, 3))
        assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3)
        assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1)
        assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1)
        assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3)

    def test_axis_out_of_range(self):
        shape = (2, 3, 4, 5)
        a = np.empty(shape)
        assert_raises(np.AxisError, expand_dims, a, -6)
        assert_raises(np.AxisError, expand_dims, a, 5)

        a = np.empty((3, 3, 3))
        assert_raises(np.AxisError, expand_dims, a, (0, -6))
        assert_raises(np.AxisError, expand_dims, a, (0, 5))

    def test_repeated_axis(self):
        a = np.empty((3, 3, 3))
        assert_raises(ValueError, expand_dims, a, axis=(1, 1))

    def test_subclasses(self):
        a = np.arange(10).reshape((2, 5))
        a = np.ma.array(a, mask=a % 3 == 0)

        expanded = np.expand_dims(a, axis=1)
        # the masked-array subclass and its mask must both gain the new axis
        assert_(isinstance(expanded, np.ma.MaskedArray))
        assert_equal(expanded.shape, (2, 1, 5))
        assert_equal(expanded.mask.shape, (2, 1, 5))
|
||||
|
||||
|
||||
class TestArraySplit:
    """Tests for ``array_split``, which (unlike ``split``) allows
    unequal-sized sections."""

    def test_integer_0_split(self):
        # Zero sections is invalid.
        a = np.arange(10)
        assert_raises(ValueError, array_split, a, 0)

    def test_integer_split(self):
        # Splitting 10 elements into 1..11 sections: earlier sections
        # receive the extra elements; sections beyond the length are empty.
        a = np.arange(10)
        res = array_split(a, 1)
        desired = [np.arange(10)]
        compare_results(res, desired)

        res = array_split(a, 2)
        desired = [np.arange(5), np.arange(5, 10)]
        compare_results(res, desired)

        res = array_split(a, 3)
        desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)]
        compare_results(res, desired)

        res = array_split(a, 4)
        desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8),
                   np.arange(8, 10)]
        compare_results(res, desired)

        res = array_split(a, 5)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 8), np.arange(8, 10)]
        compare_results(res, desired)

        res = array_split(a, 6)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 7)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 8)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5),
                   np.arange(5, 6), np.arange(6, 7), np.arange(7, 8),
                   np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 9)
        desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4),
                   np.arange(4, 5), np.arange(5, 6), np.arange(6, 7),
                   np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)

        res = array_split(a, 10)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)

        # More sections than elements: trailing sections are empty.
        res = array_split(a, 11)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10), np.array([])]
        compare_results(res, desired)

    def test_integer_split_2D_rows(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=0)
        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
               np.zeros((0, 10))]
        compare_results(res, tgt)
        # Empty sections must keep the input dtype.
        assert_(a.dtype.type is res[-1].dtype.type)

        # Same thing for manual splits:
        res = array_split(a, [0, 1, 2], axis=0)
        tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
               np.array([np.arange(10)])]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)

    def test_integer_split_2D_cols(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=-1)
        desired = [np.array([np.arange(4), np.arange(4)]),
                   np.array([np.arange(4, 7), np.arange(4, 7)]),
                   np.array([np.arange(7, 10), np.arange(7, 10)])]
        compare_results(res, desired)

    def test_integer_split_2D_default(self):
        """ This will fail if we change default axis
        """
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3)
        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
               np.zeros((0, 10))]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)
        # perhaps should check higher dimensions

    @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
    def test_integer_split_2D_rows_greater_max_int32(self):
        # Rows beyond the int32 range; broadcast_to keeps memory use tiny.
        a = np.broadcast_to([0], (1 << 32, 2))
        res = array_split(a, 4)
        chunk = np.broadcast_to([0], (1 << 30, 2))
        tgt = [chunk] * 4
        for i in range(len(tgt)):
            assert_equal(res[i].shape, tgt[i].shape)

    def test_index_split_simple(self):
        # Split points given as explicit indices.
        a = np.arange(10)
        indices = [1, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)

    def test_index_split_low_bound(self):
        # Index 0 yields a leading empty section.
        a = np.arange(10)
        indices = [0, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)

    def test_index_split_high_bound(self):
        # Indices past the end yield trailing empty sections.
        a = np.arange(10)
        indices = [0, 5, 7, 10, 12]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10), np.array([]), np.array([])]
        compare_results(res, desired)
|
||||
|
||||
|
||||
class TestSplit:
|
||||
# The split function is essentially the same as array_split,
|
||||
# except that it test if splitting will result in an
|
||||
# equal split. Only test for this case.
|
||||
|
||||
def test_equal_split(self):
|
||||
a = np.arange(10)
|
||||
res = split(a, 2)
|
||||
desired = [np.arange(5), np.arange(5, 10)]
|
||||
compare_results(res, desired)
|
||||
|
||||
def test_unequal_split(self):
|
||||
a = np.arange(10)
|
||||
assert_raises(ValueError, split, a, 3)
|
||||
|
||||
|
||||
class TestColumnStack:
    def test_non_iterable(self):
        # A scalar cannot be column-stacked.
        assert_raises(TypeError, column_stack, 1)

    def test_1D_arrays(self):
        # Two 1-D arrays become the columns of a 2-D result
        # (example from the docstring).
        left = np.array((1, 2, 3))
        right = np.array((2, 3, 4))
        stacked = np.column_stack((left, right))
        assert_equal(stacked, np.array([[1, 2],
                                        [2, 3],
                                        [3, 4]]))

    def test_2D_arrays(self):
        # Column vectors are concatenated side by side
        # (same as the hstack 2-D docstring example).
        left = np.array([[1], [2], [3]])
        right = np.array([[2], [3], [4]])
        stacked = np.column_stack((left, right))
        assert_equal(stacked, np.array([[1, 2],
                                        [2, 3],
                                        [3, 4]]))

    def test_generator(self):
        # Passing a generator instead of a sequence warns.
        with assert_warns(FutureWarning):
            column_stack((np.arange(3) for _ in range(2)))
|
||||
|
||||
|
||||
class TestDstack:
    def test_non_iterable(self):
        assert_raises(TypeError, dstack, 1)

    def test_0D_array(self):
        # 0-d inputs are promoted to a (1, 1, 2) result.
        stacked = dstack([np.array(1), np.array(2)])
        assert_array_equal(stacked, np.array([[[1, 2]]]))

    def test_1D_array(self):
        stacked = dstack([np.array([1]), np.array([2])])
        assert_array_equal(stacked, np.array([[[1, 2]]]))

    def test_2D_array(self):
        first = np.array([[1], [2]])
        second = np.array([[1], [2]])
        stacked = dstack([first, second])
        assert_array_equal(stacked, np.array([[[1, 1]], [[2, 2, ]]]))

    def test_2D_array2(self):
        first = np.array([1, 2])
        second = np.array([1, 2])
        stacked = dstack([first, second])
        assert_array_equal(stacked, np.array([[[1, 1], [2, 2]]]))

    def test_generator(self):
        # Passing a generator instead of a sequence warns.
        with assert_warns(FutureWarning):
            dstack((np.arange(3) for _ in range(2)))
|
||||
|
||||
|
||||
# array_split has more comprehensive test of splitting.
|
||||
# only do simple test on hsplit, vsplit, and dsplit
|
||||
class TestHsplit:
|
||||
"""Only testing for integer splits.
|
||||
|
||||
"""
|
||||
def test_non_iterable(self):
|
||||
assert_raises(ValueError, hsplit, 1, 1)
|
||||
|
||||
def test_0D_array(self):
|
||||
a = np.array(1)
|
||||
try:
|
||||
hsplit(a, 2)
|
||||
assert_(0)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def test_1D_array(self):
|
||||
a = np.array([1, 2, 3, 4])
|
||||
res = hsplit(a, 2)
|
||||
desired = [np.array([1, 2]), np.array([3, 4])]
|
||||
compare_results(res, desired)
|
||||
|
||||
def test_2D_array(self):
|
||||
a = np.array([[1, 2, 3, 4],
|
||||
[1, 2, 3, 4]])
|
||||
res = hsplit(a, 2)
|
||||
desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])]
|
||||
compare_results(res, desired)
|
||||
|
||||
|
||||
class TestVsplit:
|
||||
"""Only testing for integer splits.
|
||||
|
||||
"""
|
||||
def test_non_iterable(self):
|
||||
assert_raises(ValueError, vsplit, 1, 1)
|
||||
|
||||
def test_0D_array(self):
|
||||
a = np.array(1)
|
||||
assert_raises(ValueError, vsplit, a, 2)
|
||||
|
||||
def test_1D_array(self):
|
||||
a = np.array([1, 2, 3, 4])
|
||||
try:
|
||||
vsplit(a, 2)
|
||||
assert_(0)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def test_2D_array(self):
|
||||
a = np.array([[1, 2, 3, 4],
|
||||
[1, 2, 3, 4]])
|
||||
res = vsplit(a, 2)
|
||||
desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])]
|
||||
compare_results(res, desired)
|
||||
|
||||
|
||||
class TestDsplit:
|
||||
# Only testing for integer splits.
|
||||
def test_non_iterable(self):
|
||||
assert_raises(ValueError, dsplit, 1, 1)
|
||||
|
||||
def test_0D_array(self):
|
||||
a = np.array(1)
|
||||
assert_raises(ValueError, dsplit, a, 2)
|
||||
|
||||
def test_1D_array(self):
|
||||
a = np.array([1, 2, 3, 4])
|
||||
assert_raises(ValueError, dsplit, a, 2)
|
||||
|
||||
def test_2D_array(self):
|
||||
a = np.array([[1, 2, 3, 4],
|
||||
[1, 2, 3, 4]])
|
||||
try:
|
||||
dsplit(a, 2)
|
||||
assert_(0)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def test_3D_array(self):
|
||||
a = np.array([[[1, 2, 3, 4],
|
||||
[1, 2, 3, 4]],
|
||||
[[1, 2, 3, 4],
|
||||
[1, 2, 3, 4]]])
|
||||
res = dsplit(a, 2)
|
||||
desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]),
|
||||
np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])]
|
||||
compare_results(res, desired)
|
||||
|
||||
|
||||
class TestSqueeze:
    def test_basic(self):
        from numpy.random import rand

        # squeeze removes every length-1 axis, equivalent to a reshape
        # that drops those axes.
        arr_a = rand(20, 10, 10, 1, 1)
        arr_b = rand(20, 1, 10, 1, 20)
        arr_c = rand(1, 1, 20, 10)
        assert_array_equal(np.squeeze(arr_a), np.reshape(arr_a, (20, 10, 10)))
        assert_array_equal(np.squeeze(arr_b), np.reshape(arr_b, (20, 10, 20)))
        assert_array_equal(np.squeeze(arr_c), np.reshape(arr_c, (20, 10)))

        # Squeezing to 0-dim should still give an ndarray
        nested = [[[1.5]]]
        squeezed = np.squeeze(nested)
        assert_equal(squeezed, 1.5)
        assert_equal(squeezed.ndim, 0)
        assert_equal(type(squeezed), np.ndarray)
|
||||
|
||||
|
||||
class TestKron:
|
||||
def test_return_type(self):
|
||||
class myarray(np.ndarray):
|
||||
__array_priority__ = 0.0
|
||||
|
||||
a = np.ones([2, 2])
|
||||
ma = myarray(a.shape, a.dtype, a.data)
|
||||
assert_equal(type(kron(a, a)), np.ndarray)
|
||||
assert_equal(type(kron(ma, ma)), myarray)
|
||||
assert_equal(type(kron(a, ma)), np.ndarray)
|
||||
assert_equal(type(kron(ma, a)), myarray)
|
||||
|
||||
|
||||
class TestTile:
    def test_basic(self):
        vec = np.array([0, 1, 2])
        mat = [[1, 2], [3, 4]]
        assert_equal(tile(vec, 2), [0, 1, 2, 0, 1, 2])
        assert_equal(tile(vec, (2, 2)),
                     [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]])
        assert_equal(tile(vec, (1, 2)), [[0, 1, 2, 0, 1, 2]])
        assert_equal(tile(mat, 2), [[1, 2, 1, 2], [3, 4, 3, 4]])
        assert_equal(tile(mat, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]])
        assert_equal(tile(mat, (2, 2)),
                     [[1, 2, 1, 2], [3, 4, 3, 4],
                      [1, 2, 1, 2], [3, 4, 3, 4]])

    def test_tile_one_repetition_on_array_gh4679(self):
        # tile(a, 1) must return a copy, not a view of `a` (gh-4679):
        # mutating the result must leave the input untouched.
        original = np.arange(5)
        tiled = tile(original, 1)
        tiled += 2
        assert_equal(original, np.arange(5))

    def test_empty(self):
        empty3d = np.array([[[]]])
        empty2d = np.array([[], []])
        shape2d = tile(empty2d, 2).shape
        shape3d = tile(empty3d, (3, 2, 5)).shape
        assert_equal(shape2d, (2, 0))
        assert_equal(shape3d, (3, 2, 0))

    def test_kroncompare(self):
        from numpy.random import randint

        # tile(b, r) must equal kron(ones(r), b) for integer arrays.
        rep_shapes = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)]
        arr_shapes = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]
        for arr_shape in arr_shapes:
            block = randint(0, 10, size=arr_shape)
            for rep in rep_shapes:
                unit = np.ones(rep, block.dtype)
                assert_equal(tile(block, rep), kron(unit, block))
|
||||
|
||||
|
||||
class TestMayShareMemory:
    def test_basic(self):
        base = np.ones((50, 60))
        other = np.ones((30, 60, 6))

        # Views derived from the same buffer may share memory.
        assert_(np.may_share_memory(base, base))
        assert_(np.may_share_memory(base, base[::-1]))
        assert_(np.may_share_memory(base, base[::2]))
        assert_(np.may_share_memory(base, base[1:, ::-1]))

        # Arrays from distinct allocations never share memory.
        assert_(not np.may_share_memory(base[::-1], other))
        assert_(not np.may_share_memory(base[::2], other))
        assert_(not np.may_share_memory(base[1:, ::-1], other))
        assert_(np.may_share_memory(other[1:, ::-1], other))
|
||||
|
||||
|
||||
# Utility
|
||||
# Utility
def compare_results(res, desired):
    """Assert each entry of *res* equals the matching entry of *desired*."""
    for idx, expected in enumerate(desired):
        assert_array_equal(res[idx], expected)
|
||||
479
venv/Lib/site-packages/numpy/lib/tests/test_stride_tricks.py
Normal file
479
venv/Lib/site-packages/numpy/lib/tests/test_stride_tricks.py
Normal file
|
|
@ -0,0 +1,479 @@
|
|||
import numpy as np
|
||||
from numpy.core._rational_tests import rational
|
||||
from numpy.testing import (
|
||||
assert_equal, assert_array_equal, assert_raises, assert_,
|
||||
assert_raises_regex, assert_warns,
|
||||
)
|
||||
from numpy.lib.stride_tricks import (
|
||||
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to
|
||||
)
|
||||
|
||||
def assert_shapes_correct(input_shapes, expected_shape):
    """Broadcast arrays of the given shapes and check that every output
    has the expected common shape."""
    arrays = [np.zeros(shape) for shape in input_shapes]
    broadcasted = broadcast_arrays(*arrays)
    result_shapes = [arr.shape for arr in broadcasted]
    assert_equal(result_shapes, [expected_shape] * len(arrays))
|
||||
|
||||
|
||||
def assert_incompatible_shapes_raise(input_shapes):
    """Check that broadcasting arrays of these (incompatible) shapes
    raises ValueError."""
    arrays = [np.zeros(shape) for shape in input_shapes]
    assert_raises(ValueError, broadcast_arrays, *arrays)
|
||||
|
||||
|
||||
def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False):
    """Check that broadcast_arrays lays data out exactly as a ufunc would
    when broadcasting shape1 against shape0."""
    zeros = np.zeros(shape0, dtype=int)
    # multiply.reduce has identity 1.0, so an empty shape1 gives count == 1.
    count = int(np.multiply.reduce(shape1))
    values = np.arange(count).reshape(shape1)
    if transposed:
        zeros = zeros.T
        values = values.T
    if flipped:
        zeros = zeros[::-1]
        values = values[::-1]
    # Adding zeros broadcasts `values`; the sum must match the broadcast
    # view produced by broadcast_arrays.
    via_ufunc = zeros + values
    _, via_broadcast = broadcast_arrays(zeros, values)
    assert_array_equal(via_ufunc, via_broadcast)
|
||||
|
||||
|
||||
def test_same():
    # Broadcasting two identical shapes is a no-op.
    first = np.arange(10)
    second = np.arange(10)
    bfirst, bsecond = broadcast_arrays(first, second)
    assert_array_equal(first, bfirst)
    assert_array_equal(second, bsecond)
|
||||
|
||||
def test_broadcast_kwargs():
    # np.broadcast_arrays() accepts no keyword argument other than
    # 'subok'; anything else must raise TypeError.
    first = np.arange(10)
    second = np.arange(10)

    with assert_raises_regex(TypeError, 'got an unexpected keyword'):
        broadcast_arrays(first, second, dtype='float64')
|
||||
|
||||
|
||||
def test_one_off():
    # A row vector against a column vector broadcasts to a full matrix.
    row = np.array([[1, 2, 3]])
    col = np.array([[1], [2], [3]])
    brow, bcol = broadcast_arrays(row, col)
    expected_row = np.array([[1, 2, 3]] * 3)
    expected_col = expected_row.T
    assert_array_equal(expected_row, brow)
    assert_array_equal(expected_col, bcol)
|
||||
|
||||
|
||||
def test_same_input_shapes():
    """Broadcasting identical shapes yields that same shape."""
    # Check that the final shape is just the input shape.

    data = [
        (),
        (1,),
        (3,),
        (0, 1),
        (0, 3),
        (1, 0),
        (3, 0),
        (1, 3),
        (3, 1),
        (3, 3),
    ]
    for shape in data:
        input_shapes = [shape]
        # Single input.
        assert_shapes_correct(input_shapes, shape)
        # Double input.
        input_shapes2 = [shape, shape]
        assert_shapes_correct(input_shapes2, shape)
        # Triple input.
        input_shapes3 = [shape, shape, shape]
        assert_shapes_correct(input_shapes3, shape)
|
||||
|
||||
|
||||
def test_two_compatible_by_ones_input_shapes():
    """Same-rank shapes where length-1 axes stretch to match."""
    # Check that two different input shapes of the same length, but some have
    # ones, broadcast to the correct shape.

    data = [
        [[(1,), (3,)], (3,)],
        [[(1, 3), (3, 3)], (3, 3)],
        [[(3, 1), (3, 3)], (3, 3)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 1), (3, 3)], (3, 3)],
        [[(1, 1), (1, 3)], (1, 3)],
        [[(1, 1), (3, 1)], (3, 1)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        assert_shapes_correct(input_shapes, expected_shape)
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_shapes_correct(input_shapes[::-1], expected_shape)
|
||||
|
||||
|
||||
def test_two_compatible_by_prepending_ones_input_shapes():
    """Different-rank shapes: shorter shapes are padded with leading 1s."""
    # Check that two different input shapes (of different lengths) broadcast
    # to the correct shape.

    data = [
        [[(), (3,)], (3,)],
        [[(3,), (3, 3)], (3, 3)],
        [[(3,), (3, 1)], (3, 3)],
        [[(1,), (3, 3)], (3, 3)],
        [[(), (3, 3)], (3, 3)],
        [[(1, 1), (3,)], (1, 3)],
        [[(1,), (3, 1)], (3, 1)],
        [[(1,), (1, 3)], (1, 3)],
        [[(), (1, 3)], (1, 3)],
        [[(), (3, 1)], (3, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        assert_shapes_correct(input_shapes, expected_shape)
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_shapes_correct(input_shapes[::-1], expected_shape)
|
||||
|
||||
|
||||
def test_incompatible_shapes_raise_valueerror():
    """Shapes that cannot broadcast must raise ValueError."""
    # Check that a ValueError is raised for incompatible shapes.

    data = [
        [(3,), (4,)],
        [(2, 3), (2,)],
        [(3,), (3,), (4,)],
        [(1, 3, 4), (2, 3, 3)],
    ]
    for input_shapes in data:
        assert_incompatible_shapes_raise(input_shapes)
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_incompatible_shapes_raise(input_shapes[::-1])
|
||||
|
||||
|
||||
def test_same_as_ufunc():
    """Check that broadcast_arrays gives the same data layout as a ufunc."""
    # Check that the data layout is the same as if a ufunc did the operation.

    data = [
        [[(1,), (3,)], (3,)],
        [[(1, 3), (3, 3)], (3, 3)],
        [[(3, 1), (3, 3)], (3, 3)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 1), (3, 3)], (3, 3)],
        [[(1, 1), (1, 3)], (1, 3)],
        [[(1, 1), (3, 1)], (3, 1)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
        [[(), (3,)], (3,)],
        [[(3,), (3, 3)], (3, 3)],
        [[(3,), (3, 1)], (3, 3)],
        [[(1,), (3, 3)], (3, 3)],
        [[(), (3, 3)], (3, 3)],
        [[(1, 1), (3,)], (1, 3)],
        [[(1,), (3, 1)], (3, 1)],
        [[(1,), (1, 3)], (1, 3)],
        [[(), (1, 3)], (1, 3)],
        [[(), (3, 1)], (3, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
    ]
    for input_shapes, expected_shape in data:
        # BUG FIX: the original passed an error-message string
        # ("Shapes: %s %s" % ...) as a third positional argument here.
        # assert_same_as_ufunc has signature
        # (shape0, shape1, transposed=False, flipped=False), so the string
        # landed in `transposed` and, being truthy, silently ran the
        # transposed variant instead of the plain one. Drop the stray
        # argument so the untransposed case is actually exercised.
        assert_same_as_ufunc(input_shapes[0], input_shapes[1])
        # Reverse the input shapes since broadcasting should be symmetric.
        assert_same_as_ufunc(input_shapes[1], input_shapes[0])
        # Try them transposed, too.
        assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)
        # ... and flipped for non-rank-0 inputs in order to test negative
        # strides.
        if () not in input_shapes:
            assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)
            assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)
|
||||
|
||||
|
||||
def test_broadcast_to_succeeds():
    """broadcast_to produces the expected view for valid target shapes."""
    # Each entry: (input array, target shape, expected result).
    data = [
        [np.array(0), (0,), np.array(0)],
        [np.array(0), (1,), np.zeros(1)],
        [np.array(0), (3,), np.zeros(3)],
        [np.ones(1), (1,), np.ones(1)],
        [np.ones(1), (2,), np.ones(2)],
        [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],
        [np.arange(3), (3,), np.arange(3)],
        [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],
        [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],
        # test if shape is not a tuple
        [np.ones(0), 0, np.ones(0)],
        [np.ones(1), 1, np.ones(1)],
        [np.ones(1), 2, np.ones(2)],
        # these cases with size 0 are strange, but they reproduce the behavior
        # of broadcasting with ufuncs (see test_same_as_ufunc above)
        [np.ones(1), (0,), np.ones(0)],
        [np.ones((1, 2)), (0, 2), np.ones((0, 2))],
        [np.ones((2, 1)), (2, 0), np.ones((2, 0))],
    ]
    for input_array, shape, expected in data:
        actual = broadcast_to(input_array, shape)
        assert_array_equal(expected, actual)
|
||||
|
||||
|
||||
def test_broadcast_to_raises():
    # Each (original shape, target shape) pair is invalid: shrinking,
    # incompatible dims, or negative sizes must raise ValueError.
    cases = [
        [(0,), ()],
        [(1,), ()],
        [(3,), ()],
        [(3,), (1,)],
        [(3,), (2,)],
        [(3,), (4,)],
        [(1, 2), (2, 1)],
        [(1, 1), (1,)],
        [(1,), -1],
        [(1,), (-1,)],
        [(1, 2), (-1, 2)],
    ]
    for orig_shape, target_shape in cases:
        source = np.zeros(orig_shape)
        assert_raises(ValueError, lambda: broadcast_to(source, target_shape))
|
||||
|
||||
|
||||
def test_broadcast_shape():
    """_broadcast_shape returns the common broadcast shape of its args."""
    # broadcast_shape is already exercised indirectly by broadcast_arrays
    assert_equal(_broadcast_shape(), ())
    assert_equal(_broadcast_shape([1, 2]), (2,))
    assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1))
    assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4))
    # More than 32 arguments — presumably exceeds np.broadcast's argument
    # limit and exercises a multi-pass code path (see gh-5862 below).
    assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2))
    assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2))

    # regression tests for gh-5862
    assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,))
    bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32
    assert_raises(ValueError, lambda: _broadcast_shape(*bad_args))
|
||||
|
||||
|
||||
def test_as_strided():
    """as_strided builds views with explicit shape/strides and preserves
    dtypes, including object, void, and custom dtypes."""
    # No shape/strides arguments: identity view.
    a = np.array([None])
    a_view = as_strided(a)
    expected = np.array([None])
    assert_array_equal(a_view, np.array([None]))

    # Stride of two elements selects every other value.
    a = np.array([1, 2, 3, 4])
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    expected = np.array([1, 3])
    assert_array_equal(a_view, expected)

    # A zero stride on the first axis repeats the row.
    a = np.array([1, 2, 3, 4])
    a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize))
    expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
    assert_array_equal(a_view, expected)

    # Regression test for gh-5081
    dt = np.dtype([('num', 'i4'), ('obj', 'O')])
    a = np.empty((4,), dtype=dt)
    a['num'] = np.arange(1, 5)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    expected_num = [[1, 2, 3, 4]] * 3
    expected_obj = [[None]*4]*3
    assert_equal(a_view.dtype, dt)
    assert_array_equal(expected_num, a_view['num'])
    assert_array_equal(expected_obj, a_view['obj'])

    # Make sure that void types without fields are kept unchanged
    a = np.empty((4,), dtype='V4')
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)

    # Make sure that the only type that could fail is properly handled
    dt = np.dtype({'names': [''], 'formats': ['V4']})
    a = np.empty((4,), dtype=dt)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)

    # Custom dtypes should not be lost (gh-9161)
    r = [rational(i) for i in range(4)]
    a = np.array(r, dtype=rational)
    a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
    assert_equal(a.dtype, a_view.dtype)
    assert_array_equal([r] * 3, a_view)
|
||||
|
||||
def as_strided_writeable():
    """Exercise the ``writeable`` flag handling of ``as_strided``."""
    data = np.ones(10)

    # Requesting a read-only view must be honoured.
    readonly_view = as_strided(data, writeable=False)
    assert_(not readonly_view.flags.writeable)

    # Check that writeable also is fine: writes go through to the base.
    writable_view = as_strided(data, writeable=True)
    assert_(writable_view.flags.writeable)
    writable_view[...] = 3
    assert_array_equal(data, np.full_like(data, 3))

    # On a read-only base, even writeable=True yields a read-only view.
    data.flags.writeable = False
    view = as_strided(data, writeable=False)
    view = as_strided(data, writeable=True)
    assert_(not view.flags.writeable)
|
||||
|
||||
|
||||
class VerySimpleSubClass(np.ndarray):
    # Minimal ndarray subclass: no extra state and no __array_finalize__.
    def __new__(cls, *args, **kwargs):
        base = np.array(*args, subok=True, **kwargs)
        return base.view(cls)
|
||||
|
||||
|
||||
class SimpleSubClass(VerySimpleSubClass):
    # Subclass carrying an ``info`` attribute, updated on view/copy.
    def __new__(cls, *args, **kwargs):
        instance = np.array(*args, subok=True, **kwargs).view(cls)
        instance.info = 'simple'
        return instance

    def __array_finalize__(self, obj):
        # Views and copies append ' finalized' to the source's info.
        self.info = getattr(obj, 'info', '') + ' finalized'
|
||||
|
||||
|
||||
def test_subclasses():
    """Subclass handling of as_strided / broadcast_arrays / broadcast_to:
    subclasses survive only with subok=True, and __array_finalize__ runs."""
    # test that subclass is preserved only if subok=True
    a = VerySimpleSubClass([1, 2, 3, 4])
    assert_(type(a) is VerySimpleSubClass)
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
    assert_(type(a_view) is np.ndarray)
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    assert_(type(a_view) is VerySimpleSubClass)
    # test that if a subclass has __array_finalize__, it is used
    a = SimpleSubClass([1, 2, 3, 4])
    a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')

    # similar tests for broadcast_arrays
    b = np.arange(len(a)).reshape(-1, 1)
    a_view, b_view = broadcast_arrays(a, b)
    assert_(type(a_view) is np.ndarray)
    assert_(type(b_view) is np.ndarray)
    assert_(a_view.shape == b_view.shape)
    a_view, b_view = broadcast_arrays(a, b, subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')
    assert_(type(b_view) is np.ndarray)
    assert_(a_view.shape == b_view.shape)

    # and for broadcast_to
    shape = (2, 4)
    a_view = broadcast_to(a, shape)
    assert_(type(a_view) is np.ndarray)
    assert_(a_view.shape == shape)
    a_view = broadcast_to(a, shape, subok=True)
    assert_(type(a_view) is SimpleSubClass)
    assert_(a_view.info == 'simple finalized')
    assert_(a_view.shape == shape)
|
||||
|
||||
|
||||
def test_writeable():
    """Writeability rules: broadcast_to is read-only; broadcast_arrays
    results are writeable but warn (deprecation transition)."""
    # broadcast_to should return a readonly array
    original = np.array([1, 2, 3])
    result = broadcast_to(original, (2, 3))
    assert_equal(result.flags.writeable, False)
    assert_raises(ValueError, result.__setitem__, slice(None), 0)

    # but the result of broadcast_arrays needs to be writeable, to
    # preserve backwards compatibility
    for is_broadcast, results in [(False, broadcast_arrays(original,)),
                                  (True, broadcast_arrays(0, original))]:
        for result in results:
            # This will change to False in a future version
            if is_broadcast:
                with assert_warns(FutureWarning):
                    assert_equal(result.flags.writeable, True)
                with assert_warns(DeprecationWarning):
                    result[:] = 0
                # Warning not emitted, writing to the array resets it
                assert_equal(result.flags.writeable, True)
            else:
                # No warning:
                assert_equal(result.flags.writeable, True)

    for results in [broadcast_arrays(original),
                    broadcast_arrays(0, original)]:
        for result in results:
            # resets the warn_on_write DeprecationWarning
            result.flags.writeable = True
            # check: no warning emitted
            assert_equal(result.flags.writeable, True)
            result[:] = 0

    # keep readonly input readonly
    original.flags.writeable = False
    _, result = broadcast_arrays(0, original)
    assert_equal(result.flags.writeable, False)

    # regression test for GH6491
    shape = (2,)
    strides = [0]
    tricky_array = as_strided(np.array(0), shape, strides)
    other = np.zeros((1,))
    first, second = broadcast_arrays(tricky_array, other)
    assert_(first.shape == second.shape)
|
||||
|
||||
|
||||
def test_writeable_memoryview():
    """Broadcast results export as read-only memoryviews (gh-13929)."""
    # The result of broadcast_arrays exports as a non-writeable memoryview
    # because otherwise there is no good way to opt in to the new behaviour
    # (i.e. you would need to set writeable to False explicitly).
    # See gh-13929.
    original = np.array([1, 2, 3])

    for is_broadcast, results in [(False, broadcast_arrays(original,)),
                                  (True, broadcast_arrays(0, original))]:
        for result in results:
            # This will change to False in a future version
            if is_broadcast:
                # memoryview(result, writable=True) will give warning but cannot
                # be tested using the python API.
                assert memoryview(result).readonly
            else:
                assert not memoryview(result).readonly
|
||||
|
||||
|
||||
def test_reference_types():
    # Object (reference-counted) dtypes must broadcast correctly too.
    source = np.array('a', dtype=object)
    expected = np.array(['a'] * 3, dtype=object)

    broadcasted = broadcast_to(source, (3,))
    assert_array_equal(expected, broadcasted)

    broadcasted, _ = broadcast_arrays(source, np.ones(3))
    assert_array_equal(expected, broadcasted)
|
||||
532
venv/Lib/site-packages/numpy/lib/tests/test_twodim_base.py
Normal file
532
venv/Lib/site-packages/numpy/lib/tests/test_twodim_base.py
Normal file
|
|
@ -0,0 +1,532 @@
|
|||
"""Test functions for matrix module
|
||||
|
||||
"""
|
||||
from numpy.testing import (
|
||||
assert_equal, assert_array_equal, assert_array_max_ulp,
|
||||
assert_array_almost_equal, assert_raises, assert_
|
||||
)
|
||||
|
||||
from numpy import (
|
||||
arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d,
|
||||
tri, mask_indices, triu_indices, triu_indices_from, tril_indices,
|
||||
tril_indices_from, vander,
|
||||
)
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
from numpy.core.tests.test_overrides import requires_array_function
|
||||
|
||||
|
||||
def get_mat(n):
    """Return the n x n matrix whose (i, j) entry is i + j."""
    base = arange(n)
    return add.outer(base, base)
|
||||
|
||||
|
||||
class TestEye:
    """Tests for ``eye``: identity-like matrices with offset diagonals."""

    def test_basic(self):
        assert_equal(eye(4),
                     array([[1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 0],
                            [0, 0, 0, 1]]))

        assert_equal(eye(4, dtype='f'),
                     array([[1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 0],
                            [0, 0, 0, 1]], 'f'))

        assert_equal(eye(3) == 1,
                     eye(3, dtype=bool))

    def test_diag(self):
        # k shifts the diagonal: positive above the main diagonal,
        # negative below.
        assert_equal(eye(4, k=1),
                     array([[0, 1, 0, 0],
                            [0, 0, 1, 0],
                            [0, 0, 0, 1],
                            [0, 0, 0, 0]]))

        assert_equal(eye(4, k=-1),
                     array([[0, 0, 0, 0],
                            [1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 0]]))

    def test_2d(self):
        # Rectangular (N, M) variants.
        assert_equal(eye(4, 3),
                     array([[1, 0, 0],
                            [0, 1, 0],
                            [0, 0, 1],
                            [0, 0, 0]]))

        assert_equal(eye(3, 4),
                     array([[1, 0, 0, 0],
                            [0, 1, 0, 0],
                            [0, 0, 1, 0]]))

    def test_diag2d(self):
        assert_equal(eye(3, 4, k=2),
                     array([[0, 0, 1, 0],
                            [0, 0, 0, 1],
                            [0, 0, 0, 0]]))

        assert_equal(eye(4, 3, k=-2),
                     array([[0, 0, 0],
                            [0, 0, 0],
                            [1, 0, 0],
                            [0, 1, 0]]))

    def test_eye_bounds(self):
        # Offsets at or beyond the matrix edge give all-zero results.
        assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])
        assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]])
        assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])
        assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]])
        assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])
        assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])
        assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]])
        assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]])
        assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]])

    def test_strings(self):
        # Byte-string dtype: '1' on the diagonal, empty string elsewhere.
        assert_equal(eye(2, 2, dtype='S3'),
                     [[b'1', b''], [b'', b'1']])

    def test_bool(self):
        assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])

    def test_order(self):
        # order= only changes memory layout, not the values.
        mat_c = eye(4, 3, k=-1)
        mat_f = eye(4, 3, k=-1, order='F')
        assert_equal(mat_c, mat_f)
        assert mat_c.flags.c_contiguous
        assert not mat_c.flags.f_contiguous
        assert not mat_f.flags.c_contiguous
        assert mat_f.flags.f_contiguous
|
||||
|
||||
|
||||
class TestDiag:
|
||||
def test_vector(self):
|
||||
vals = (100 * arange(5)).astype('l')
|
||||
b = zeros((5, 5))
|
||||
for k in range(5):
|
||||
b[k, k] = vals[k]
|
||||
assert_equal(diag(vals), b)
|
||||
b = zeros((7, 7))
|
||||
c = b.copy()
|
||||
for k in range(5):
|
||||
b[k, k + 2] = vals[k]
|
||||
c[k + 2, k] = vals[k]
|
||||
assert_equal(diag(vals, k=2), b)
|
||||
assert_equal(diag(vals, k=-2), c)
|
||||
|
||||
def test_matrix(self, vals=None):
|
||||
if vals is None:
|
||||
vals = (100 * get_mat(5) + 1).astype('l')
|
||||
b = zeros((5,))
|
||||
for k in range(5):
|
||||
b[k] = vals[k, k]
|
||||
assert_equal(diag(vals), b)
|
||||
b = b * 0
|
||||
for k in range(3):
|
||||
b[k] = vals[k, k + 2]
|
||||
assert_equal(diag(vals, 2), b[:3])
|
||||
for k in range(3):
|
||||
b[k] = vals[k + 2, k]
|
||||
assert_equal(diag(vals, -2), b[:3])
|
||||
|
||||
def test_fortran_order(self):
|
||||
vals = array((100 * get_mat(5) + 1), order='F', dtype='l')
|
||||
self.test_matrix(vals)
|
||||
|
||||
def test_diag_bounds(self):
|
||||
A = [[1, 2], [3, 4], [5, 6]]
|
||||
assert_equal(diag(A, k=2), [])
|
||||
assert_equal(diag(A, k=1), [2])
|
||||
assert_equal(diag(A, k=0), [1, 4])
|
||||
assert_equal(diag(A, k=-1), [3, 6])
|
||||
assert_equal(diag(A, k=-2), [5])
|
||||
assert_equal(diag(A, k=-3), [])
|
||||
|
||||
def test_failure(self):
|
||||
assert_raises(ValueError, diag, [[[1]]])
|
||||
|
||||
|
||||
class TestFliplr:
    """Tests for ``fliplr`` (reverse column order)."""

    def test_basic(self):
        # 1-D input is rejected.
        assert_raises(ValueError, fliplr, ones(4))
        mat = get_mat(4)
        assert_equal(fliplr(mat), mat[:, ::-1])
        assert_equal(fliplr([[0, 1, 2], [3, 4, 5]]),
                     [[2, 1, 0], [5, 4, 3]])
|
||||
|
||||
|
||||
class TestFlipud:
    """Tests for ``flipud`` (reverse row order)."""

    def test_basic(self):
        mat = get_mat(4)
        assert_equal(flipud(mat), mat[::-1, :])
        assert_equal(flipud([[0, 1, 2], [3, 4, 5]]),
                     [[3, 4, 5], [0, 1, 2]])
|
||||
|
||||
|
||||
class TestHistogram2d:
    """Tests for ``histogram2d``: binning, density, outliers, dispatch."""

    def test_simple(self):
        x = array(
            [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
        y = array(
            [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673])
        xedges = np.linspace(0, 1, 10)
        yedges = np.linspace(0, 1, 10)
        H = histogram2d(x, y, (xedges, yedges))[0]
        answer = array(
            [[0, 0, 0, 1, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 1, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [1, 0, 1, 0, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0]])
        assert_array_equal(H.T, answer)
        # A single edge array applies to both dimensions.
        H = histogram2d(x, y, xedges)[0]
        assert_array_equal(H.T, answer)
        H, xedges, yedges = histogram2d(list(range(10)), list(range(10)))
        assert_array_equal(H, eye(10, 10))
        assert_array_equal(xedges, np.linspace(0, 9, 11))
        assert_array_equal(yedges, np.linspace(0, 9, 11))

    def test_asym(self):
        # Asymmetric bin counts per axis, with density normalization.
        x = array([1, 1, 2, 3, 4, 4, 4, 5])
        y = array([1, 3, 2, 0, 1, 2, 3, 4])
        H, xed, yed = histogram2d(
            x, y, (6, 5), range=[[0, 6], [0, 5]], density=True)
        answer = array(
            [[0., 0, 0, 0, 0],
             [0, 1, 0, 1, 0],
             [0, 0, 1, 0, 0],
             [1, 0, 0, 0, 0],
             [0, 1, 1, 1, 0],
             [0, 0, 0, 0, 1]])
        assert_array_almost_equal(H, answer/8., 3)
        assert_array_equal(xed, np.linspace(0, 6, 7))
        assert_array_equal(yed, np.linspace(0, 5, 6))

    def test_density(self):
        # Unequal bin widths: density divides by bin area.
        x = array([1, 2, 3, 1, 2, 3, 1, 2, 3])
        y = array([1, 1, 1, 2, 2, 2, 3, 3, 3])
        H, xed, yed = histogram2d(
            x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True)
        answer = array([[1, 1, .5],
                        [1, 1, .5],
                        [.5, .5, .25]])/9.
        assert_array_almost_equal(H, answer, 3)

    def test_all_outliers(self):
        r = np.random.rand(100) + 1. + 1e6  # histogramdd rounds by decimal=6
        H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1]))
        assert_array_equal(H, 0)

    def test_empty(self):
        # Empty input yields all-zero counts of the requested shape.
        a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1]))
        assert_array_max_ulp(a, array([[0.]]))

        a, edge1, edge2 = histogram2d([], [], bins=4)
        assert_array_max_ulp(a, np.zeros((4, 4)))

    def test_binparameter_combination(self):
        x = array(
            [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
             0.59944483, 1])
        y = array(
            [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
             0.15886423, 1])
        edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
        H, xe, ye = histogram2d(x, y, (edges, 4))
        answer = array(
            [[2., 0., 0., 0.],
             [0., 1., 0., 0.],
             [0., 0., 0., 0.],
             [0., 0., 0., 0.],
             [0., 1., 0., 0.],
             [1., 0., 0., 0.],
             [0., 1., 0., 0.],
             [0., 0., 0., 0.],
             [0., 0., 0., 0.],
             [0., 0., 0., 1.]])
        assert_array_equal(H, answer)
        assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))
        H, xe, ye = histogram2d(x, y, (4, edges))
        answer = array(
            [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
             [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
             [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
             [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
        assert_array_equal(H, answer)
        assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))

    @requires_array_function
    def test_dispatch(self):
        class ShouldDispatch:
            def __array_function__(self, function, types, args, kwargs):
                return types, args, kwargs

        xy = [1, 2]
        s_d = ShouldDispatch()
        r = histogram2d(s_d, xy)
        # Cannot use assert_equal since that dispatches...
        assert_(r == ((ShouldDispatch,), (s_d, xy), {}))
        r = histogram2d(xy, s_d)
        assert_(r == ((ShouldDispatch,), (xy, s_d), {}))
        # BUG FIX: these three originally read assert_(r, expected), which
        # only checks that r is truthy (expected became the error message),
        # so the assertions could never fail.  Compare for equality, as the
        # two assertions above already do.
        r = histogram2d(xy, xy, bins=s_d)
        assert_(r == ((ShouldDispatch,), (xy, xy), dict(bins=s_d)))
        r = histogram2d(xy, xy, bins=[s_d, 5])
        assert_(r == ((ShouldDispatch,), (xy, xy), dict(bins=[s_d, 5])))
        assert_raises(Exception, histogram2d, xy, xy, bins=[s_d])
        r = histogram2d(xy, xy, weights=s_d)
        assert_(r == ((ShouldDispatch,), (xy, xy), dict(weights=s_d)))
|
||||
|
||||
|
||||
class TestTri:
    """Tests for ``tri`` (lower-triangular matrix of ones)."""

    def test_dtype(self):
        expected = array([[1, 0, 0],
                          [1, 1, 0],
                          [1, 1, 1]])
        assert_array_equal(tri(3), expected)
        assert_array_equal(tri(3, dtype=bool), expected.astype(bool))
|
||||
|
||||
|
||||
def test_tril_triu_ndim2():
    """tril/triu on 2-D arrays keep values and preserve the input dtype."""
    for code in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
        mat = np.ones((2, 2), dtype=code)
        lower = np.tril(mat)
        upper = np.triu(mat)
        assert_array_equal(lower, [[1, 0], [1, 1]])
        assert_array_equal(upper, lower.T)
        # dtype of the result must match the input's
        assert_equal(lower.dtype, mat.dtype)
        assert_equal(upper.dtype, mat.dtype)
|
||||
|
||||
|
||||
def test_tril_triu_ndim3():
    """tril/triu apply to the last two axes of a stacked (3-D) array."""
    for code in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
        stack = np.array([
            [[1, 1], [1, 1]],
            [[1, 1], [1, 0]],
            [[1, 1], [0, 0]],
        ], dtype=code)
        want_lower = np.array([
            [[1, 0], [1, 1]],
            [[1, 0], [1, 0]],
            [[1, 0], [0, 0]],
        ], dtype=code)
        want_upper = np.array([
            [[1, 1], [0, 1]],
            [[1, 1], [0, 0]],
            [[1, 1], [0, 0]],
        ], dtype=code)
        got_upper = np.triu(stack)
        got_lower = np.tril(stack)
        assert_array_equal(got_upper, want_upper)
        assert_array_equal(got_lower, want_lower)
        # dtype must survive both operations
        assert_equal(got_upper.dtype, stack.dtype)
        assert_equal(got_lower.dtype, stack.dtype)
|
||||
|
||||
|
||||
def test_tril_triu_with_inf():
    # Issue 4859: inf entries must not leak into the zeroed triangle.
    arr = np.array([[1, 1, np.inf],
                    [1, 1, 1],
                    [np.inf, 1, 1]])
    want_lower = np.array([[1, 0, 0],
                           [1, 1, 0],
                           [np.inf, 1, 1]])
    assert_array_equal(np.triu(arr), want_lower.T)
    assert_array_equal(np.tril(arr), want_lower)
|
||||
|
||||
|
||||
def test_tril_triu_dtype():
    # Issue 4916: tril and triu should return the same dtype as the input.
    for code in np.typecodes['All']:
        if code == 'V':
            # generic void has no meaningful zero fill
            continue
        mat = np.zeros((3, 3), dtype=code)
        for func in (np.triu, np.tril):
            assert_equal(func(mat).dtype, mat.dtype)

    # special cases: datetime64 ...
    mat = np.array([['2001-01-01T12:00', '2002-02-03T13:56'],
                    ['2004-01-01T12:00', '2003-01-03T13:45']],
                   dtype='datetime64')
    for func in (np.triu, np.tril):
        assert_equal(func(mat).dtype, mat.dtype)

    # ... and structured dtypes
    mat = np.zeros((3, 3), dtype='f4,f4')
    for func in (np.triu, np.tril):
        assert_equal(func(mat).dtype, mat.dtype)
|
||||
|
||||
|
||||
def test_mask_indices():
    """mask_indices returns the indices selected by a masking function."""
    grid = np.arange(9).reshape(3, 3)
    # without offset: the whole upper triangle
    sel = mask_indices(3, np.triu)
    assert_array_equal(grid[sel], array([0, 1, 2, 4, 5, 8]))
    # with offset 1: strictly above the diagonal
    sel_off = mask_indices(3, np.triu, 1)
    assert_array_equal(grid[sel_off], array([1, 2, 5]))
|
||||
|
||||
|
||||
def test_tril_indices():
    """tril_indices with and without a diagonal offset and non-square m."""
    idx_sq = tril_indices(4)
    idx_sq_k2 = tril_indices(4, k=2)
    idx_rect = tril_indices(4, m=5)
    idx_rect_k2 = tril_indices(4, k=2, m=5)

    sq = np.array([[1, 2, 3, 4],
                   [5, 6, 7, 8],
                   [9, 10, 11, 12],
                   [13, 14, 15, 16]])
    rect = np.arange(1, 21).reshape(4, 5)

    # reading values through the index pair
    assert_array_equal(sq[idx_sq],
                       array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
    assert_array_equal(rect[idx_rect],
                       array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))

    # assigning through the index pair
    sq[idx_sq] = -1
    assert_array_equal(sq,
                       array([[-1, 2, 3, 4],
                              [-1, -1, 7, 8],
                              [-1, -1, -1, 12],
                              [-1, -1, -1, -1]]))
    rect[idx_rect] = -1
    assert_array_equal(rect,
                       array([[-1, 2, 3, 4, 5],
                              [-1, -1, 8, 9, 10],
                              [-1, -1, -1, 14, 15],
                              [-1, -1, -1, -1, 20]]))
    # k=2 covers almost everything (two diagonals right of the main one)
    sq[idx_sq_k2] = -10
    assert_array_equal(sq,
                       array([[-10, -10, -10, 4],
                              [-10, -10, -10, -10],
                              [-10, -10, -10, -10],
                              [-10, -10, -10, -10]]))
    rect[idx_rect_k2] = -10
    assert_array_equal(rect,
                       array([[-10, -10, -10, 4, 5],
                              [-10, -10, -10, -10, 10],
                              [-10, -10, -10, -10, -10],
                              [-10, -10, -10, -10, -10]]))
|
||||
|
||||
|
||||
class TestTriuIndices:
    """Tests for triu_indices, mirroring the tril_indices checks."""

    def test_triu_indices(self):
        idx_sq = triu_indices(4)
        idx_sq_k2 = triu_indices(4, k=2)
        idx_rect = triu_indices(4, m=5)
        idx_rect_k2 = triu_indices(4, k=2, m=5)

        sq = np.array([[1, 2, 3, 4],
                       [5, 6, 7, 8],
                       [9, 10, 11, 12],
                       [13, 14, 15, 16]])
        rect = np.arange(1, 21).reshape(4, 5)

        # reading values through the index pair
        assert_array_equal(sq[idx_sq],
                           array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
        assert_array_equal(rect[idx_rect],
                           array([1, 2, 3, 4, 5, 7, 8, 9,
                                  10, 13, 14, 15, 19, 20]))

        # assigning through the index pair
        sq[idx_sq] = -1
        assert_array_equal(sq,
                           array([[-1, -1, -1, -1],
                                  [5, -1, -1, -1],
                                  [9, 10, -1, -1],
                                  [13, 14, 15, -1]]))
        rect[idx_rect] = -1
        assert_array_equal(rect,
                           array([[-1, -1, -1, -1, -1],
                                  [6, -1, -1, -1, -1],
                                  [11, 12, -1, -1, -1],
                                  [16, 17, 18, -1, -1]]))

        # k=2 covers almost the whole array (two diagonals right of the
        # main one)
        sq[idx_sq_k2] = -10
        assert_array_equal(sq,
                           array([[-1, -1, -10, -10],
                                  [5, -1, -1, -10],
                                  [9, 10, -1, -1],
                                  [13, 14, 15, -1]]))
        rect[idx_rect_k2] = -10
        assert_array_equal(rect,
                           array([[-1, -1, -10, -10, -10],
                                  [6, -1, -1, -10, -10],
                                  [11, 12, -1, -1, -10],
                                  [16, 17, 18, -1, -1]]))
|
||||
|
||||
|
||||
class TestTrilIndicesFrom:
    """tril_indices_from rejects arrays that are not 2-D."""

    def test_exceptions(self):
        for bad in (np.ones((2,)), np.ones((2, 2, 2))):
            assert_raises(ValueError, tril_indices_from, bad)
        # assert_raises(ValueError, tril_indices_from, np.ones((2, 3)))
|
||||
|
||||
|
||||
class TestTriuIndicesFrom:
    """triu_indices_from rejects arrays that are not 2-D."""

    def test_exceptions(self):
        for bad in (np.ones((2,)), np.ones((2, 2, 2))):
            assert_raises(ValueError, triu_indices_from, bad)
        # assert_raises(ValueError, triu_indices_from, np.ones((2, 3)))
|
||||
|
||||
|
||||
class TestVander:
    """Tests for ``vander`` (Vandermonde matrix construction)."""

    def test_basic(self):
        pts = np.array([0, 1, -2, 3])
        # Column k of `powers` holds pts ** (4 - k); column 0 is pts**4.
        powers = np.array([[0, 0, 0, 0, 1],
                           [1, 1, 1, 1, 1],
                           [16, -8, 4, -2, 1],
                           [81, 27, 9, 3, 1]])
        # Default N is len(pts):
        assert_array_equal(vander(pts), powers[:, 1:])
        # A range of N values, including 0 and 5 (greater than default):
        width = powers.shape[1]
        for n in range(6):
            assert_array_equal(vander(pts, N=n), powers[:, width - n:width])

    def test_dtypes(self):
        ints = array([11, -12, 13], dtype=np.int8)
        assert_array_equal(vander(ints),
                           np.array([[121, 11, 1],
                                     [144, -12, 1],
                                     [169, 13, 1]]))

        cplx = array([1.0+1j, 1.0-1j])
        # The data is floating point, but the values are small integers,
        # so assert_array_equal *should* be safe here (rather than, say,
        # assert_array_almost_equal).
        assert_array_equal(vander(cplx, N=3),
                           np.array([[2j, 1+1j, 1],
                                     [-2j, 1-1j, 1]]))
|
||||
478
venv/Lib/site-packages/numpy/lib/tests/test_type_check.py
Normal file
478
venv/Lib/site-packages/numpy/lib/tests/test_type_check.py
Normal file
|
|
@ -0,0 +1,478 @@
|
|||
import numpy as np
|
||||
from numpy.testing import (
|
||||
assert_, assert_equal, assert_array_equal, assert_raises
|
||||
)
|
||||
from numpy.lib.type_check import (
|
||||
common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
|
||||
nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close
|
||||
)
|
||||
|
||||
|
||||
def assert_all(x):
    """Fail (showing *x*) unless every element of *x* is truthy."""
    ok = np.all(x)
    assert_(ok, x)
|
||||
|
||||
|
||||
class TestCommonType:
    """common_type promotes to the smallest adequate inexact scalar type."""

    def test_basic(self):
        cases = [
            (np.array([[1, 2], [3, 4]], dtype=np.int32), np.float64),
            (np.array([[1, 2], [3, 4]], dtype=np.float16), np.float16),
            (np.array([[1, 2], [3, 4]], dtype=np.float32), np.float32),
            (np.array([[1, 2], [3, 4]], dtype=np.float64), np.float64),
            (np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle),
             np.csingle),
            (np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble),
             np.cdouble),
        ]
        for arr, expected in cases:
            assert_(common_type(arr) == expected)
|
||||
|
||||
|
||||
class TestMintypecode:
    """mintypecode picks the minimal typecode covering all inputs."""

    def test_default_1(self):
        # any single non-inexact code promotes to 'd'
        for itype in '1bcsuwil':
            assert_equal(mintypecode(itype), 'd')
        # inexact codes map to themselves
        for code in 'fdFD':
            assert_equal(mintypecode(code), code)

    def test_default_2(self):
        # a non-inexact code combined with an inexact one takes the
        # inexact code
        for itype in '1bcsuwil':
            for code in 'fdFD':
                assert_equal(mintypecode(itype + code), code)
        # pairwise combinations of the inexact codes
        pairs = [('ff', 'f'), ('fd', 'd'), ('fF', 'F'), ('fD', 'D'),
                 ('df', 'd'), ('dd', 'd'), ('dF', 'D'), ('dD', 'D'),
                 ('Ff', 'F'), ('Fd', 'D'), ('FF', 'F'), ('FD', 'D'),
                 ('Df', 'D'), ('Dd', 'D'), ('DF', 'D'), ('DD', 'D')]
        for codes, expected in pairs:
            assert_equal(mintypecode(codes), expected)

    def test_default_3(self):
        triples = [('fdF', 'D'), ('fdD', 'D'), ('fFD', 'D'), ('dFD', 'D'),
                   ('ifd', 'd'), ('ifF', 'F'), ('ifD', 'D'),
                   ('idF', 'D'), ('idD', 'D')]
        for codes, expected in triples:
            assert_equal(mintypecode(codes), expected)
|
||||
|
||||
|
||||
class TestIsscalar:
    """np.isscalar is true for scalars, false for containers."""

    def test_basic(self):
        for scalar in (3, 3j, 4.0):
            assert_(np.isscalar(scalar))
        for non_scalar in ([3], (3,)):
            assert_(not np.isscalar(non_scalar))
|
||||
|
||||
|
||||
class TestReal:
    """np.real on real/complex arrays, 0-d arrays, and Python scalars."""

    def test_real(self):
        data = np.random.rand(10,)
        assert_array_equal(data, np.real(data))

        zero_d = np.array(1)
        res = np.real(zero_d)
        assert_array_equal(zero_d, res)
        assert_(isinstance(res, np.ndarray))

        # a Python scalar stays a scalar
        scalar = 1
        res = np.real(scalar)
        assert_equal(scalar, res)
        assert_(not isinstance(res, np.ndarray))

    def test_cmplx(self):
        data = np.random.rand(10,)+1j*np.random.rand(10,)
        assert_array_equal(data.real, np.real(data))

        zero_d = np.array(1 + 1j)
        res = np.real(zero_d)
        assert_array_equal(zero_d.real, res)
        assert_(isinstance(res, np.ndarray))

        res = np.real(1 + 1j)
        assert_equal(1.0, res)
        assert_(not isinstance(res, np.ndarray))
|
||||
|
||||
|
||||
class TestImag:
    """np.imag on real/complex arrays, 0-d arrays, and Python scalars."""

    def test_real(self):
        data = np.random.rand(10,)
        assert_array_equal(0, np.imag(data))

        zero_d = np.array(1)
        res = np.imag(zero_d)
        assert_array_equal(0, res)
        assert_(isinstance(res, np.ndarray))

        # a Python scalar stays a scalar
        res = np.imag(1)
        assert_equal(0, res)
        assert_(not isinstance(res, np.ndarray))

    def test_cmplx(self):
        data = np.random.rand(10,)+1j*np.random.rand(10,)
        assert_array_equal(data.imag, np.imag(data))

        zero_d = np.array(1 + 1j)
        res = np.imag(zero_d)
        assert_array_equal(zero_d.imag, res)
        assert_(isinstance(res, np.ndarray))

        res = np.imag(1 + 1j)
        assert_equal(1.0, res)
        assert_(not isinstance(res, np.ndarray))
|
||||
|
||||
|
||||
class TestIscomplex:
    """iscomplex flags elements with a nonzero imaginary part."""

    def test_fail(self):
        z = np.array([-1, 0, 1])
        res = iscomplex(z)
        # np.any replaces np.sometrue, which was a plain alias of np.any,
        # deprecated and removed in NumPy 2.0.
        assert_(not np.any(res, axis=0))

    def test_pass(self):
        z = np.array([-1j, 1, 0])
        res = iscomplex(z)
        assert_array_equal(res, [1, 0, 0])
|
||||
|
||||
|
||||
class TestIsreal:
    """isreal flags elements with zero imaginary part."""

    def test_pass(self):
        data = np.array([-1, 0, 1j])
        assert_array_equal(isreal(data), [1, 1, 0])

    def test_fail(self):
        data = np.array([-1j, 1, 0])
        assert_array_equal(isreal(data), [0, 1, 1])
|
||||
|
||||
|
||||
class TestIscomplexobj:
    """iscomplexobj on arrays, scalars, lists, and duck-typed objects."""

    def test_basic(self):
        assert_(not iscomplexobj(np.array([-1, 0, 1])))
        assert_(iscomplexobj(np.array([-1j, 0, -1])))

    def test_scalar(self):
        assert_(not iscomplexobj(1.0))
        assert_(iscomplexobj(1+0j))

    def test_list(self):
        assert_(iscomplexobj([3, 1+0j, True]))
        assert_(not iscomplexobj([3, 1, True]))

    def test_duck(self):
        # Anything with a complex .dtype attribute counts.
        class FakeComplexArray:
            @property
            def dtype(self):
                return np.dtype(complex)

        assert_(iscomplexobj(FakeComplexArray()))

    def test_pandas_duck(self):
        # Mimic a pandas-style custom dtype (pandas.core.dtypes): here the
        # dtype attribute is a dtype-like class, not an np.dtype instance.
        class PdComplex(np.complex128):
            pass

        class PdDtype:
            name = 'category'
            names = None
            type = PdComplex
            kind = 'c'
            str = '<c16'
            base = np.dtype('complex128')

        class FakeSeries:
            @property
            def dtype(self):
                return PdDtype

        assert_(iscomplexobj(FakeSeries()))

    def test_custom_dtype_duck(self):
        class ComplexList(list):
            @property
            def dtype(self):
                return complex

        assert_(iscomplexobj(ComplexList([1+0j, 2+0j, 3+0j])))
|
||||
|
||||
|
||||
class TestIsrealobj:
    """isrealobj is the complement of iscomplexobj for arrays."""

    def test_basic(self):
        assert_(isrealobj(np.array([-1, 0, 1])))
        assert_(not isrealobj(np.array([-1j, 0, -1])))
|
||||
|
||||
|
||||
class TestIsnan:
    """np.isnan: only genuine NaN values (e.g. 0/0) are flagged."""

    def test_goodvalues(self):
        # Ordinary finite values are never NaN.
        z = np.array((-1., 0., 1.))
        res = np.isnan(z) == 0
        assert_all(np.all(res, axis=0))

    def test_posinf(self):
        # +inf (1/0) is not NaN.
        with np.errstate(divide='ignore'):
            assert_all(np.isnan(np.array((1.,))/0.) == 0)

    def test_neginf(self):
        # -inf (-1/0) is not NaN.
        with np.errstate(divide='ignore'):
            assert_all(np.isnan(np.array((-1.,))/0.) == 0)

    def test_ind(self):
        # 0/0 produces NaN ("indeterminate").
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isnan(np.array((0.,))/0.) == 1)

    def test_integer(self):
        # Integers can never be NaN.
        assert_all(np.isnan(1) == 0)

    def test_complex(self):
        assert_all(np.isnan(1+1j) == 0)

    def test_complex1(self):
        # Complex 0/0 is NaN.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isnan(np.array(0+0j)/0.) == 1)
|
||||
|
||||
|
||||
class TestIsfinite:
    """np.isfinite: false for inf and NaN, true otherwise."""
    # Fixme, wrong place, isfinite now ufunc

    def test_goodvalues(self):
        # Ordinary finite values are finite.
        z = np.array((-1., 0., 1.))
        res = np.isfinite(z) == 1
        assert_all(np.all(res, axis=0))

    def test_posinf(self):
        # +inf is not finite.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array((1.,))/0.) == 0)

    def test_neginf(self):
        # -inf is not finite.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array((-1.,))/0.) == 0)

    def test_ind(self):
        # NaN (0/0) is not finite.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array((0.,))/0.) == 0)

    def test_integer(self):
        assert_all(np.isfinite(1) == 1)

    def test_complex(self):
        assert_all(np.isfinite(1+1j) == 1)

    def test_complex1(self):
        # A complex value with any non-finite part is not finite.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
|
||||
|
||||
|
||||
class TestIsinf:
    """np.isinf: true only for +/-inf, not NaN or finite values."""
    # Fixme, wrong place, isinf now ufunc

    def test_goodvalues(self):
        # Finite values are not inf.
        z = np.array((-1., 0., 1.))
        res = np.isinf(z) == 0
        assert_all(np.all(res, axis=0))

    def test_posinf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array((1.,))/0.) == 1)

    def test_posinf_scalar(self):
        # Same check on a 0-d array.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array(1.,)/0.) == 1)

    def test_neginf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array((-1.,))/0.) == 1)

    def test_neginf_scalar(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array(-1.)/0.) == 1)

    def test_ind(self):
        # NaN (0/0) is not inf.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array((0.,))/0.) == 0)
|
||||
|
||||
|
||||
class TestIsposinf:
    """isposinf flags only positive infinity."""

    def test_generic(self):
        # [-inf, nan, inf]: only the last entry is +inf.
        with np.errstate(divide='ignore', invalid='ignore'):
            res = isposinf(np.array((-1., 0, 1))/0.)
        assert_(res[0] == 0)
        assert_(res[1] == 0)
        assert_(res[2] == 1)
|
||||
|
||||
|
||||
class TestIsneginf:
    """isneginf flags only negative infinity."""

    def test_generic(self):
        # [-inf, nan, inf]: only the first entry is -inf.
        with np.errstate(divide='ignore', invalid='ignore'):
            res = isneginf(np.array((-1., 0, 1))/0.)
        assert_(res[0] == 1)
        assert_(res[1] == 0)
        assert_(res[2] == 0)
|
||||
|
||||
|
||||
class TestNanToNum:
    """nan_to_num replaces nan/inf, optionally in place or with keywords."""

    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = nan_to_num(np.array((-1., 0, 1))/0.)
        # BUG FIX: these checks were originally chained as
        # `assert_all(a) and assert_all(b)`; assert_all returns None, so
        # the `and` short-circuited and the second assertion never ran.
        assert_(np.all(vals[0] < -1e10), vals[0])
        assert_(np.all(np.isfinite(vals[0])), vals[0])
        assert_(vals[1] == 0)
        assert_(np.all(vals[2] > 1e10), vals[2])
        assert_(np.all(np.isfinite(vals[2])), vals[2])
        assert_equal(type(vals), np.ndarray)

        # perform the same tests but with nan, posinf and neginf keywords
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = nan_to_num(np.array((-1., 0, 1))/0.,
                              nan=10, posinf=20, neginf=30)
        assert_equal(vals, [30, 10, 20])
        assert_(np.all(np.isfinite(vals[[0, 2]])), vals)
        assert_equal(type(vals), np.ndarray)

        # perform the same test but in-place
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = np.array((-1., 0, 1))/0.
            result = nan_to_num(vals, copy=False)

        assert_(result is vals)
        assert_(np.all(vals[0] < -1e10), vals[0])
        assert_(np.all(np.isfinite(vals[0])), vals[0])
        assert_(vals[1] == 0)
        assert_(np.all(vals[2] > 1e10), vals[2])
        assert_(np.all(np.isfinite(vals[2])), vals[2])
        assert_equal(type(vals), np.ndarray)

        # in-place with explicit replacement values
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = np.array((-1., 0, 1))/0.
            result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30)

        assert_(result is vals)
        assert_equal(vals, [30, 10, 20])
        assert_(np.all(np.isfinite(vals[[0, 2]])), vals)
        assert_equal(type(vals), np.ndarray)

    def test_array(self):
        vals = nan_to_num([1])
        assert_array_equal(vals, np.array([1], int))
        assert_equal(type(vals), np.ndarray)
        vals = nan_to_num([1], nan=10, posinf=20, neginf=30)
        assert_array_equal(vals, np.array([1], int))
        assert_equal(type(vals), np.ndarray)

    def test_integer(self):
        vals = nan_to_num(1)
        assert_(np.all(vals == 1), vals)
        assert_equal(type(vals), np.int_)
        vals = nan_to_num(1, nan=10, posinf=20, neginf=30)
        assert_(np.all(vals == 1), vals)
        assert_equal(type(vals), np.int_)

    def test_float(self):
        vals = nan_to_num(1.0)
        assert_(np.all(vals == 1.0), vals)
        # np.float64 replaces np.float_, an identical alias removed in
        # NumPy 2.0.
        assert_equal(type(vals), np.float64)
        vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30)
        assert_(np.all(vals == 1.1), vals)
        assert_equal(type(vals), np.float64)

    def test_complex_good(self):
        vals = nan_to_num(1+1j)
        assert_(np.all(vals == 1+1j), vals)
        # np.complex128 replaces np.complex_, an identical alias removed
        # in NumPy 2.0.
        assert_equal(type(vals), np.complex128)
        vals = nan_to_num(1+1j, nan=10, posinf=20, neginf=30)
        assert_(np.all(vals == 1+1j), vals)
        assert_equal(type(vals), np.complex128)

    def test_complex_bad(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(0+1.j)/0.
        vals = nan_to_num(v)
        # !! This is actually (unexpectedly) zero
        assert_(np.all(np.isfinite(vals)), vals)
        assert_equal(type(vals), np.complex128)

    def test_complex_bad2(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(-1+1.j)/0.
        vals = nan_to_num(v)
        assert_(np.all(np.isfinite(vals)), vals)
        assert_equal(type(vals), np.complex128)
        # Fixme
        #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
        # !! This is actually (unexpectedly) positive
        # !! inf. Comment out for now, and see if it
        # !! changes
        #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))

    def test_do_not_rewrite_previous_keyword(self):
        # When nan=np.inf, the infs substituted for NaN must not then be
        # rewritten by the posinf keyword.
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999)
        assert_(np.all(np.isfinite(vals[[0, 2]])), vals)
        assert_(np.all(vals[0] < -1e10), vals[0])
        assert_equal(vals[[1, 2]], [np.inf, 999])
        assert_equal(type(vals), np.ndarray)
|
||||
|
||||
def test_do_not_rewrite_previous_keyword(self):
|
||||
# This is done to test that when, for instance, nan=np.inf then these
|
||||
# values are not rewritten by posinf keyword to the posinf value.
|
||||
with np.errstate(divide='ignore', invalid='ignore'):
|
||||
vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999)
|
||||
assert_all(np.isfinite(vals[[0, 2]]))
|
||||
assert_all(vals[0] < -1e10)
|
||||
assert_equal(vals[[1, 2]], [np.inf, 999])
|
||||
assert_equal(type(vals), np.ndarray)
|
||||
|
||||
|
||||
class TestRealIfClose:
    """real_if_close drops tiny imaginary parts, subject to tol."""

    def test_basic(self):
        base = np.random.rand(10)
        # a tiny imaginary part is discarded
        res = real_if_close(base+1e-15j)
        assert_all(isrealobj(res))
        assert_array_equal(base, res)
        # a larger imaginary part is kept ...
        res = real_if_close(base+1e-7j)
        assert_all(iscomplexobj(res))
        # ... unless the tolerance is loosened
        res = real_if_close(base+1e-7j, tol=1e-6)
        assert_all(isrealobj(res))
|
||||
|
||||
|
||||
class TestArrayConversion:
    """asfarray converts to a floating ndarray and validates dtype."""

    def test_asfarray(self):
        converted = asfarray(np.array([1, 2, 3]))
        assert_equal(converted.__class__, np.ndarray)
        assert_(np.issubdtype(converted.dtype, np.floating))

        # previously this would infer dtypes from arrays, unlike every
        # single other numpy function; now it must raise
        assert_raises(TypeError,
                      asfarray, np.array([1, 2, 3]), dtype=np.array(1.0))
|
||||
104
venv/Lib/site-packages/numpy/lib/tests/test_ufunclike.py
Normal file
104
venv/Lib/site-packages/numpy/lib/tests/test_ufunclike.py
Normal file
|
|
@ -0,0 +1,104 @@
|
|||
import numpy as np
|
||||
import numpy.core as nx
|
||||
import numpy.lib.ufunclike as ufl
|
||||
from numpy.testing import (
|
||||
assert_, assert_equal, assert_array_equal, assert_warns, assert_raises
|
||||
)
|
||||
|
||||
|
||||
class TestUfunclike:
    """Tests for numpy.lib.ufunclike: isposinf, isneginf and fix."""

    def test_isposinf(self):
        # Only the +inf slot maps to True; -inf, nan and finite values don't.
        a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
        out = nx.zeros(a.shape, bool)
        tgt = nx.array([True, False, False, False, False, False])

        res = ufl.isposinf(a)
        assert_equal(res, tgt)
        # With `out` supplied, the buffer is filled in place and returned.
        res = ufl.isposinf(a, out)
        assert_equal(res, tgt)
        assert_equal(out, tgt)

        # Complex input is rejected with TypeError.
        # NOTE(review): np.complex_ was removed in NumPy 2.0; this vendored
        # test targets the 1.x API.
        a = a.astype(np.complex_)
        with assert_raises(TypeError):
            ufl.isposinf(a)

    def test_isneginf(self):
        # Mirror of test_isposinf: only the -inf slot maps to True.
        a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
        out = nx.zeros(a.shape, bool)
        tgt = nx.array([False, True, False, False, False, False])

        res = ufl.isneginf(a)
        assert_equal(res, tgt)
        res = ufl.isneginf(a, out)
        assert_equal(res, tgt)
        assert_equal(out, tgt)

        a = a.astype(np.complex_)
        with assert_raises(TypeError):
            ufl.isneginf(a)

    def test_fix(self):
        # fix() rounds toward zero for both positive and negative values.
        a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
        out = nx.zeros(a.shape, float)
        tgt = nx.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]])

        res = ufl.fix(a)
        assert_equal(res, tgt)
        res = ufl.fix(a, out)
        assert_equal(res, tgt)
        assert_equal(out, tgt)
        # Python scalars are accepted as well.
        assert_equal(ufl.fix(3.14), 3)

    def test_fix_with_subclass(self):
        # fix() must round-trip ndarray subclasses via __array_wrap__,
        # preserving both the subclass type and attached attributes.
        class MyArray(nx.ndarray):
            def __new__(cls, data, metadata=None):
                res = nx.array(data, copy=True).view(cls)
                res.metadata = metadata
                return res

            def __array_wrap__(self, obj, context=None):
                if isinstance(obj, MyArray):
                    obj.metadata = self.metadata
                return obj

            def __array_finalize__(self, obj):
                self.metadata = getattr(obj, 'metadata', None)
                return self

        a = nx.array([1.1, -1.1])
        m = MyArray(a, metadata='foo')
        f = ufl.fix(m)
        assert_array_equal(f, nx.array([1, -1]))
        assert_(isinstance(f, MyArray))
        assert_equal(f.metadata, 'foo')

        # check 0d arrays don't decay to scalars
        m0d = m[0,...]
        m0d.metadata = 'bar'
        f0d = ufl.fix(m0d)
        assert_(isinstance(f0d, MyArray))
        assert_equal(f0d.metadata, 'bar')

    def test_deprecated(self):
        # NumPy 1.13.0, 2017-04-26: the old `y=` out-argument spelling
        # still works but must raise a DeprecationWarning.
        assert_warns(DeprecationWarning, ufl.fix, [1, 2], y=nx.empty(2))
        assert_warns(DeprecationWarning, ufl.isposinf, [1, 2], y=nx.empty(2))
        assert_warns(DeprecationWarning, ufl.isneginf, [1, 2], y=nx.empty(2))

    def test_scalar(self):
        # Scalar input yields a numpy scalar of the matching type,
        # not a plain Python bool/float.
        x = np.inf
        actual = np.isposinf(x)
        expected = np.True_
        assert_equal(actual, expected)
        assert_equal(type(actual), type(expected))

        x = -3.4
        actual = np.fix(x)
        expected = np.float64(-3.0)
        assert_equal(actual, expected)
        assert_equal(type(actual), type(expected))

        # When `out` is given, the very same array object is returned.
        out = np.array(0.0)
        actual = np.fix(x, out=out)
        assert_(actual is out)
|
||||
142
venv/Lib/site-packages/numpy/lib/tests/test_utils.py
Normal file
142
venv/Lib/site-packages/numpy/lib/tests/test_utils.py
Normal file
|
|
@ -0,0 +1,142 @@
|
|||
import inspect
|
||||
import sys
|
||||
import pytest
|
||||
|
||||
from numpy.core import arange
|
||||
from numpy.testing import assert_, assert_equal, assert_raises_regex
|
||||
from numpy.lib import deprecate
|
||||
import numpy.lib.utils as utils
|
||||
|
||||
from io import StringIO
|
||||
|
||||
|
||||
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
def test_lookfor():
    """``lookfor('eigenvalue')`` should surface ``numpy.linalg.eig``."""
    sink = StringIO()
    utils.lookfor('eigenvalue', module='numpy', output=sink,
                  import_modules=False)
    assert_('numpy.linalg.eig' in sink.getvalue())
|
||||
|
||||
|
||||
# --- Fixtures: functions wrapped by numpy.lib.deprecate -----------------
# NOTE: the docstrings below are test fixtures; their exact text and
# indentation are asserted by the tests further down -- do not edit them.

@deprecate
def old_func(self, x):
    return x


@deprecate(message="Rather use new_func2")
def old_func2(self, x):
    return x


def old_func3(self, x):
    return x
# Explicit old/new names are woven into the generated deprecation text.
new_func3 = deprecate(old_func3, old_name="old_func3", new_name="new_func3")


def old_func4(self, x):
    """Summary.

    Further info.
    """
    return x
new_func4 = deprecate(old_func4)


def old_func5(self, x):
    """Summary.

    Bizarre indentation.
    """
    return x
# A multi-line deprecation message must not disturb the docstring body.
new_func5 = deprecate(old_func5, message="This function is\ndeprecated.")


def old_func6(self, x):
    """
    Also in PEP-257.
    """
    return x
new_func6 = deprecate(old_func6)
|
||||
|
||||
|
||||
def test_deprecate_decorator():
    """The bare ``@deprecate`` decorator rewrites the docstring."""
    doc = old_func.__doc__
    assert_('deprecated' in doc)
|
||||
|
||||
|
||||
def test_deprecate_decorator_message():
    """A custom message is embedded in the rewritten docstring."""
    doc = old_func2.__doc__
    assert_('Rather use new_func2' in doc)
|
||||
|
||||
|
||||
def test_deprecate_fn():
    """Both the old and the new name appear in the generated doc."""
    doc = new_func3.__doc__
    assert_('old_func3' in doc)
    assert_('new_func3' in doc)
|
||||
|
||||
|
||||
@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings")
@pytest.mark.parametrize('old_func, new_func', [
    (old_func4, new_func4),
    (old_func5, new_func5),
    (old_func6, new_func6),
])
def test_deprecate_help_indentation(old_func, new_func):
    """Deprecation wrapping must preserve docstring indentation.

    The wrapper prepends a message block to ``__doc__``; every line of
    the original body must keep the conventional four-space indent
    (PEP 257 style) in both the old and the new function.
    """
    _compare_docs(old_func, new_func)
    # Ensure we don't mess up the indentation.
    # The startswith literals below are a full 4-space indent level; the
    # whitespace had been collapsed to a single space, which made the
    # first branch a tautology (`startswith(' ') or not startswith(' ')`)
    # and the second branch far too weak to catch indentation damage.
    for li, line in enumerate(func_doc_lines(func) if False else func.__doc__.split('\n')) if False else ():
        pass
    for knd, func in (('old', old_func), ('new', new_func)):
        for li, line in enumerate(func.__doc__.split('\n')):
            if li == 0:
                # Summary line: either indented one full level or flush left.
                assert line.startswith('    ') or not line.startswith(' '), knd
            elif line:
                # Every non-empty continuation line keeps the 4-space indent.
                assert line.startswith('    '), knd
||||
|
||||
|
||||
def _compare_docs(old_func, new_func):
|
||||
old_doc = inspect.getdoc(old_func)
|
||||
new_doc = inspect.getdoc(new_func)
|
||||
index = new_doc.index('\n\n') + 2
|
||||
assert_equal(new_doc[index:], old_doc)
|
||||
|
||||
|
||||
@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings")
def test_deprecate_preserve_whitespace():
    # The oddly indented docstring body of old_func5 must survive the
    # deprecate() wrapping verbatim (leading whitespace included).
    # NOTE(review): the expected literal may have been whitespace-mangled
    # in this copy -- verify the indent width against old_func5's source.
    assert_('\n Bizarre' in new_func5.__doc__)
|
||||
|
||||
|
||||
def test_safe_eval_nameconstant():
    # Test if safe_eval supports Python 3.4 _ast.NameConstant
    # (i.e. bare `None`/`True`/`False` literals parse without error).
    utils.safe_eval('None')
|
||||
|
||||
|
||||
class TestByteBounds:
    """Tests for ``utils.byte_bounds`` across memory layouts."""

    def test_byte_bounds(self):
        # Contiguous array: the byte span is exactly size * itemsize.
        arr = arange(12).reshape(3, 4)
        low, high = utils.byte_bounds(arr)
        assert_equal(high - low, arr.size * arr.itemsize)

    def test_unusual_order_positive_stride(self):
        # A transpose reorders strides but covers the same bytes.
        view = arange(12).reshape(3, 4).T
        low, high = utils.byte_bounds(view)
        assert_equal(high - low, view.size * view.itemsize)

    def test_unusual_order_negative_stride(self):
        # Reversing a transposed view yields a negative stride; the
        # reported span is still the full underlying extent.
        view = arange(12).reshape(3, 4).T[::-1]
        low, high = utils.byte_bounds(view)
        assert_equal(high - low, view.size * view.itemsize)

    def test_strided(self):
        # Every-other-element view: the span ends at the last element
        # actually touched (even indices only), so it is one itemsize
        # short of twice the element count.
        view = arange(12)[::2]
        low, high = utils.byte_bounds(view)
        assert_equal(high - low, view.size * 2 * view.itemsize - view.itemsize)
|
||||
|
||||
|
||||
def test_assert_raises_regex_context_manager():
    """``assert_raises_regex`` must work as a context manager."""
    message = 'no deprecation warning'
    with assert_raises_regex(ValueError, message):
        raise ValueError(message)
|
||||
Loading…
Add table
Add a link
Reference in a new issue