Fixed database typo and removed unnecessary class identifier.

Batuhan Berk Başoğlu 2020-10-14 10:10:37 -04:00
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions


@@ -0,0 +1,122 @@
"""
Some signal functions implemented using mpmath.
"""
try:
import mpmath # type: ignore[import]
except ImportError:
mpmath = None
def _prod(seq):
"""Returns the product of the elements in the sequence `seq`."""
p = 1
for elem in seq:
p *= elem
return p
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles.
This is simply len(p) - len(z), which must be nonnegative.
A ValueError is raised if len(p) < len(z).
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
return degree
def _zpkbilinear(z, p, k, fs):
"""Bilinear transformation to convert a filter from analog to digital."""
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z]
p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p]
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z.extend([-1] * degree)
# Compensate for gain change
numer = _prod(fs2 - z1 for z1 in z)
denom = _prod(fs2 - p1 for p1 in p)
k_z = k * numer / denom
return z_z, p_z, k_z.real
def _zpklp2lp(z, p, k, wo=1):
"""Transform a lowpass filter to a different cutoff frequency."""
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = [wo * z1 for z1 in z]
p_lp = [wo * p1 for p1 in p]
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _butter_analog_poles(n):
"""
Poles of an analog Butterworth lowpass filter.
This is the same calculation as scipy.signal.buttap(n) or
scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used,
and only the poles are returned.
"""
poles = [-mpmath.exp(1j*mpmath.pi*k/(2*n)) for k in range(-n+1, n, 2)]
return poles
def butter_lp(n, Wn):
"""
Lowpass Butterworth digital filter design.
This computes the same result as scipy.signal.butter(n, Wn, output='zpk'),
but it uses mpmath, and the results are returned in lists instead of NumPy
arrays.
"""
zeros = []
poles = _butter_analog_poles(n)
k = 1
fs = 2
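# Pre-warp the analog cutoff so that, after the bilinear transform below,
# the digital cutoff lands exactly at Wn (Wn is normalized to the Nyquist
# frequency, as in scipy.signal.butter).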
warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs)
z, p, k = _zpklp2lp(zeros, poles, k, wo=warped)
z, p, k = _zpkbilinear(z, p, k, fs=fs)
return z, p, k
def zpkfreqz(z, p, k, worN=None):
"""
Frequency response of a filter in zpk format, using mpmath.
This is the same calculation as scipy.signal.freqz, but the input is in
zpk format, the calculation is performed using mpmath, and the results are
returned in lists instead of NumPy arrays.
"""
if worN is None or isinstance(worN, int):
N = worN or 512
ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)]
else:
ws = worN
h = []
for wk in ws:
zm1 = mpmath.exp(1j * wk)
numer = _prod([zm1 - t for t in z])
denom = _prod([zm1 - t for t in p])
hk = k * numer / denom
h.append(hk)
return ws, h
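
A minimal usage sketch for the helpers above (not part of the file; it assumes only that mpmath is installed and exercises butter_lp and zpkfreqz at high precision):

if mpmath is not None:
    mpmath.mp.dps = 40  # work with 40 significant decimal digits
    # 4th-order digital Butterworth lowpass, normalized cutoff Wn = 0.25
    z, p, k = butter_lp(4, mpmath.mpf('0.25'))
    # frequency response at 8 points on [0, pi)
    ws, h = zpkfreqz(z, p, k, worN=8)
    for w, hk in zip(ws, h):
        print(w, abs(hk))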


@@ -0,0 +1,111 @@
import numpy as np
from numpy.testing import assert_array_equal
from pytest import raises as assert_raises
from scipy.signal._arraytools import (axis_slice, axis_reverse,
odd_ext, even_ext, const_ext, zero_ext)
class TestArrayTools(object):
def test_axis_slice(self):
a = np.arange(12).reshape(3, 4)
s = axis_slice(a, start=0, stop=1, axis=0)
assert_array_equal(s, a[0:1, :])
s = axis_slice(a, start=-1, axis=0)
assert_array_equal(s, a[-1:, :])
s = axis_slice(a, start=0, stop=1, axis=1)
assert_array_equal(s, a[:, 0:1])
s = axis_slice(a, start=-1, axis=1)
assert_array_equal(s, a[:, -1:])
s = axis_slice(a, start=0, step=2, axis=0)
assert_array_equal(s, a[::2, :])
s = axis_slice(a, start=0, step=2, axis=1)
assert_array_equal(s, a[:, ::2])
def test_axis_reverse(self):
a = np.arange(12).reshape(3, 4)
r = axis_reverse(a, axis=0)
assert_array_equal(r, a[::-1, :])
r = axis_reverse(a, axis=1)
assert_array_equal(r, a[:, ::-1])
def test_odd_ext(self):
a = np.array([[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5]])
odd = odd_ext(a, 2, axis=1)
expected = np.array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
[11, 10, 9, 8, 7, 6, 5, 4, 3]])
assert_array_equal(odd, expected)
odd = odd_ext(a, 1, axis=0)
expected = np.array([[-7, -4, -1, 2, 5],
[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5],
[17, 14, 11, 8, 5]])
assert_array_equal(odd, expected)
assert_raises(ValueError, odd_ext, a, 2, axis=0)
assert_raises(ValueError, odd_ext, a, 5, axis=1)
def test_even_ext(self):
a = np.array([[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5]])
even = even_ext(a, 2, axis=1)
expected = np.array([[3, 2, 1, 2, 3, 4, 5, 4, 3],
[7, 8, 9, 8, 7, 6, 5, 6, 7]])
assert_array_equal(even, expected)
even = even_ext(a, 1, axis=0)
expected = np.array([[9, 8, 7, 6, 5],
[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5],
[1, 2, 3, 4, 5]])
assert_array_equal(even, expected)
assert_raises(ValueError, even_ext, a, 2, axis=0)
assert_raises(ValueError, even_ext, a, 5, axis=1)
def test_const_ext(self):
a = np.array([[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5]])
const = const_ext(a, 2, axis=1)
expected = np.array([[1, 1, 1, 2, 3, 4, 5, 5, 5],
[9, 9, 9, 8, 7, 6, 5, 5, 5]])
assert_array_equal(const, expected)
const = const_ext(a, 1, axis=0)
expected = np.array([[1, 2, 3, 4, 5],
[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5],
[9, 8, 7, 6, 5]])
assert_array_equal(const, expected)
def test_zero_ext(self):
a = np.array([[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5]])
zero = zero_ext(a, 2, axis=1)
expected = np.array([[0, 0, 1, 2, 3, 4, 5, 0, 0],
[0, 0, 9, 8, 7, 6, 5, 0, 0]])
assert_array_equal(zero, expected)
zero = zero_ext(a, 1, axis=0)
expected = np.array([[0, 0, 0, 0, 0],
[1, 2, 3, 4, 5],
[9, 8, 7, 6, 5],
[0, 0, 0, 0, 0]])
assert_array_equal(zero, expected)


@@ -0,0 +1,222 @@
# pylint: disable=missing-docstring
import numpy as np
from numpy import array
from numpy.testing import (assert_equal,
assert_allclose, assert_array_equal,
assert_almost_equal)
from pytest import raises
import scipy.signal.bsplines as bsp
class TestBSplines(object):
"""Test behaviors of B-splines. The values tested against were returned as of
SciPy 1.1.0 and are included for regression testing purposes."""
def test_factorial(self):
# can't all be zero state
assert_equal(bsp.factorial(1), 1)
def test_spline_filter(self):
np.random.seed(12457)
# Test the type-error branch
raises(TypeError, bsp.spline_filter, array([0]), 0)
# Test the complex branch
data_array_complex = np.random.rand(7, 7) + np.random.rand(7, 7)*1j
# make the magnitude exceed 1, and make some negative
data_array_complex = 10*(1+1j-2*data_array_complex)
result_array_complex = array(
[[-4.61489230e-01-1.92994022j, 8.33332443+6.25519943j,
6.96300745e-01-9.05576038j, 5.28294849+3.97541356j,
5.92165565+7.68240595j, 6.59493160-1.04542804j,
9.84503460-5.85946894j],
[-8.78262329-8.4295969j, 7.20675516+5.47528982j,
-8.17223072+2.06330729j, -4.38633347-8.65968037j,
9.89916801-8.91720295j, 2.67755103+8.8706522j,
6.24192142+3.76879835j],
[-3.15627527+2.56303072j, 9.87658501-0.82838702j,
-9.96930313+8.72288895j, 3.17193985+6.42474651j,
-4.50919819-6.84576082j, 5.75423431+9.94723988j,
9.65979767+6.90665293j],
[-8.28993416-6.61064005j, 9.71416473e-01-9.44907284j,
-2.38331890+9.25196648j, -7.08868170-0.77403212j,
4.89887714+7.05371094j, -1.37062311-2.73505688j,
7.70705748+2.5395329j],
[2.51528406-1.82964492j, 3.65885472+2.95454836j,
5.16786575-1.66362023j, -8.77737999e-03+5.72478867j,
4.10533333-3.10287571j, 9.04761887+1.54017115j,
-5.77960968e-01-7.87758923j],
[9.86398506-3.98528528j, -4.71444130-2.44316983j,
-1.68038976-1.12708664j, 2.84695053+1.01725709j,
1.14315915-8.89294529j, -3.17127085-5.42145538j,
1.91830420-6.16370344j],
[7.13875294+2.91851187j, -5.35737514+9.64132309j,
-9.66586399+0.70250005j, -9.87717438-2.0262239j,
9.93160629+1.5630846j, 4.71948051-2.22050714j,
9.49550819+7.8995142j]])
# FIXME: for complex types, the computations are done in
# single precision (reason unclear). When this is changed,
# this test needs updating.
assert_allclose(bsp.spline_filter(data_array_complex, 0),
result_array_complex, rtol=1e-6)
# Test the real branch
np.random.seed(12457)
data_array_real = np.random.rand(12, 12)
# make the magnitude exceed 1, and make some negative
data_array_real = 10*(1-2*data_array_real)
result_array_real = array(
[[-.463312621, 8.33391222, .697290949, 5.28390836,
5.92066474, 6.59452137, 9.84406950, -8.78324188,
7.20675750, -8.17222994, -4.38633345, 9.89917069],
[2.67755154, 6.24192170, -3.15730578, 9.87658581,
-9.96930425, 3.17194115, -4.50919947, 5.75423446,
9.65979824, -8.29066885, .971416087, -2.38331897],
[-7.08868346, 4.89887705, -1.37062289, 7.70705838,
2.51526461, 3.65885497, 5.16786604, -8.77715342e-03,
4.10533325, 9.04761993, -.577960351, 9.86382519],
[-4.71444301, -1.68038985, 2.84695116, 1.14315938,
-3.17127091, 1.91830461, 7.13779687, -5.35737482,
-9.66586425, -9.87717456, 9.93160672, 4.71948144],
[9.49551194, -1.92958436, 6.25427993, -9.05582911,
3.97562282, 7.68232426, -1.04514824, -5.86021443,
-8.43007451, 5.47528997, 2.06330736, -8.65968112],
[-8.91720100, 8.87065356, 3.76879937, 2.56222894,
-.828387146, 8.72288903, 6.42474741, -6.84576083,
9.94724115, 6.90665380, -6.61084494, -9.44907391],
[9.25196790, -.774032030, 7.05371046, -2.73505725,
2.53953305, -1.82889155, 2.95454824, -1.66362046,
5.72478916, -3.10287679, 1.54017123, -7.87759020],
[-3.98464539, -2.44316992, -1.12708657, 1.01725672,
-8.89294671, -5.42145629, -6.16370321, 2.91775492,
9.64132208, .702499998, -2.02622392, 1.56308431],
[-2.22050773, 7.89951554, 5.98970713, -7.35861835,
5.45459283, -7.76427957, 3.67280490, -4.05521315,
4.51967507, -3.22738749, -3.65080177, 3.05630155],
[-6.21240584, -.296796126, -8.34800163, 9.21564563,
-3.61958784, -4.77120006, -3.99454057, 1.05021988e-03,
-6.95982829, 6.04380797, 8.43181250, -2.71653339],
[1.19638037, 6.99718842e-02, 6.72020394, -2.13963198,
3.75309875, -5.70076744, 5.92143551, -7.22150575,
-3.77114594, -1.11903194, -5.39151466, 3.06620093],
[9.86326886, 1.05134482, -7.75950607, -3.64429655,
7.81848957, -9.02270373, 3.73399754, -4.71962549,
-7.71144306, 3.78263161, 6.46034818, -4.43444731]])
assert_allclose(bsp.spline_filter(data_array_real, 0),
result_array_real)
def test_bspline(self):
np.random.seed(12458)
assert_allclose(bsp.bspline(np.random.rand(1, 1), 2),
array([[0.73694695]]))
data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j
data_array_complex = 0.1*data_array_complex
result_array_complex = array(
[[0.40882362, 0.41021151, 0.40886708, 0.40905103],
[0.40829477, 0.41021230, 0.40966097, 0.40939871],
[0.41036803, 0.40901724, 0.40965331, 0.40879513],
[0.41032862, 0.40925287, 0.41037754, 0.41027477]])
assert_allclose(bsp.bspline(data_array_complex, 10),
result_array_complex)
def test_gauss_spline(self):
np.random.seed(12459)
assert_almost_equal(bsp.gauss_spline(0, 0), 1.381976597885342)
assert_allclose(bsp.gauss_spline(array([1.]), 1), array([0.04865217]))
def test_cubic(self):
np.random.seed(12460)
assert_array_equal(bsp.cubic([0]), array([0]))
data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j
data_array_complex = 1+1j-2*data_array_complex
# scaling the magnitude by 10 makes the results close enough to zero
# that the assertion fails, so just make the elements have a mix of
# positive and negative imaginary components...
result_array_complex = array(
[[0.23056563, 0.38414406, 0.08342987, 0.06904847],
[0.17240848, 0.47055447, 0.63896278, 0.39756424],
[0.12672571, 0.65862632, 0.1116695, 0.09700386],
[0.3544116, 0.17856518, 0.1528841, 0.17285762]])
assert_allclose(bsp.cubic(data_array_complex), result_array_complex)
def test_quadratic(self):
np.random.seed(12461)
assert_array_equal(bsp.quadratic([0]), array([0]))
data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j
# scaling the magnitude by 10 makes the results all zero,
# so just make the elements have a mix of positive and negative
# imaginary components...
data_array_complex = (1+1j-2*data_array_complex)
result_array_complex = array(
[[0.23062746, 0.06338176, 0.34902312, 0.31944105],
[0.14701256, 0.13277773, 0.29428615, 0.09814697],
[0.52873842, 0.06484157, 0.09517566, 0.46420389],
[0.09286829, 0.09371954, 0.1422526, 0.16007024]])
assert_allclose(bsp.quadratic(data_array_complex),
result_array_complex)
def test_cspline1d(self):
np.random.seed(12462)
assert_array_equal(bsp.cspline1d(array([0])), [0.])
c1d = array([1.21037185, 1.86293902, 2.98834059, 4.11660378,
4.78893826])
# test lamb != 0
assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5]), 1), c1d)
c1d0 = array([0.78683946, 2.05333735, 2.99981113, 3.94741812,
5.21051638])
assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5])), c1d0)
def test_qspline1d(self):
np.random.seed(12463)
assert_array_equal(bsp.qspline1d(array([0])), [0.])
# test lamb != 0
raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), 1.)
raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), -1.)
q1d0 = array([0.85350007, 2.02441743, 2.99999534, 3.97561055,
5.14634135])
assert_allclose(bsp.qspline1d(array([1., 2, 3, 4, 5])), q1d0)
def test_cspline1d_eval(self):
np.random.seed(12464)
assert_allclose(bsp.cspline1d_eval(array([0., 0]), [0.]), array([0.]))
assert_array_equal(bsp.cspline1d_eval(array([1., 0, 1]), []),
array([]))
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
dx = x[1]-x[0]
newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
-0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
12.5]
y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
1.396, 4.094])
cj = bsp.cspline1d(y)
newy = array([6.203, 4.41570658, 3.514, 5.16924703, 6.864, 6.04643068,
4.21600281, 6.04643068, 6.864, 5.16924703, 3.514,
4.41570658, 6.203, 6.80717667, 6.759, 6.98971173, 7.433,
7.79560142, 7.874, 7.41525761, 5.879, 3.18686814, 1.396,
2.24889482, 4.094, 2.24889482, 1.396, 3.18686814, 5.879,
7.41525761, 7.874, 7.79560142, 7.433, 6.98971173, 6.759,
6.80717667, 6.203, 4.41570658])
assert_allclose(bsp.cspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy)
def test_qspline1d_eval(self):
np.random.seed(12465)
assert_allclose(bsp.qspline1d_eval(array([0., 0]), [0.]), array([0.]))
assert_array_equal(bsp.qspline1d_eval(array([1., 0, 1]), []),
array([]))
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
dx = x[1]-x[0]
newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
-0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
12.5]
y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
1.396, 4.094])
cj = bsp.qspline1d(y)
newy = array([6.203, 4.49418159, 3.514, 5.18390821, 6.864, 5.91436915,
4.21600002, 5.91436915, 6.864, 5.18390821, 3.514,
4.49418159, 6.203, 6.71900226, 6.759, 7.03980488, 7.433,
7.81016848, 7.874, 7.32718426, 5.879, 3.23872593, 1.396,
2.34046013, 4.094, 2.34046013, 1.396, 3.23872593, 5.879,
7.32718426, 7.874, 7.81016848, 7.433, 7.03980488, 6.759,
6.71900226, 6.203, 4.49418159])
assert_allclose(bsp.qspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy)


@@ -0,0 +1,420 @@
import numpy as np
from numpy.testing import \
assert_array_almost_equal, assert_almost_equal, \
assert_allclose, assert_equal
import pytest
from scipy.signal import cont2discrete as c2d
from scipy.signal import dlsim, ss2tf, ss2zpk, lsim2, lti
from scipy.signal import tf2ss, impulse2, dimpulse, step2, dstep
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
# March 29, 2011
class TestC2D(object):
def test_zoh(self):
ac = np.eye(2)
bc = np.full((2, 1), 0.5)
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
ad_truth = 1.648721270700128 * np.eye(2)
bd_truth = np.full((2, 1), 0.324360635350064)
# c and d in discrete should be equal to their continuous counterparts
dt_requested = 0.5
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cc, cd)
assert_array_almost_equal(dc, dd)
assert_almost_equal(dt_requested, dt)
def test_foh(self):
ac = np.eye(2)
bc = np.full((2, 1), 0.5)
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
# True values are verified with Matlab
ad_truth = 1.648721270700128 * np.eye(2)
bd_truth = np.full((2, 1), 0.420839287058789)
cd_truth = cc
dd_truth = np.array([[0.260262223725224],
[0.297442541400256],
[-0.144098411624840]])
dt_requested = 0.5
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='foh')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
assert_almost_equal(dt_requested, dt)
def test_impulse(self):
ac = np.eye(2)
bc = np.full((2, 1), 0.5)
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [0.0]])
# True values are verified with Matlab
ad_truth = 1.648721270700128 * np.eye(2)
bd_truth = np.full((2, 1), 0.412180317675032)
cd_truth = cc
dd_truth = np.array([[0.4375], [0.5], [0.3125]])
dt_requested = 0.5
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='impulse')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
assert_almost_equal(dt_requested, dt)
def test_gbt(self):
ac = np.eye(2)
bc = np.full((2, 1), 0.5)
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
dt_requested = 0.5
alpha = 1.0 / 3.0
ad_truth = 1.6 * np.eye(2)
bd_truth = np.full((2, 1), 0.3)
cd_truth = np.array([[0.9, 1.2],
[1.2, 1.2],
[1.2, 0.3]])
dd_truth = np.array([[0.175],
[0.2],
[-0.205]])
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='gbt', alpha=alpha)
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
def test_euler(self):
ac = np.eye(2)
bc = np.full((2, 1), 0.5)
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
dt_requested = 0.5
ad_truth = 1.5 * np.eye(2)
bd_truth = np.full((2, 1), 0.25)
cd_truth = np.array([[0.75, 1.0],
[1.0, 1.0],
[1.0, 0.25]])
dd_truth = dc
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='euler')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
assert_almost_equal(dt_requested, dt)
def test_backward_diff(self):
ac = np.eye(2)
bc = np.full((2, 1), 0.5)
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
dt_requested = 0.5
ad_truth = 2.0 * np.eye(2)
bd_truth = np.full((2, 1), 0.5)
cd_truth = np.array([[1.5, 2.0],
[2.0, 2.0],
[2.0, 0.5]])
dd_truth = np.array([[0.875],
[1.0],
[0.295]])
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='backward_diff')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
def test_bilinear(self):
ac = np.eye(2)
bc = np.full((2, 1), 0.5)
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
dc = np.array([[0.0], [0.0], [-0.33]])
dt_requested = 0.5
ad_truth = (5.0 / 3.0) * np.eye(2)
bd_truth = np.full((2, 1), 1.0 / 3.0)
cd_truth = np.array([[1.0, 4.0 / 3.0],
[4.0 / 3.0, 4.0 / 3.0],
[4.0 / 3.0, 1.0 / 3.0]])
dd_truth = np.array([[0.291666666666667],
[1.0 / 3.0],
[-0.121666666666667]])
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='bilinear')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
assert_almost_equal(dt_requested, dt)
# Same continuous system again, but change sampling rate
ad_truth = 1.4 * np.eye(2)
bd_truth = np.full((2, 1), 0.2)
cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]])
dd_truth = np.array([[0.175], [0.2], [-0.205]])
dt_requested = 1.0 / 3.0
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
method='bilinear')
assert_array_almost_equal(ad_truth, ad)
assert_array_almost_equal(bd_truth, bd)
assert_array_almost_equal(cd_truth, cd)
assert_array_almost_equal(dd_truth, dd)
assert_almost_equal(dt_requested, dt)
def test_transferfunction(self):
numc = np.array([0.25, 0.25, 0.5])
denc = np.array([0.75, 0.75, 1.0])
numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]])
dend = np.array([1.0, -1.351394049721225, 0.606530659712634])
dt_requested = 0.5
num, den, dt = c2d((numc, denc), dt_requested, method='zoh')
assert_array_almost_equal(numd, num)
assert_array_almost_equal(dend, den)
assert_almost_equal(dt_requested, dt)
def test_zerospolesgain(self):
zeros_c = np.array([0.5, -0.5])
poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
k_c = 1.0
zeros_d = [1.23371727305860, 0.735356894461267]
poles_d = [0.938148335039729 + 0.346233593780536j,
0.938148335039729 - 0.346233593780536j]
k_d = 1.0
dt_requested = 0.5
zeros, poles, k, dt = c2d((zeros_c, poles_c, k_c), dt_requested,
method='zoh')
assert_array_almost_equal(zeros_d, zeros)
assert_array_almost_equal(poles_d, poles)
assert_almost_equal(k_d, k)
assert_almost_equal(dt_requested, dt)
def test_gbt_with_sio_tf_and_zpk(self):
"""Test method='gbt' with alpha=0.25 for tf and zpk cases."""
# State space coefficients for the continuous SIO system.
A = -1.0
B = 1.0
C = 1.0
D = 0.5
# The continuous transfer function coefficients.
cnum, cden = ss2tf(A, B, C, D)
# Continuous zpk representation
cz, cp, ck = ss2zpk(A, B, C, D)
h = 1.0
alpha = 0.25
# Explicit formulas, in the scalar case.
Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A)
Bd = h * B / (1 - alpha * h * A)
Cd = C / (1 - alpha * h * A)
Dd = D + alpha * C * Bd
# Convert the explicit solution to tf
dnum, dden = ss2tf(Ad, Bd, Cd, Dd)
# Compute the discrete tf using cont2discrete.
c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha)
assert_allclose(dnum, c2dnum)
assert_allclose(dden, c2dden)
# Convert explicit solution to zpk.
dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd)
# Compute the discrete zpk using cont2discrete.
c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha)
assert_allclose(dz, c2dz)
assert_allclose(dp, c2dp)
assert_allclose(dk, c2dk)
def test_discrete_approx(self):
"""
Test that the solution to the discrete approximation of a continuous
system actually approximates the solution to the continuous system.
This is an indirect test of the correctness of the implementation
of cont2discrete.
"""
def u(t):
return np.sin(2.5 * t)
a = np.array([[-0.01]])
b = np.array([[1.0]])
c = np.array([[1.0]])
d = np.array([[0.2]])
x0 = 1.0
t = np.linspace(0, 10.0, 101)
dt = t[1] - t[0]
u1 = u(t)
# Use lsim2 to compute the solution to the continuous system.
t, yout, xout = lsim2((a, b, c, d), T=t, U=u1, X0=x0,
rtol=1e-9, atol=1e-11)
# Convert the continuous system to a discrete approximation.
dsys = c2d((a, b, c, d), dt, method='bilinear')
# Use dlsim with the pairwise averaged input to compute the output
# of the discrete system.
u2 = 0.5 * (u1[:-1] + u1[1:])
t2 = t[:-1]
td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0)
# ymid is the average of consecutive terms of the "exact" output
# computed by lsim2. This is what the discrete approximation
# actually approximates.
ymid = 0.5 * (yout[:-1] + yout[1:])
assert_allclose(yd2.ravel(), ymid, rtol=1e-4)
def test_simo_tf(self):
# See gh-5753
tf = ([[1, 0], [1, 1]], [1, 1])
num, den, dt = c2d(tf, 0.01)
assert_equal(dt, 0.01) # sanity check
assert_allclose(den, [1, -0.990404983], rtol=1e-3)
assert_allclose(num, [[1, -1], [1, -0.99004983]], rtol=1e-3)
def test_multioutput(self):
ts = 0.01 # time step
tf = ([[1, -3], [1, 5]], [1, 1])
num, den, dt = c2d(tf, ts)
tf1 = (tf[0][0], tf[1])
num1, den1, dt1 = c2d(tf1, ts)
tf2 = (tf[0][1], tf[1])
num2, den2, dt2 = c2d(tf2, ts)
# Sanity checks
assert_equal(dt, dt1)
assert_equal(dt, dt2)
# Check that we get the same results
assert_allclose(num, np.vstack((num1, num2)), rtol=1e-13)
# Single input, so the denominator should
# not be multidimensional like the numerator
assert_allclose(den, den1, rtol=1e-13)
assert_allclose(den, den2, rtol=1e-13)
class TestC2dLti(object):
def test_c2d_ss(self):
# StateSpace
A = np.array([[-0.3, 0.1], [0.2, -0.7]])
B = np.array([[0], [1]])
C = np.array([[1, 0]])
D = 0
A_res = np.array([[0.985136404135682, 0.004876671474795],
[0.009753342949590, 0.965629718236502]])
B_res = np.array([[0.000122937599964], [0.049135527547844]])
sys_ssc = lti(A, B, C, D)
sys_ssd = sys_ssc.to_discrete(0.05)
assert_allclose(sys_ssd.A, A_res)
assert_allclose(sys_ssd.B, B_res)
assert_allclose(sys_ssd.C, C)
assert_allclose(sys_ssd.D, D)
def test_c2d_tf(self):
sys = lti([0.5, 0.3], [1.0, 0.4])
sys = sys.to_discrete(0.005)
# Matlab results
num_res = np.array([0.5, -0.485149004980066])
den_res = np.array([1.0, -0.980198673306755])
# The conversion accumulates noticeable numerical error, so use loose tolerances
assert_allclose(sys.den, den_res, atol=0.02)
assert_allclose(sys.num, num_res, atol=0.02)
class TestC2dInvariants:
# Some test cases for checking the invariances.
# Array of triplets: (system, sample time, number of samples)
cases = [
(tf2ss([1, 1], [1, 1.5, 1]), 0.25, 10),
(tf2ss([1, 2], [1, 1.5, 3, 1]), 0.5, 10),
(tf2ss(0.1, [1, 1, 2, 1]), 0.5, 10),
]
# Some options for lsim2 and derived routines
tolerances = {'rtol': 1e-9, 'atol': 1e-11}
# Check that systems discretized with the impulse-invariant
# method really hold the invariant
@pytest.mark.parametrize("sys,sample_time,samples_number", cases)
def test_impulse_invariant(self, sys, sample_time, samples_number):
time = np.arange(samples_number) * sample_time
_, yout_cont = impulse2(sys, T=time, **self.tolerances)
_, yout_disc = dimpulse(c2d(sys, sample_time, method='impulse'),
n=len(time))
assert_allclose(sample_time * yout_cont.ravel(), yout_disc[0].ravel())
# Step invariant should hold for ZOH discretized systems
@pytest.mark.parametrize("sys,sample_time,samples_number", cases)
def test_step_invariant(self, sys, sample_time, samples_number):
time = np.arange(samples_number) * sample_time
_, yout_cont = step2(sys, T=time, **self.tolerances)
_, yout_disc = dstep(c2d(sys, sample_time, method='zoh'), n=len(time))
assert_allclose(yout_cont.ravel(), yout_disc[0].ravel())
# Linear invariant should hold for FOH discretized systems
@pytest.mark.parametrize("sys,sample_time,samples_number", cases)
def test_linear_invariant(self, sys, sample_time, samples_number):
time = np.arange(samples_number) * sample_time
_, yout_cont, _ = lsim2(sys, T=time, U=time, **self.tolerances)
_, yout_disc, _ = dlsim(c2d(sys, sample_time, method='foh'), u=time)
assert_allclose(yout_cont.ravel(), yout_disc.ravel())
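
For reference, the impulse-invariance property exercised by TestC2dInvariants can be checked in isolation. A minimal sketch using the same scipy.signal calls (one of the test systems above, dt = 0.25):

import numpy as np
from scipy.signal import tf2ss, cont2discrete, impulse2, dimpulse

sys_c = tf2ss([1, 1], [1, 1.5, 1])   # continuous-time test system
dt = 0.25
t = np.arange(10) * dt
# continuous impulse response, sampled at the discretization instants
_, y_c = impulse2(sys_c, T=t, rtol=1e-9, atol=1e-11)
# impulse response of the impulse-invariant discretization
_, y_d = dimpulse(cont2discrete(sys_c, dt, method='impulse'), n=len(t))
print(np.allclose(dt * y_c.ravel(), y_d[0].ravel()))  # should print True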


@@ -0,0 +1,598 @@
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
# April 4, 2011
import numpy as np
from numpy.testing import (assert_equal,
assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_, assert_almost_equal,
suppress_warnings)
from pytest import raises as assert_raises
from scipy.signal import (dlsim, dstep, dimpulse, tf2zpk, lti, dlti,
StateSpace, TransferFunction, ZerosPolesGain,
dfreqresp, dbode, BadCoefficients)
class TestDLTI(object):
def test_dlsim(self):
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
c = np.asarray([[0.1, 0.3]])
d = np.asarray([[0.0, -0.1, 0.0]])
dt = 0.5
# Create an input matrix with inputs down the columns (3 cols) and its
# respective time input vector
u = np.hstack((np.linspace(0, 4.0, num=5)[:, np.newaxis],
np.full((5, 1), 0.01),
np.full((5, 1), -0.002)))
t_in = np.linspace(0, 2.0, num=5)
# Define the known result
yout_truth = np.array([[-0.001,
-0.00073,
0.039446,
0.0915387,
0.13195948]]).T
xout_truth = np.asarray([[0, 0],
[0.0012, 0.0005],
[0.40233, 0.00071],
[1.163368, -0.079327],
[2.2402985, -0.3035679]])
tout, yout, xout = dlsim((a, b, c, d, dt), u, t_in)
assert_array_almost_equal(yout_truth, yout)
assert_array_almost_equal(xout_truth, xout)
assert_array_almost_equal(t_in, tout)
# Make sure input with single-dimension doesn't raise error
dlsim((1, 2, 3), 4)
# Interpolated control - inputs should have different time steps
# than the discrete model uses internally
u_sparse = u[[0, 4], :]
t_sparse = np.asarray([0.0, 2.0])
tout, yout, xout = dlsim((a, b, c, d, dt), u_sparse, t_sparse)
assert_array_almost_equal(yout_truth, yout)
assert_array_almost_equal(xout_truth, xout)
assert_equal(len(tout), yout.shape[0])
# Transfer functions (assume dt = 0.5)
num = np.asarray([1.0, -0.1])
den = np.asarray([0.3, 1.0, 0.2])
yout_truth = np.array([[0.0,
0.0,
3.33333333333333,
-4.77777777777778,
23.0370370370370]]).T
# Assume use of the first column of the control input built earlier
tout, yout = dlsim((num, den, 0.5), u[:, 0], t_in)
assert_array_almost_equal(yout, yout_truth)
assert_array_almost_equal(t_in, tout)
# Retest the same with a 1-D input vector
uflat = np.asarray(u[:, 0])
uflat = uflat.reshape((5,))
tout, yout = dlsim((num, den, 0.5), uflat, t_in)
assert_array_almost_equal(yout, yout_truth)
assert_array_almost_equal(t_in, tout)
# zeros-poles-gain representation
zd = np.array([0.5, -0.5])
pd = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
k = 1.0
yout_truth = np.array([[0.0, 1.0, 2.0, 2.25, 2.5]]).T
tout, yout = dlsim((zd, pd, k, 0.5), u[:, 0], t_in)
assert_array_almost_equal(yout, yout_truth)
assert_array_almost_equal(t_in, tout)
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dlsim, system, u)
def test_dstep(self):
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
c = np.asarray([[0.1, 0.3]])
d = np.asarray([[0.0, -0.1, 0.0]])
dt = 0.5
# Because b.shape[1] == 3, dstep should result in a tuple of three
# result vectors
yout_step_truth = (np.asarray([0.0, 0.04, 0.052, 0.0404, 0.00956,
-0.036324, -0.093318, -0.15782348,
-0.226628324, -0.2969374948]),
np.asarray([-0.1, -0.075, -0.058, -0.04815,
-0.04453, -0.0461895, -0.0521812,
-0.061588875, -0.073549579,
-0.08727047595]),
np.asarray([0.0, -0.01, -0.013, -0.0101, -0.00239,
0.009081, 0.0233295, 0.03945587,
0.056657081, 0.0742343737]))
tout, yout = dstep((a, b, c, d, dt), n=10)
assert_equal(len(yout), 3)
for i in range(0, len(yout)):
assert_equal(yout[i].shape[0], 10)
assert_array_almost_equal(yout[i].flatten(), yout_step_truth[i])
# Check that the other two inputs (tf, zpk) will work as well
tfin = ([1.0], [1.0, 1.0], 0.5)
yout_tfstep = np.asarray([0.0, 1.0, 0.0])
tout, yout = dstep(tfin, n=3)
assert_equal(len(yout), 1)
assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
tout, yout = dstep(zpkin, n=3)
assert_equal(len(yout), 1)
assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dstep, system)
def test_dimpulse(self):
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
c = np.asarray([[0.1, 0.3]])
d = np.asarray([[0.0, -0.1, 0.0]])
dt = 0.5
# Because b.shape[1] == 3, dimpulse should result in a tuple of three
# result vectors
yout_imp_truth = (np.asarray([0.0, 0.04, 0.012, -0.0116, -0.03084,
-0.045884, -0.056994, -0.06450548,
-0.068804844, -0.0703091708]),
np.asarray([-0.1, 0.025, 0.017, 0.00985, 0.00362,
-0.0016595, -0.0059917, -0.009407675,
-0.011960704, -0.01372089695]),
np.asarray([0.0, -0.01, -0.003, 0.0029, 0.00771,
0.011471, 0.0142485, 0.01612637,
0.017201211, 0.0175772927]))
tout, yout = dimpulse((a, b, c, d, dt), n=10)
assert_equal(len(yout), 3)
for i in range(0, len(yout)):
assert_equal(yout[i].shape[0], 10)
assert_array_almost_equal(yout[i].flatten(), yout_imp_truth[i])
# Check that the other two inputs (tf, zpk) will work as well
tfin = ([1.0], [1.0, 1.0], 0.5)
yout_tfimpulse = np.asarray([0.0, 1.0, -1.0])
tout, yout = dimpulse(tfin, n=3)
assert_equal(len(yout), 1)
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
tout, yout = dimpulse(zpkin, n=3)
assert_equal(len(yout), 1)
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dimpulse, system)
def test_dlsim_trivial(self):
a = np.array([[0.0]])
b = np.array([[0.0]])
c = np.array([[0.0]])
d = np.array([[0.0]])
n = 5
u = np.zeros(n).reshape(-1, 1)
tout, yout, xout = dlsim((a, b, c, d, 1), u)
assert_array_equal(tout, np.arange(float(n)))
assert_array_equal(yout, np.zeros((n, 1)))
assert_array_equal(xout, np.zeros((n, 1)))
def test_dlsim_simple1d(self):
a = np.array([[0.5]])
b = np.array([[0.0]])
c = np.array([[1.0]])
d = np.array([[0.0]])
n = 5
u = np.zeros(n).reshape(-1, 1)
tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
assert_array_equal(tout, np.arange(float(n)))
expected = (0.5 ** np.arange(float(n))).reshape(-1, 1)
assert_array_equal(yout, expected)
assert_array_equal(xout, expected)
def test_dlsim_simple2d(self):
lambda1 = 0.5
lambda2 = 0.25
a = np.array([[lambda1, 0.0],
[0.0, lambda2]])
b = np.array([[0.0],
[0.0]])
c = np.array([[1.0, 0.0],
[0.0, 1.0]])
d = np.array([[0.0],
[0.0]])
n = 5
u = np.zeros(n).reshape(-1, 1)
tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
assert_array_equal(tout, np.arange(float(n)))
# The analytical solution:
expected = (np.array([lambda1, lambda2]) **
np.arange(float(n)).reshape(-1, 1))
assert_array_equal(yout, expected)
assert_array_equal(xout, expected)
def test_more_step_and_impulse(self):
lambda1 = 0.5
lambda2 = 0.75
a = np.array([[lambda1, 0.0],
[0.0, lambda2]])
b = np.array([[1.0, 0.0],
[0.0, 1.0]])
c = np.array([[1.0, 1.0]])
d = np.array([[0.0, 0.0]])
n = 10
# Check a step response.
ts, ys = dstep((a, b, c, d, 1), n=n)
# Create the exact step response.
stp0 = (1.0 / (1 - lambda1)) * (1.0 - lambda1 ** np.arange(n))
stp1 = (1.0 / (1 - lambda2)) * (1.0 - lambda2 ** np.arange(n))
assert_allclose(ys[0][:, 0], stp0)
assert_allclose(ys[1][:, 0], stp1)
# Check an impulse response with an initial condition.
x0 = np.array([1.0, 1.0])
ti, yi = dimpulse((a, b, c, d, 1), n=n, x0=x0)
# Create the exact impulse response.
imp = (np.array([lambda1, lambda2]) **
np.arange(-1, n + 1).reshape(-1, 1))
imp[0, :] = 0.0
# Analytical solution to impulse response
y0 = imp[:n, 0] + np.dot(imp[1:n + 1, :], x0)
y1 = imp[:n, 1] + np.dot(imp[1:n + 1, :], x0)
assert_allclose(yi[0][:, 0], y0)
assert_allclose(yi[1][:, 0], y1)
# Check that dt=0.1, n=3 gives 3 time values.
system = ([1.0], [1.0, -0.5], 0.1)
t, (y,) = dstep(system, n=3)
assert_allclose(t, [0, 0.1, 0.2])
assert_array_equal(y.T, [[0, 1.0, 1.5]])
t, (y,) = dimpulse(system, n=3)
assert_allclose(t, [0, 0.1, 0.2])
assert_array_equal(y.T, [[0, 1, 0.5]])
class TestDlti(object):
def test_dlti_instantiation(self):
# Test that lti can be instantiated.
dt = 0.05
# TransferFunction
s = dlti([1], [-1], dt=dt)
assert_(isinstance(s, TransferFunction))
assert_(isinstance(s, dlti))
assert_(not isinstance(s, lti))
assert_equal(s.dt, dt)
# ZerosPolesGain
s = dlti(np.array([]), np.array([-1]), 1, dt=dt)
assert_(isinstance(s, ZerosPolesGain))
assert_(isinstance(s, dlti))
assert_(not isinstance(s, lti))
assert_equal(s.dt, dt)
# StateSpace
s = dlti([1], [-1], 1, 3, dt=dt)
assert_(isinstance(s, StateSpace))
assert_(isinstance(s, dlti))
assert_(not isinstance(s, lti))
assert_equal(s.dt, dt)
# Number of inputs
assert_raises(ValueError, dlti, 1)
assert_raises(ValueError, dlti, 1, 1, 1, 1, 1)
class TestStateSpaceDisc(object):
def test_initialization(self):
# Check that all initializations work
dt = 0.05
StateSpace(1, 1, 1, 1, dt=dt)
StateSpace([1], [2], [3], [4], dt=dt)
StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]),
np.array([[1, 0]]), np.array([[0]]), dt=dt)
StateSpace(1, 1, 1, 1, dt=True)
def test_conversion(self):
# Check the conversion functions
s = StateSpace(1, 2, 3, 4, dt=0.05)
assert_(isinstance(s.to_ss(), StateSpace))
assert_(isinstance(s.to_tf(), TransferFunction))
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
# Make sure copies work
assert_(StateSpace(s) is not s)
assert_(s.to_ss() is not s)
def test_properties(self):
# Test setters/getters for cross class properties.
# This implicitly tests to_tf() and to_zpk()
# Getters
s = StateSpace(1, 1, 1, 1, dt=0.05)
assert_equal(s.poles, [1])
assert_equal(s.zeros, [0])
class TestTransferFunction(object):
def test_initialization(self):
# Check that all initializations work
dt = 0.05
TransferFunction(1, 1, dt=dt)
TransferFunction([1], [2], dt=dt)
TransferFunction(np.array([1]), np.array([2]), dt=dt)
TransferFunction(1, 1, dt=True)
def test_conversion(self):
# Check the conversion functions
s = TransferFunction([1, 0], [1, -1], dt=0.05)
assert_(isinstance(s.to_ss(), StateSpace))
assert_(isinstance(s.to_tf(), TransferFunction))
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
# Make sure copies work
assert_(TransferFunction(s) is not s)
assert_(s.to_tf() is not s)
def test_properties(self):
# Test setters/getters for cross class properties.
# This implicitly tests to_ss() and to_zpk()
# Getters
s = TransferFunction([1, 0], [1, -1], dt=0.05)
assert_equal(s.poles, [1])
assert_equal(s.zeros, [0])
class TestZerosPolesGain(object):
def test_initialization(self):
# Check that all initializations work
dt = 0.05
ZerosPolesGain(1, 1, 1, dt=dt)
ZerosPolesGain([1], [2], 1, dt=dt)
ZerosPolesGain(np.array([1]), np.array([2]), 1, dt=dt)
ZerosPolesGain(1, 1, 1, dt=True)
def test_conversion(self):
# Check the conversion functions
s = ZerosPolesGain(1, 2, 3, dt=0.05)
assert_(isinstance(s.to_ss(), StateSpace))
assert_(isinstance(s.to_tf(), TransferFunction))
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
# Make sure copies work
assert_(ZerosPolesGain(s) is not s)
assert_(s.to_zpk() is not s)
class Test_dfreqresp(object):
def test_manual(self):
# Test dfreqresp() real part calculation (manual sanity check).
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
system = TransferFunction(1, [1, -0.2], dt=0.1)
w = [0.1, 1, 10]
w, H = dfreqresp(system, w=w)
# test real
expected_re = [1.2383, 0.4130, -0.7553]
assert_almost_equal(H.real, expected_re, decimal=4)
# test imag
expected_im = [-0.1555, -1.0214, 0.3955]
assert_almost_equal(H.imag, expected_im, decimal=4)
def test_auto(self):
# Test dfreqresp() real part calculation.
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
system = TransferFunction(1, [1, -0.2], dt=0.1)
w = [0.1, 1, 10, 100]
w, H = dfreqresp(system, w=w)
jw = np.exp(w * 1j)
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
# test real
expected_re = y.real
assert_almost_equal(H.real, expected_re)
# test imag
expected_im = y.imag
assert_almost_equal(H.imag, expected_im)
def test_freq_range(self):
# Test that dfreqresp() finds a reasonable frequency range.
# 1st order low-pass filter: H(z) = 1 / (z - 0.2)
# Expected range is from 0 to pi (exclusive).
system = TransferFunction(1, [1, -0.2], dt=0.1)
n = 10
expected_w = np.linspace(0, np.pi, 10, endpoint=False)
w, H = dfreqresp(system, n=n)
assert_almost_equal(w, expected_w)
def test_pole_one(self):
# Test that dfreqresp() doesn't fail on a system with a pole at z = 1.
# discrete integrator, pole at one: H(z) = 1 / (z - 1)
system = TransferFunction([1], [1, -1], dt=0.1)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, message="divide by zero")
sup.filter(RuntimeWarning, message="invalid value encountered")
w, H = dfreqresp(system, n=2)
assert_equal(w[0], 0.) # a fail would give not-a-number
def test_error(self):
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dfreqresp, system)
def test_from_state_space(self):
# H(z) = 2 / (z^3 - 0.5 * z^2)
system_TF = dlti([2], [1, -0.5, 0, 0])
A = np.array([[0.5, 0, 0],
[1, 0, 0],
[0, 1, 0]])
B = np.array([[1, 0, 0]]).T
C = np.array([[0, 0, 2]])
D = 0
system_SS = dlti(A, B, C, D)
w = 10.0**np.arange(-3,0,.5)
with suppress_warnings() as sup:
sup.filter(BadCoefficients)
w1, H1 = dfreqresp(system_TF, w=w)
w2, H2 = dfreqresp(system_SS, w=w)
assert_almost_equal(H1, H2)
def test_from_zpk(self):
# 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
system_ZPK = dlti([],[0.2],0.3)
system_TF = dlti(0.3, [1, -0.2])
w = [0.1, 1, 10, 100]
w1, H1 = dfreqresp(system_ZPK, w=w)
w2, H2 = dfreqresp(system_TF, w=w)
assert_almost_equal(H1, H2)
class Test_bode(object):
def test_manual(self):
# Test dbode() magnitude calculation (manual sanity check).
# 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
dt = 0.1
system = TransferFunction(0.3, [1, -0.2], dt=dt)
w = [0.1, 0.5, 1, np.pi]
w2, mag, phase = dbode(system, w=w)
# Test mag
expected_mag = [-8.5329, -8.8396, -9.6162, -12.0412]
assert_almost_equal(mag, expected_mag, decimal=4)
# Test phase
expected_phase = [-7.1575, -35.2814, -67.9809, -180.0000]
assert_almost_equal(phase, expected_phase, decimal=4)
# Test frequency
assert_equal(np.array(w) / dt, w2)
def test_auto(self):
# Test dbode() magnitude calculation.
# 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
system = TransferFunction(0.3, [1, -0.2], dt=0.1)
w = np.array([0.1, 0.5, 1, np.pi])
w2, mag, phase = dbode(system, w=w)
jw = np.exp(w * 1j)
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
# Test mag
expected_mag = 20.0 * np.log10(abs(y))
assert_almost_equal(mag, expected_mag)
# Test phase
expected_phase = np.rad2deg(np.angle(y))
assert_almost_equal(phase, expected_phase)
def test_range(self):
# Test that dbode() finds a reasonable frequency range.
# 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
dt = 0.1
system = TransferFunction(0.3, [1, -0.2], dt=0.1)
n = 10
# Expected range is from 0 to pi/dt (exclusive).
expected_w = np.linspace(0, np.pi, n, endpoint=False) / dt
w, mag, phase = dbode(system, n=n)
assert_almost_equal(w, expected_w)
def test_pole_one(self):
# Test that dbode() doesn't fail on a system with a pole at z = 1.
# discrete integrator, pole at one: H(z) = 1 / (z - 1)
system = TransferFunction([1], [1, -1], dt=0.1)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, message="divide by zero")
sup.filter(RuntimeWarning, message="invalid value encountered")
w, mag, phase = dbode(system, n=2)
assert_equal(w[0], 0.) # a fail would give not-a-number
def test_imaginary(self):
# dbode() should not fail on a system with pure imaginary poles.
# The test passes if bode doesn't raise an exception.
system = TransferFunction([1], [1, 0, 100], dt=0.1)
dbode(system, n=2)
def test_error(self):
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dbode, system)
class TestTransferFunctionZConversion(object):
"""Test private conversions between 'z' and 'z**-1' polynomials."""
def test_full(self):
# Numerator and denominator same order
num = [2, 3, 4]
den = [5, 6, 7]
num2, den2 = TransferFunction._z_to_zinv(num, den)
assert_equal(num, num2)
assert_equal(den, den2)
num2, den2 = TransferFunction._zinv_to_z(num, den)
assert_equal(num, num2)
assert_equal(den, den2)
def test_numerator(self):
# Numerator lower order than denominator
num = [2, 3]
den = [5, 6, 7]
num2, den2 = TransferFunction._z_to_zinv(num, den)
assert_equal([0, 2, 3], num2)
assert_equal(den, den2)
num2, den2 = TransferFunction._zinv_to_z(num, den)
assert_equal([2, 3, 0], num2)
assert_equal(den, den2)
def test_denominator(self):
# Numerator higher order than denominator
num = [2, 3, 4]
den = [5, 6]
num2, den2 = TransferFunction._z_to_zinv(num, den)
assert_equal(num, num2)
assert_equal([0, 5, 6], den2)
num2, den2 = TransferFunction._zinv_to_z(num, den)
assert_equal(num, num2)
assert_equal([5, 6, 0], den2)

File diff suppressed because it is too large


@@ -0,0 +1,641 @@
import numpy as np
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_equal, assert_,
assert_allclose, assert_warns)
from pytest import raises as assert_raises
import pytest
from scipy.fft import fft
from scipy.special import sinc
from scipy.signal import kaiser_beta, kaiser_atten, kaiserord, \
firwin, firwin2, freqz, remez, firls, minimum_phase
def test_kaiser_beta():
b = kaiser_beta(58.7)
assert_almost_equal(b, 0.1102 * 50.0)
b = kaiser_beta(22.0)
assert_almost_equal(b, 0.5842 + 0.07886)
b = kaiser_beta(21.0)
assert_equal(b, 0.0)
b = kaiser_beta(10.0)
assert_equal(b, 0.0)
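# The expected values above follow the Kaiser design formula implemented by
# kaiser_beta: for a required stopband attenuation A in dB,
#     beta = 0.1102*(A - 8.7)                          if A > 50
#     beta = 0.5842*(A - 21)**0.4 + 0.07886*(A - 21)   if 21 < A <= 50
#     beta = 0.0                                        if A <= 21
# e.g. kaiser_beta(58.7) = 0.1102*(58.7 - 8.7) = 0.1102*50.0.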
def test_kaiser_atten():
a = kaiser_atten(1, 1.0)
assert_equal(a, 7.95)
a = kaiser_atten(2, 1/np.pi)
assert_equal(a, 2.285 + 7.95)
def test_kaiserord():
assert_raises(ValueError, kaiserord, 1.0, 1.0)
numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi)
assert_equal((numtaps, beta), (2, 0.0))
class TestFirwin(object):
def check_response(self, h, expected_response, tol=.05):
N = len(h)
alpha = 0.5 * (N-1)
m = np.arange(0,N) - alpha # time indices of taps
for freq, expected in expected_response:
actual = abs(np.sum(h*np.exp(-1.j*np.pi*m*freq)))
mse = abs(actual-expected)**2
assert_(mse < tol, 'response not as expected, mse=%g > %g'
% (mse, tol))
def test_response(self):
N = 51
f = .5
# increase length just to try even/odd
h = firwin(N, f) # low-pass from 0 to f
self.check_response(h, [(.25,1), (.75,0)])
h = firwin(N+1, f, window='nuttall') # specific window
self.check_response(h, [(.25,1), (.75,0)])
h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass
self.check_response(h, [(.25,0), (.75,1)])
f1, f2, f3, f4 = .2, .4, .6, .8
h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter
self.check_response(h, [(.1,0), (.3,1), (.5,0)])
h = firwin(N+4, [f1, f2]) # band-stop filter
self.check_response(h, [(.1,1), (.3,0), (.5,1)])
h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False)
self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)])
h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter
self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)])
h = firwin(N+7, 0.1, width=.03) # low-pass
self.check_response(h, [(.05,1), (.75,0)])
h = firwin(N+8, 0.1, pass_zero=False) # high-pass
self.check_response(h, [(.05,0), (.75,1)])
def mse(self, h, bands):
"""Compute mean squared error versus ideal response across frequency
band.
h -- coefficients
bands -- list of (left, right) tuples relative to 1==Nyquist of
passbands
"""
w, H = freqz(h, worN=1024)
f = w/np.pi
passIndicator = np.zeros(len(w), bool)
for left, right in bands:
passIndicator |= (f >= left) & (f < right)
Hideal = np.where(passIndicator, 1, 0)
mse = np.mean(abs(abs(H)-Hideal)**2)
return mse
def test_scaling(self):
"""
For one lowpass, bandpass, and highpass example filter, this test
checks two things:
- the mean squared error over the frequency domain of the unscaled
filter is smaller than the scaled filter (true for rectangular
window)
- the response of the scaled filter is exactly unity at the center
of the first passband
"""
N = 11
cases = [
([.5], True, (0, 1)),
([0.2, .6], False, (.4, 1)),
([.5], False, (1, 1)),
]
for cutoff, pass_zero, expected_response in cases:
h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones')
hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones')
if len(cutoff) == 1:
if pass_zero:
cutoff = [0] + cutoff
else:
cutoff = cutoff + [1]
assert_(self.mse(h, [cutoff]) < self.mse(hs, [cutoff]),
'least squares violation')
self.check_response(hs, [expected_response], 1e-12)
class TestFirWinMore(object):
"""Different author, different style, different tests..."""
def test_lowpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False)
taps = firwin(ntaps, **kwargs)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
taps_str = firwin(ntaps, pass_zero='lowpass', **kwargs)
assert_allclose(taps, taps_str)
def test_highpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
# Ensure that ntaps is odd.
ntaps |= 1
kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False)
taps = firwin(ntaps, pass_zero=False, **kwargs)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
taps_str = firwin(ntaps, pass_zero='highpass', **kwargs)
assert_allclose(taps, taps_str)
def test_bandpass(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
kwargs = dict(cutoff=[0.3, 0.7], window=('kaiser', beta), scale=False)
taps = firwin(ntaps, pass_zero=False, **kwargs)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5,
0.7-width/2, 0.7+width/2, 0.8, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
taps_str = firwin(ntaps, pass_zero='bandpass', **kwargs)
assert_allclose(taps, taps_str)
def test_bandstop_multi(self):
width = 0.04
ntaps, beta = kaiserord(120, width)
kwargs = dict(cutoff=[0.2, 0.5, 0.8], window=('kaiser', beta),
scale=False)
taps = firwin(ntaps, **kwargs)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35,
0.5-width/2, 0.5+width/2, 0.65,
0.8-width/2, 0.8+width/2, 0.9, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
decimal=5)
taps_str = firwin(ntaps, pass_zero='bandstop', **kwargs)
assert_allclose(taps, taps_str)
def test_fs_nyq(self):
"""Test the fs and nyq keywords."""
nyquist = 1000
width = 40.0
relative_width = width/nyquist
ntaps, beta = kaiserord(120, relative_width)
taps = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
pass_zero=False, scale=False, fs=2*nyquist)
# Check the symmetry of taps.
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
# Check the gain at a few samples where we know it should be approximately 0 or 1.
freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500,
700-width/2, 700+width/2, 800, 1000])
freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
taps2 = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
pass_zero=False, scale=False, nyq=nyquist)
assert_allclose(taps2, taps)
def test_bad_cutoff(self):
"""Test that invalid cutoff argument raises ValueError."""
# cutoff values must be greater than 0 and less than 1.
assert_raises(ValueError, firwin, 99, -0.5)
assert_raises(ValueError, firwin, 99, 1.5)
# Don't allow 0 or 1 in cutoff.
assert_raises(ValueError, firwin, 99, [0, 0.5])
assert_raises(ValueError, firwin, 99, [0.5, 1])
# cutoff values must be strictly increasing.
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2])
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5])
# Must have at least one cutoff value.
assert_raises(ValueError, firwin, 99, [])
# 2D array not allowed.
assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]])
# cutoff values must be less than nyq.
assert_raises(ValueError, firwin, 99, 50.0, nyq=40)
assert_raises(ValueError, firwin, 99, [10, 20, 30], nyq=25)
assert_raises(ValueError, firwin, 99, 50.0, fs=80)
assert_raises(ValueError, firwin, 99, [10, 20, 30], fs=50)
def test_even_highpass_raises_value_error(self):
"""Test that attempt to create a highpass filter with an even number
of taps raises a ValueError exception."""
assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False)
assert_raises(ValueError, firwin, 40, [.25, 0.5])
def test_bad_pass_zero(self):
"""Test degenerate pass_zero cases."""
with assert_raises(ValueError, match='pass_zero must be'):
firwin(41, 0.5, pass_zero='foo')
with assert_raises(TypeError, match='cannot be interpreted'):
firwin(41, 0.5, pass_zero=1.)
for pass_zero in ('lowpass', 'highpass'):
with assert_raises(ValueError, match='cutoff must have one'):
firwin(41, [0.5, 0.6], pass_zero=pass_zero)
for pass_zero in ('bandpass', 'bandstop'):
with assert_raises(ValueError, match='must have at least two'):
firwin(41, [0.5], pass_zero=pass_zero)
class TestFirwin2(object):
def test_invalid_args(self):
# `freq` and `gain` have different lengths.
with assert_raises(ValueError, match='must be of same length'):
firwin2(50, [0, 0.5, 1], [0.0, 1.0])
# `nfreqs` is less than `ntaps`.
with assert_raises(ValueError, match='ntaps must be less than nfreqs'):
firwin2(50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33)
# Decreasing value in `freq`
with assert_raises(ValueError, match='must be nondecreasing'):
firwin2(50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0])
# Value in `freq` repeated more than once.
with assert_raises(ValueError, match='must not occur more than twice'):
firwin2(50, [0, .1, .1, .1, 1.0], [0.0, 0.5, 0.75, 1.0, 1.0])
# `freq` does not start at 0.0.
with assert_raises(ValueError, match='start with 0'):
firwin2(50, [0.5, 1.0], [0.0, 1.0])
# `freq` does not end at fs/2.
with assert_raises(ValueError, match='end with fs/2'):
firwin2(50, [0.0, 0.5], [0.0, 1.0])
# Value 0 is repeated in `freq`
with assert_raises(ValueError, match='0 must not be repeated'):
firwin2(50, [0.0, 0.0, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])
# Value fs/2 is repeated in `freq`
with assert_raises(ValueError, match='fs/2 must not be repeated'):
firwin2(50, [0.0, 0.5, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0])
# Value in `freq` that is too close to a repeated number
with assert_raises(ValueError, match='cannot contain numbers '
'that are too close'):
firwin2(50, [0.0, 0.5 - np.finfo(float).eps * 0.5, 0.5, 0.5, 1.0],
[1.0, 1.0, 1.0, 0.0, 0.0])
# Type II filter, but the gain at nyquist frequency is not zero.
with assert_raises(ValueError, match='Type II filter'):
firwin2(16, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0])
# Type III filter, but the gains at the Nyquist and zero frequencies are not zero.
with assert_raises(ValueError, match='Type III filter'):
firwin2(17, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0], antisymmetric=True)
with assert_raises(ValueError, match='Type III filter'):
firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True)
with assert_raises(ValueError, match='Type III filter'):
firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 1.0], antisymmetric=True)
# Type IV filter, but the gain at zero frequency is not zero.
with assert_raises(ValueError, match='Type IV filter'):
firwin2(16, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True)
def test01(self):
width = 0.04
beta = 12.0
ntaps = 400
# Filter is 1 from w=0 to w=0.5, then decreases linearly from 1 to 0 as w
# increases from w=0.5 to w=1 (w=1 is the Nyquist frequency).
freq = [0.0, 0.5, 1.0]
gain = [1.0, 1.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2,
0.75, 1.0-width/2])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 1.0, 1.0-width, 0.5, width], decimal=5)
def test02(self):
width = 0.04
beta = 12.0
# ntaps must be odd for positive gain at Nyquist.
ntaps = 401
# An ideal highpass filter.
freq = [0.0, 0.5, 0.5, 1.0]
gain = [0.0, 0.0, 1.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.25, 0.5-width, 0.5+width, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test03(self):
width = 0.02
ntaps, beta = kaiserord(120, width)
# ntaps must be odd for positive gain at Nyquist.
ntaps = int(ntaps) | 1
freq = [0.0, 0.4, 0.4, 0.5, 0.5, 1.0]
gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
freq_samples = np.array([0.0, 0.4-width, 0.4+width, 0.45,
0.5-width, 0.5+width, 0.75, 1.0])
freqs, response = freqz(taps, worN=np.pi*freq_samples)
assert_array_almost_equal(np.abs(response),
[1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
def test04(self):
"""Test firwin2 when window=None."""
ntaps = 5
# Ideal lowpass: gain is 1 on [0,0.5], and 0 on [0.5, 1.0]
freq = [0.0, 0.5, 0.5, 1.0]
gain = [1.0, 1.0, 0.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=None, nfreqs=8193)
alpha = 0.5 * (ntaps - 1)
m = np.arange(0, ntaps) - alpha
h = 0.5 * sinc(0.5 * m)
assert_array_almost_equal(h, taps)
def test05(self):
"""Test firwin2 for calculating Type IV filters"""
ntaps = 1500
freq = [0.0, 1.0]
gain = [0.0, 1.0]
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2:][::-1])
freqs, response = freqz(taps, worN=2048)
assert_array_almost_equal(abs(response), freqs / np.pi, decimal=4)
def test06(self):
"""Test firwin2 for calculating Type III filters"""
ntaps = 1501
freq = [0.0, 0.5, 0.55, 1.0]
gain = [0.0, 0.5, 0.0, 0.0]
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
assert_equal(taps[ntaps // 2], 0.0)
assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2 + 1:][::-1])
freqs, response1 = freqz(taps, worN=2048)
response2 = np.interp(freqs / np.pi, freq, gain)
assert_array_almost_equal(abs(response1), response2, decimal=3)
def test_fs_nyq(self):
taps1 = firwin2(80, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], fs=120.0)
assert_array_almost_equal(taps1, taps2)
taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], nyq=60.0)
assert_array_almost_equal(taps1, taps2)
def test_tuple(self):
taps1 = firwin2(150, (0.0, 0.5, 0.5, 1.0), (1.0, 1.0, 0.0, 0.0))
taps2 = firwin2(150, [0.0, 0.5, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])
assert_array_almost_equal(taps1, taps2)
    def test_input_modification(self):
freq1 = np.array([0.0, 0.5, 0.5, 1.0])
freq2 = np.array(freq1)
firwin2(80, freq1, [1.0, 1.0, 0.0, 0.0])
assert_equal(freq1, freq2)
class TestRemez(object):
def test_bad_args(self):
assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka')
def test_hilbert(self):
N = 11 # number of taps in the filter
a = 0.1 # width of the transition band
        # design a unity-gain Hilbert bandpass filter from a to 0.5-a
h = remez(11, [a, 0.5-a], [1], type='hilbert')
# make sure the filter has correct # of taps
assert_(len(h) == N, "Number of Taps")
# make sure it is type III (anti-symmetric tap coefficients)
assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1])
# Since the requested response is symmetric, all even coefficients
# should be zero (or in this case really small)
assert_((abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero")
# now check the frequency response
w, H = freqz(h, 1)
f = w/2/np.pi
Hmag = abs(H)
# should have a zero at 0 and pi (in this case close to zero)
assert_((Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi")
# check that the pass band is close to unity
idx = np.logical_and(f > a, f < 0.5-a)
assert_((abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity")
def test_compare(self):
# test comparison to MATLAB
k = [0.024590270518440, -0.041314581814658, -0.075943803756711,
-0.003530911231040, 0.193140296954975, 0.373400753484939,
0.373400753484939, 0.193140296954975, -0.003530911231040,
-0.075943803756711, -0.041314581814658, 0.024590270518440]
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], Hz=2.)
assert_allclose(h, k)
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
assert_allclose(h, k)
h = [-0.038976016082299, 0.018704846485491, -0.014644062687875,
0.002879152556419, 0.016849978528150, -0.043276706138248,
0.073641298245579, -0.103908158578635, 0.129770906801075,
-0.147163447297124, 0.153302248456347, -0.147163447297124,
0.129770906801075, -0.103908158578635, 0.073641298245579,
-0.043276706138248, 0.016849978528150, 0.002879152556419,
-0.014644062687875, 0.018704846485491, -0.038976016082299]
assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], Hz=2.), h)
assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.), h)
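# A minimal usage sketch of remez (illustrative helper, not used by the tests
# above): a 13-tap lowpass with a pass band up to 0.15 and a stop band from
# 0.2 for fs=1.0, followed by a quick look at its magnitude response.
def _remez_lowpass_demo():
    taps = remez(13, [0, 0.15, 0.2, 0.5], [1, 0], fs=1.0)
    w, h = freqz(taps, worN=512)
    return w, np.abs(h)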
class TestFirls(object):
def test_bad_args(self):
# even numtaps
assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0])
# odd bands
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0])
# len(bands) != len(desired)
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0])
# non-monotonic bands
assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0])
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], [0] * 4)
assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], [0] * 4)
assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], [0] * 4)
# negative desired
assert_raises(ValueError, firls, 11, [0.1, 0.2], [-1, 1])
# len(weight) != len(pairs)
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [1, 2])
# negative weight
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [-1])
def test_firls(self):
N = 11 # number of taps in the filter
a = 0.1 # width of the transition band
# design a halfband symmetric low-pass filter
h = firls(11, [0, a, 0.5-a, 0.5], [1, 1, 0, 0], fs=1.0)
# make sure the filter has correct # of taps
assert_equal(len(h), N)
# make sure it is symmetric
midx = (N-1) // 2
assert_array_almost_equal(h[:midx], h[:-midx-1:-1])
# make sure the center tap is 0.5
assert_almost_equal(h[midx], 0.5)
# For halfband symmetric, odd coefficients (except the center)
# should be zero (really small)
hodd = np.hstack((h[1:midx:2], h[-midx+1::2]))
assert_array_almost_equal(hodd, 0)
# now check the frequency response
w, H = freqz(h, 1)
f = w/2/np.pi
Hmag = np.abs(H)
# check that the pass band is close to unity
idx = np.logical_and(f > 0, f < a)
assert_array_almost_equal(Hmag[idx], 1, decimal=3)
# check that the stop band is close to zero
idx = np.logical_and(f > 0.5-a, f < 0.5)
assert_array_almost_equal(Hmag[idx], 0, decimal=3)
def test_compare(self):
# compare to OCTAVE output
taps = firls(9, [0, 0.5, 0.55, 1], [1, 1, 0, 0], [1, 2])
# >> taps = firls(8, [0 0.5 0.55 1], [1 1 0 0], [1, 2]);
known_taps = [-6.26930101730182e-04, -1.03354450635036e-01,
-9.81576747564301e-03, 3.17271686090449e-01,
5.11409425599933e-01, 3.17271686090449e-01,
-9.81576747564301e-03, -1.03354450635036e-01,
-6.26930101730182e-04]
assert_allclose(taps, known_taps)
# compare to MATLAB output
taps = firls(11, [0, 0.5, 0.5, 1], [1, 1, 0, 0], [1, 2])
# >> taps = firls(10, [0 0.5 0.5 1], [1 1 0 0], [1, 2]);
known_taps = [
0.058545300496815, -0.014233383714318, -0.104688258464392,
0.012403323025279, 0.317930861136062, 0.488047220029700,
0.317930861136062, 0.012403323025279, -0.104688258464392,
-0.014233383714318, 0.058545300496815]
assert_allclose(taps, known_taps)
# With linear changes:
taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], fs=20)
# >> taps = firls(6, [0, 0.1, 0.2, 0.3, 0.4, 0.5], [1, 0, 0, 1, 1, 0])
known_taps = [
1.156090832768218, -4.1385894727395849, 7.5288619164321826,
-8.5530572592947856, 7.5288619164321826, -4.1385894727395849,
1.156090832768218]
assert_allclose(taps, known_taps)
taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], nyq=10)
assert_allclose(taps, known_taps)
with pytest.raises(ValueError, match='between 0 and 1'):
firls(7, [0, 1], [0, 1], nyq=0.5)
def test_rank_deficient(self):
# solve() runs but warns (only sometimes, so here we don't use match)
x = firls(21, [0, 0.1, 0.9, 1], [1, 1, 0, 0])
w, h = freqz(x, fs=2.)
assert_allclose(np.abs(h[:2]), 1., atol=1e-5)
assert_allclose(np.abs(h[-2:]), 0., atol=1e-6)
# switch to pinvh (tolerances could be higher with longer
# filters, but using shorter ones is faster computationally and
# the idea is the same)
x = firls(101, [0, 0.01, 0.99, 1], [1, 1, 0, 0])
w, h = freqz(x, fs=2.)
mask = w < 0.01
assert mask.sum() > 3
assert_allclose(np.abs(h[mask]), 1., atol=1e-4)
mask = w > 0.99
assert mask.sum() > 3
assert_allclose(np.abs(h[mask]), 0., atol=1e-4)
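# Note on the design criterion tested above: firls(numtaps, bands, desired,
# weight) constructs a Type I (odd-length, symmetric) linear-phase FIR that
# minimizes the weighted integral of the squared error between the achieved
# and desired amplitude over the listed band pairs, which is why an even
# numtaps is rejected.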
class TestMinimumPhase(object):
def test_bad_args(self):
# not enough taps
assert_raises(ValueError, minimum_phase, [1.])
assert_raises(ValueError, minimum_phase, [1., 1.])
assert_raises(ValueError, minimum_phase, np.full(10, 1j))
assert_raises(ValueError, minimum_phase, 'foo')
assert_raises(ValueError, minimum_phase, np.ones(10), n_fft=8)
assert_raises(ValueError, minimum_phase, np.ones(10), method='foo')
assert_warns(RuntimeWarning, minimum_phase, np.arange(3))
def test_homomorphic(self):
# check that it can recover frequency responses of arbitrary
# linear-phase filters
# for some cases we can get the actual filter back
h = [1, -1]
h_new = minimum_phase(np.convolve(h, h[::-1]))
assert_allclose(h_new, h, rtol=0.05)
        # but in general we only guarantee to recover the magnitude: the
        # autocorrelation np.convolve(h, h[::-1]) has magnitude response
        # |H|**2, and the homomorphic method returns a filter of roughly half
        # that length whose magnitude is its square root, i.e. |H| itself
rng = np.random.RandomState(0)
for n in (2, 3, 10, 11, 15, 16, 17, 20, 21, 100, 101):
h = rng.randn(n)
h_new = minimum_phase(np.convolve(h, h[::-1]))
assert_allclose(np.abs(fft(h_new)),
np.abs(fft(h)), rtol=1e-4)
def test_hilbert(self):
# compare to MATLAB output of reference implementation
# f=[0 0.3 0.5 1];
# a=[1 1 0 0];
# h=remez(11,f,a);
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
k = [0.349585548646686, 0.373552164395447, 0.326082685363438,
0.077152207480935, -0.129943946349364, -0.059355880509749]
m = minimum_phase(h, 'hilbert')
assert_allclose(m, k, rtol=5e-3)
# f=[0 0.8 0.9 1];
# a=[0 0 1 1];
# h=remez(20,f,a);
h = remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.)
k = [0.232486803906329, -0.133551833687071, 0.151871456867244,
-0.157957283165866, 0.151739294892963, -0.129293146705090,
0.100787844523204, -0.065832656741252, 0.035361328741024,
-0.014977068692269, -0.158416139047557]
m = minimum_phase(h, 'hilbert', n_fft=2**19)
assert_allclose(m, k, rtol=2e-3)

File diff suppressed because it is too large

View file

@ -0,0 +1,65 @@
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from pytest import raises as assert_raises
from numpy.fft import fft, ifft
from scipy.signal import max_len_seq
class TestMLS(object):
def test_mls_inputs(self):
# can't all be zero state
assert_raises(ValueError, max_len_seq,
10, state=np.zeros(10))
# wrong size state
assert_raises(ValueError, max_len_seq, 10,
state=np.ones(3))
# wrong length
assert_raises(ValueError, max_len_seq, 10, length=-1)
assert_array_equal(max_len_seq(10, length=0)[0], [])
# unknown taps
assert_raises(ValueError, max_len_seq, 64)
# bad taps
assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1])
def test_mls_output(self):
# define some alternate working taps
alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4],
8: [7, 5, 3]}
# assume the other bit levels work, too slow to test higher orders...
for nbits in range(2, 8):
for state in [None, np.round(np.random.rand(nbits))]:
for taps in [None, alt_taps[nbits]]:
if state is not None and np.all(state == 0):
state[0] = 1 # they can't all be zero
orig_m = max_len_seq(nbits, state=state,
taps=taps)[0]
m = 2. * orig_m - 1. # convert to +/- 1 representation
                    # First, make sure we got all 1's or -1's
err_msg = "mls had non binary terms"
assert_array_equal(np.abs(m), np.ones_like(m),
err_msg=err_msg)
# Test via circular cross-correlation, which is just mult.
# in the frequency domain with one signal conjugated
tester = np.real(ifft(fft(m) * np.conj(fft(m))))
out_len = 2**nbits - 1
# impulse amplitude == test_len
err_msg = "mls impulse has incorrect value"
assert_allclose(tester[0], out_len, err_msg=err_msg)
# steady-state is -1
err_msg = "mls steady-state has incorrect value"
assert_allclose(tester[1:], np.full(out_len - 1, -1),
err_msg=err_msg)
# let's do the split thing using a couple options
for n in (1, 2**(nbits - 1)):
m1, s1 = max_len_seq(nbits, state=state, taps=taps,
length=n)
m2, s2 = max_len_seq(nbits, state=s1, taps=taps,
length=1)
m3, s3 = max_len_seq(nbits, state=s2, taps=taps,
length=out_len - n - 1)
new_m = np.concatenate((m1, m2, m3))
assert_array_equal(orig_m, new_m)
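# A minimal sketch of the circular-autocorrelation property exercised above
# (illustrative helper, not called by the tests): an ideal maximum length
# sequence mapped to +/-1 has circular autocorrelation 2**nbits - 1 at lag 0
# and -1 at every other lag.
def _mls_autocorrelation_demo(nbits=6):
    seq = max_len_seq(nbits)[0]
    m = 2. * seq - 1.  # map {0, 1} to {-1, +1}
    return np.real(ifft(fft(m) * np.conj(fft(m))))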

View file

@ -0,0 +1,847 @@
import copy
import numpy as np
from numpy.testing import (
assert_,
assert_equal,
assert_allclose,
assert_array_equal
)
import pytest
from pytest import raises, warns
from scipy.signal._peak_finding import (
argrelmax,
argrelmin,
peak_prominences,
peak_widths,
_unpack_condition_args,
find_peaks,
find_peaks_cwt,
_identify_ridge_lines
)
from scipy.signal._peak_finding_utils import _local_maxima_1d, PeakPropertyWarning
def _gen_gaussians(center_locs, sigmas, total_length):
xdata = np.arange(0, total_length).astype(float)
out_data = np.zeros(total_length, dtype=float)
for ind, sigma in enumerate(sigmas):
tmp = (xdata - center_locs[ind]) / sigma
out_data += np.exp(-(tmp**2))
return out_data
def _gen_gaussians_even(sigmas, total_length):
num_peaks = len(sigmas)
delta = total_length / (num_peaks + 1)
center_locs = np.linspace(delta, total_length - delta, num=num_peaks).astype(int)
out_data = _gen_gaussians(center_locs, sigmas, total_length)
return out_data, center_locs
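# Usage sketch for the helper above (illustrative): three Gaussians with
# sigmas 1, 3 and 5 in a 200-sample window are centred at 1/4, 2/4 and 3/4 of
# its length, e.g.
#   data, locs = _gen_gaussians_even([1.0, 3.0, 5.0], 200)  # locs = [50, 100, 150]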
def _gen_ridge_line(start_locs, max_locs, length, distances, gaps):
"""
Generate coordinates for a ridge line.
    Will be a series of coordinates, starting at start_locs (length 2).
    The maximum distance between any adjacent columns will be
    `max_distance`, the maximum gap between adjacent rows
    will be `max_gap`.
`max_locs` should be the size of the intended matrix. The
ending coordinates are guaranteed to be less than `max_locs`,
although they may not approach `max_locs` at all.
"""
def keep_bounds(num, max_val):
out = max(num, 0)
out = min(out, max_val)
return out
gaps = copy.deepcopy(gaps)
distances = copy.deepcopy(distances)
locs = np.zeros([length, 2], dtype=int)
locs[0, :] = start_locs
total_length = max_locs[0] - start_locs[0] - sum(gaps)
if total_length < length:
raise ValueError('Cannot generate ridge line according to constraints')
dist_int = length / len(distances) - 1
gap_int = length / len(gaps) - 1
for ind in range(1, length):
nextcol = locs[ind - 1, 1]
nextrow = locs[ind - 1, 0] + 1
if (ind % dist_int == 0) and (len(distances) > 0):
nextcol += ((-1)**ind)*distances.pop()
if (ind % gap_int == 0) and (len(gaps) > 0):
nextrow += gaps.pop()
nextrow = keep_bounds(nextrow, max_locs[0])
nextcol = keep_bounds(nextcol, max_locs[1])
locs[ind, :] = [nextrow, nextcol]
return [locs[:, 0], locs[:, 1]]
class TestLocalMaxima1d(object):
def test_empty(self):
"""Test with empty signal."""
x = np.array([], dtype=np.float64)
for array in _local_maxima_1d(x):
assert_equal(array, np.array([]))
assert_(array.base is None)
def test_linear(self):
"""Test with linear signal."""
x = np.linspace(0, 100)
for array in _local_maxima_1d(x):
assert_equal(array, np.array([]))
assert_(array.base is None)
def test_simple(self):
"""Test with simple signal."""
x = np.linspace(-10, 10, 50)
x[2::3] += 1
expected = np.arange(2, 50, 3)
for array in _local_maxima_1d(x):
# For plateaus of size 1, the edges are identical with the
# midpoints
assert_equal(array, expected)
assert_(array.base is None)
def test_flat_maxima(self):
"""Test if flat maxima are detected correctly."""
x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 2.99, 4, 4, 4, 4, -10,
-5, -5, -5, -5, -5, -10])
midpoints, left_edges, right_edges = _local_maxima_1d(x)
assert_equal(midpoints, np.array([2, 4, 8, 12, 18]))
assert_equal(left_edges, np.array([2, 4, 7, 11, 16]))
assert_equal(right_edges, np.array([2, 5, 9, 14, 20]))
@pytest.mark.parametrize('x', [
np.array([1., 0, 2]),
np.array([3., 3, 0, 4, 4]),
np.array([5., 5, 5, 0, 6, 6, 6]),
])
def test_signal_edges(self, x):
"""Test if behavior on signal edges is correct."""
for array in _local_maxima_1d(x):
assert_equal(array, np.array([]))
assert_(array.base is None)
def test_exceptions(self):
"""Test input validation and raised exceptions."""
with raises(ValueError, match="wrong number of dimensions"):
_local_maxima_1d(np.ones((1, 1)))
with raises(ValueError, match="expected 'float64_t'"):
_local_maxima_1d(np.ones(1, dtype=int))
with raises(TypeError, match="list"):
_local_maxima_1d([1., 2.])
with raises(TypeError, match="'x' must not be None"):
_local_maxima_1d(None)
class TestRidgeLines(object):
def test_empty(self):
test_matr = np.zeros([20, 100])
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
assert_(len(lines) == 0)
def test_minimal(self):
test_matr = np.zeros([20, 100])
test_matr[0, 10] = 1
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
assert_(len(lines) == 1)
test_matr = np.zeros([20, 100])
test_matr[0:2, 10] = 1
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
assert_(len(lines) == 1)
def test_single_pass(self):
distances = [0, 1, 2, 5]
gaps = [0, 1, 2, 0, 1]
test_matr = np.zeros([20, 50]) + 1e-12
length = 12
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
test_matr[line[0], line[1]] = 1
max_distances = np.full(20, max(distances))
identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1)
assert_array_equal(identified_lines, [line])
def test_single_bigdist(self):
distances = [0, 1, 2, 5]
gaps = [0, 1, 2, 4]
test_matr = np.zeros([20, 50])
length = 12
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
test_matr[line[0], line[1]] = 1
max_dist = 3
max_distances = np.full(20, max_dist)
#This should get 2 lines, since the distance is too large
identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1)
assert_(len(identified_lines) == 2)
for iline in identified_lines:
adists = np.diff(iline[1])
np.testing.assert_array_less(np.abs(adists), max_dist)
agaps = np.diff(iline[0])
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
def test_single_biggap(self):
distances = [0, 1, 2, 5]
max_gap = 3
gaps = [0, 4, 2, 1]
test_matr = np.zeros([20, 50])
length = 12
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
test_matr[line[0], line[1]] = 1
max_dist = 6
max_distances = np.full(20, max_dist)
#This should get 2 lines, since the gap is too large
identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
assert_(len(identified_lines) == 2)
for iline in identified_lines:
adists = np.diff(iline[1])
np.testing.assert_array_less(np.abs(adists), max_dist)
agaps = np.diff(iline[0])
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
def test_single_biggaps(self):
distances = [0]
max_gap = 1
gaps = [3, 6]
test_matr = np.zeros([50, 50])
length = 30
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
test_matr[line[0], line[1]] = 1
max_dist = 1
max_distances = np.full(50, max_dist)
#This should get 3 lines, since the gaps are too large
identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
assert_(len(identified_lines) == 3)
for iline in identified_lines:
adists = np.diff(iline[1])
np.testing.assert_array_less(np.abs(adists), max_dist)
agaps = np.diff(iline[0])
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
class TestArgrel(object):
def test_empty(self):
# Regression test for gh-2832.
# When there are no relative extrema, make sure that
# the number of empty arrays returned matches the
# dimension of the input.
empty_array = np.array([], dtype=int)
z1 = np.zeros(5)
i = argrelmin(z1)
assert_equal(len(i), 1)
assert_array_equal(i[0], empty_array)
z2 = np.zeros((3,5))
row, col = argrelmin(z2, axis=0)
assert_array_equal(row, empty_array)
assert_array_equal(col, empty_array)
row, col = argrelmin(z2, axis=1)
assert_array_equal(row, empty_array)
assert_array_equal(col, empty_array)
def test_basic(self):
# Note: the docstrings for the argrel{min,max,extrema} functions
# do not give a guarantee of the order of the indices, so we'll
# sort them before testing.
x = np.array([[1, 2, 2, 3, 2],
[2, 1, 2, 2, 3],
[3, 2, 1, 2, 2],
[2, 3, 2, 1, 2],
[1, 2, 3, 2, 1]])
row, col = argrelmax(x, axis=0)
order = np.argsort(row)
assert_equal(row[order], [1, 2, 3])
assert_equal(col[order], [4, 0, 1])
row, col = argrelmax(x, axis=1)
order = np.argsort(row)
assert_equal(row[order], [0, 3, 4])
assert_equal(col[order], [3, 1, 2])
row, col = argrelmin(x, axis=0)
order = np.argsort(row)
assert_equal(row[order], [1, 2, 3])
assert_equal(col[order], [1, 2, 3])
row, col = argrelmin(x, axis=1)
order = np.argsort(row)
assert_equal(row[order], [1, 2, 3])
assert_equal(col[order], [1, 2, 3])
def test_highorder(self):
order = 2
sigmas = [1.0, 2.0, 10.0, 5.0, 15.0]
test_data, act_locs = _gen_gaussians_even(sigmas, 500)
test_data[act_locs + order] = test_data[act_locs]*0.99999
test_data[act_locs - order] = test_data[act_locs]*0.99999
rel_max_locs = argrelmax(test_data, order=order, mode='clip')[0]
assert_(len(rel_max_locs) == len(act_locs))
assert_((rel_max_locs == act_locs).all())
def test_2d_gaussians(self):
sigmas = [1.0, 2.0, 10.0]
test_data, act_locs = _gen_gaussians_even(sigmas, 100)
rot_factor = 20
rot_range = np.arange(0, len(test_data)) - rot_factor
test_data_2 = np.vstack([test_data, test_data[rot_range]])
rel_max_rows, rel_max_cols = argrelmax(test_data_2, axis=1, order=1)
for rw in range(0, test_data_2.shape[0]):
inds = (rel_max_rows == rw)
assert_(len(rel_max_cols[inds]) == len(act_locs))
assert_((act_locs == (rel_max_cols[inds] - rot_factor*rw)).all())
class TestPeakProminences(object):
def test_empty(self):
"""
Test if an empty array is returned if no peaks are provided.
"""
out = peak_prominences([1, 2, 3], [])
for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
assert_(arr.size == 0)
assert_(arr.dtype == dtype)
out = peak_prominences([], [])
for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
assert_(arr.size == 0)
assert_(arr.dtype == dtype)
def test_basic(self):
"""
Test if height of prominences is correctly calculated in signal with
rising baseline (peak widths are 1 sample).
"""
# Prepare basic signal
x = np.array([-1, 1.2, 1.2, 1, 3.2, 1.3, 2.88, 2.1])
peaks = np.array([1, 2, 4, 6])
lbases = np.array([0, 0, 0, 5])
rbases = np.array([3, 3, 5, 7])
proms = x[peaks] - np.max([x[lbases], x[rbases]], axis=0)
# Test if calculation matches handcrafted result
out = peak_prominences(x, peaks)
assert_equal(out[0], proms)
assert_equal(out[1], lbases)
assert_equal(out[2], rbases)
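        # For reference: each base used above is the minimum of the signal
        # between the peak and the nearest sample on that side that exceeds
        # the peak's height (or the signal edge), and the prominence is the
        # peak height minus the higher of the two bases.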
def test_edge_cases(self):
"""
Test edge cases.
"""
# Peaks have same height, prominence and bases
x = [0, 2, 1, 2, 1, 2, 0]
peaks = [1, 3, 5]
proms, lbases, rbases = peak_prominences(x, peaks)
assert_equal(proms, [2, 2, 2])
assert_equal(lbases, [0, 0, 0])
assert_equal(rbases, [6, 6, 6])
# Peaks have same height & prominence but different bases
x = [0, 1, 0, 1, 0, 1, 0]
peaks = np.array([1, 3, 5])
proms, lbases, rbases = peak_prominences(x, peaks)
assert_equal(proms, [1, 1, 1])
assert_equal(lbases, peaks - 1)
assert_equal(rbases, peaks + 1)
def test_non_contiguous(self):
"""
Test with non-C-contiguous input arrays.
"""
x = np.repeat([-9, 9, 9, 0, 3, 1], 2)
peaks = np.repeat([1, 2, 4], 2)
proms, lbases, rbases = peak_prominences(x[::2], peaks[::2])
assert_equal(proms, [9, 9, 2])
assert_equal(lbases, [0, 0, 3])
assert_equal(rbases, [3, 3, 5])
def test_wlen(self):
"""
Test if wlen actually shrinks the evaluation range correctly.
"""
x = [0, 1, 2, 3, 1, 0, -1]
peak = [3]
# Test rounding behavior of wlen
assert_equal(peak_prominences(x, peak), [3., 0, 6])
for wlen, i in [(8, 0), (7, 0), (6, 0), (5, 1), (3.2, 1), (3, 2), (1.1, 2)]:
assert_equal(peak_prominences(x, peak, wlen), [3. - i, 0 + i, 6 - i])
def test_exceptions(self):
"""
Verify that exceptions and warnings are raised.
"""
# x with dimension > 1
with raises(ValueError, match='1-D array'):
peak_prominences([[0, 1, 1, 0]], [1, 2])
# peaks with dimension > 1
with raises(ValueError, match='1-D array'):
peak_prominences([0, 1, 1, 0], [[1, 2]])
# x with dimension < 1
with raises(ValueError, match='1-D array'):
peak_prominences(3, [0,])
        # empty x with peaks supplied
with raises(ValueError, match='not a valid index'):
peak_prominences([], [0])
# invalid indices with non-empty x
for p in [-100, -1, 3, 1000]:
with raises(ValueError, match='not a valid index'):
peak_prominences([1, 0, 2], [p])
# peaks is not cast-able to np.intp
with raises(TypeError, match='cannot safely cast'):
peak_prominences([0, 1, 1, 0], [1.1, 2.3])
# wlen < 3
with raises(ValueError, match='wlen'):
peak_prominences(np.arange(10), [3, 5], wlen=1)
def test_warnings(self):
"""
Verify that appropriate warnings are raised.
"""
msg = "some peaks have a prominence of 0"
for p in [0, 1, 2]:
with warns(PeakPropertyWarning, match=msg):
peak_prominences([1, 0, 2], [p,])
with warns(PeakPropertyWarning, match=msg):
peak_prominences([0, 1, 1, 1, 0], [2], wlen=2)
class TestPeakWidths(object):
def test_empty(self):
"""
Test if an empty array is returned if no peaks are provided.
"""
widths = peak_widths([], [])[0]
assert_(isinstance(widths, np.ndarray))
assert_equal(widths.size, 0)
widths = peak_widths([1, 2, 3], [])[0]
assert_(isinstance(widths, np.ndarray))
assert_equal(widths.size, 0)
out = peak_widths([], [])
for arr in out:
assert_(isinstance(arr, np.ndarray))
assert_equal(arr.size, 0)
@pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
def test_basic(self):
"""
Test a simple use case with easy to verify results at different relative
heights.
"""
x = np.array([1, 0, 1, 2, 1, 0, -1])
prominence = 2
for rel_height, width_true, lip_true, rip_true in [
(0., 0., 3., 3.), # raises warning
(0.25, 1., 2.5, 3.5),
(0.5, 2., 2., 4.),
(0.75, 3., 1.5, 4.5),
(1., 4., 1., 5.),
(2., 5., 1., 6.),
(3., 5., 1., 6.)
]:
width_calc, height, lip_calc, rip_calc = peak_widths(
x, [3], rel_height)
assert_allclose(width_calc, width_true)
assert_allclose(height, 2 - rel_height * prominence)
assert_allclose(lip_calc, lip_true)
assert_allclose(rip_calc, rip_true)
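        # The relationship checked above: each peak is evaluated at
        # height = peak_height - rel_height * prominence, and the width is
        # the distance between the two points (found by linear interpolation)
        # where the signal crosses that level on either side of the peak.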
def test_non_contiguous(self):
"""
Test with non-C-contiguous input arrays.
"""
x = np.repeat([0, 100, 50], 4)
peaks = np.repeat([1], 3)
result = peak_widths(x[::4], peaks[::3])
assert_equal(result, [0.75, 75, 0.75, 1.5])
def test_exceptions(self):
"""
Verify that argument validation works as intended.
"""
with raises(ValueError, match='1-D array'):
# x with dimension > 1
peak_widths(np.zeros((3, 4)), np.ones(3))
with raises(ValueError, match='1-D array'):
# x with dimension < 1
peak_widths(3, [0])
with raises(ValueError, match='1-D array'):
# peaks with dimension > 1
peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp))
with raises(ValueError, match='1-D array'):
# peaks with dimension < 1
peak_widths(np.arange(10), 3)
with raises(ValueError, match='not a valid index'):
# peak pos exceeds x.size
peak_widths(np.arange(10), [8, 11])
with raises(ValueError, match='not a valid index'):
# empty x with peaks supplied
peak_widths([], [1, 2])
with raises(TypeError, match='cannot safely cast'):
# peak cannot be safely casted to intp
peak_widths(np.arange(10), [1.1, 2.3])
with raises(ValueError, match='rel_height'):
# rel_height is < 0
peak_widths([0, 1, 0, 1, 0], [1, 3], rel_height=-1)
with raises(TypeError, match='None'):
# prominence data contains None
peak_widths([1, 2, 1], [1], prominence_data=(None, None, None))
def test_warnings(self):
"""
Verify that appropriate warnings are raised.
"""
msg = "some peaks have a width of 0"
with warns(PeakPropertyWarning, match=msg):
# Case: rel_height is 0
peak_widths([0, 1, 0], [1], rel_height=0)
with warns(PeakPropertyWarning, match=msg):
# Case: prominence is 0 and bases are identical
peak_widths(
[0, 1, 1, 1, 0], [2],
prominence_data=(np.array([0.], np.float64),
np.array([2], np.intp),
np.array([2], np.intp))
)
def test_mismatching_prominence_data(self):
"""Test with mismatching peak and / or prominence data."""
x = [0, 1, 0]
peak = [1]
for i, (prominences, left_bases, right_bases) in enumerate([
((1.,), (-1,), (2,)), # left base not in x
((1.,), (0,), (3,)), # right base not in x
((1.,), (2,), (0,)), # swapped bases same as peak
((1., 1.), (0, 0), (2, 2)), # array shapes don't match peaks
((1., 1.), (0,), (2,)), # arrays with different shapes
((1.,), (0, 0), (2,)), # arrays with different shapes
((1.,), (0,), (2, 2)) # arrays with different shapes
]):
            # Make sure the input matches the output of signal.peak_prominences
prominence_data = (np.array(prominences, dtype=np.float64),
np.array(left_bases, dtype=np.intp),
np.array(right_bases, dtype=np.intp))
# Test for correct exception
if i < 3:
match = "prominence data is invalid for peak"
else:
match = "arrays in `prominence_data` must have the same shape"
with raises(ValueError, match=match):
peak_widths(x, peak, prominence_data=prominence_data)
@pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
def test_intersection_rules(self):
"""Test if x == eval_height counts as an intersection."""
        # Flat peak with two possible intersection points if evaluated at 1
x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0]
# relative height is 0 -> width is 0 as well, raises warning
assert_allclose(peak_widths(x, peaks=[5], rel_height=0),
[(0.,), (3.,), (5.,), (5.,)])
# width_height == x counts as intersection -> nearest 1 is chosen
assert_allclose(peak_widths(x, peaks=[5], rel_height=2/3),
[(4.,), (1.,), (3.,), (7.,)])
def test_unpack_condition_args():
"""
Verify parsing of condition arguments for `scipy.signal.find_peaks` function.
"""
x = np.arange(10)
amin_true = x
amax_true = amin_true + 10
peaks = amin_true[1::2]
# Test unpacking with None or interval
assert_((None, None) == _unpack_condition_args((None, None), x, peaks))
assert_((1, None) == _unpack_condition_args(1, x, peaks))
assert_((1, None) == _unpack_condition_args((1, None), x, peaks))
assert_((None, 2) == _unpack_condition_args((None, 2), x, peaks))
assert_((3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks))
# Test if borders are correctly reduced with `peaks`
amin_calc, amax_calc = _unpack_condition_args((amin_true, amax_true), x, peaks)
assert_equal(amin_calc, amin_true[peaks])
assert_equal(amax_calc, amax_true[peaks])
# Test raises if array borders don't match x
with raises(ValueError, match="array size of lower"):
_unpack_condition_args(amin_true, np.arange(11), peaks)
with raises(ValueError, match="array size of upper"):
_unpack_condition_args((None, amin_true), np.arange(11), peaks)
class TestFindPeaks(object):
# Keys of optionally returned properties
property_keys = {'peak_heights', 'left_thresholds', 'right_thresholds',
'prominences', 'left_bases', 'right_bases', 'widths',
'width_heights', 'left_ips', 'right_ips'}
def test_constant(self):
"""
Test behavior for signal without local maxima.
"""
open_interval = (None, None)
peaks, props = find_peaks(np.ones(10),
height=open_interval, threshold=open_interval,
prominence=open_interval, width=open_interval)
assert_(peaks.size == 0)
for key in self.property_keys:
assert_(props[key].size == 0)
def test_plateau_size(self):
"""
Test plateau size condition for peaks.
"""
# Prepare signal with peaks with peak_height == plateau_size
plateau_sizes = np.array([1, 2, 3, 4, 8, 20, 111])
x = np.zeros(plateau_sizes.size * 2 + 1)
x[1::2] = plateau_sizes
repeats = np.ones(x.size, dtype=int)
repeats[1::2] = x[1::2]
x = np.repeat(x, repeats)
# Test full output
peaks, props = find_peaks(x, plateau_size=(None, None))
assert_equal(peaks, [1, 3, 7, 11, 18, 33, 100])
assert_equal(props["plateau_sizes"], plateau_sizes)
assert_equal(props["left_edges"], peaks - (plateau_sizes - 1) // 2)
assert_equal(props["right_edges"], peaks + plateau_sizes // 2)
# Test conditions
assert_equal(find_peaks(x, plateau_size=4)[0], [11, 18, 33, 100])
assert_equal(find_peaks(x, plateau_size=(None, 3.5))[0], [1, 3, 7])
assert_equal(find_peaks(x, plateau_size=(5, 50))[0], [18, 33])
def test_height_condition(self):
"""
Test height condition for peaks.
"""
x = (0., 1/3, 0., 2.5, 0, 4., 0)
peaks, props = find_peaks(x, height=(None, None))
assert_equal(peaks, np.array([1, 3, 5]))
assert_equal(props['peak_heights'], np.array([1/3, 2.5, 4.]))
assert_equal(find_peaks(x, height=0.5)[0], np.array([3, 5]))
assert_equal(find_peaks(x, height=(None, 3))[0], np.array([1, 3]))
assert_equal(find_peaks(x, height=(2, 3))[0], np.array([3]))
def test_threshold_condition(self):
"""
Test threshold condition for peaks.
"""
x = (0, 2, 1, 4, -1)
peaks, props = find_peaks(x, threshold=(None, None))
assert_equal(peaks, np.array([1, 3]))
assert_equal(props['left_thresholds'], np.array([2, 3]))
assert_equal(props['right_thresholds'], np.array([1, 5]))
assert_equal(find_peaks(x, threshold=2)[0], np.array([3]))
assert_equal(find_peaks(x, threshold=3.5)[0], np.array([]))
assert_equal(find_peaks(x, threshold=(None, 5))[0], np.array([1, 3]))
assert_equal(find_peaks(x, threshold=(None, 4))[0], np.array([1]))
assert_equal(find_peaks(x, threshold=(2, 4))[0], np.array([]))
def test_distance_condition(self):
"""
Test distance condition for peaks.
"""
# Peaks of different height with constant distance 3
peaks_all = np.arange(1, 21, 3)
x = np.zeros(21)
x[peaks_all] += np.linspace(1, 2, peaks_all.size)
# Test if peaks with "minimal" distance are still selected (distance = 3)
assert_equal(find_peaks(x, distance=3)[0], peaks_all)
# Select every second peak (distance > 3)
peaks_subset = find_peaks(x, distance=3.0001)[0]
# Test if peaks_subset is subset of peaks_all
assert_(
np.setdiff1d(peaks_subset, peaks_all, assume_unique=True).size == 0
)
# Test if every second peak was removed
assert_equal(np.diff(peaks_subset), 6)
# Test priority of peak removal
x = [-2, 1, -1, 0, -3]
peaks_subset = find_peaks(x, distance=10)[0] # use distance > x size
assert_(peaks_subset.size == 1 and peaks_subset[0] == 1)
def test_prominence_condition(self):
"""
Test prominence condition for peaks.
"""
x = np.linspace(0, 10, 100)
peaks_true = np.arange(1, 99, 2)
offset = np.linspace(1, 10, peaks_true.size)
x[peaks_true] += offset
prominences = x[peaks_true] - x[peaks_true + 1]
interval = (3, 9)
keep = np.nonzero(
(interval[0] <= prominences) & (prominences <= interval[1]))
peaks_calc, properties = find_peaks(x, prominence=interval)
assert_equal(peaks_calc, peaks_true[keep])
assert_equal(properties['prominences'], prominences[keep])
assert_equal(properties['left_bases'], 0)
assert_equal(properties['right_bases'], peaks_true[keep] + 1)
def test_width_condition(self):
"""
Test width condition for peaks.
"""
x = np.array([1, 0, 1, 2, 1, 0, -1, 4, 0])
peaks, props = find_peaks(x, width=(None, 2), rel_height=0.75)
assert_equal(peaks.size, 1)
assert_equal(peaks, 7)
assert_allclose(props['widths'], 1.35)
assert_allclose(props['width_heights'], 1.)
assert_allclose(props['left_ips'], 6.4)
assert_allclose(props['right_ips'], 7.75)
def test_properties(self):
"""
Test returned properties.
"""
open_interval = (None, None)
x = [0, 1, 0, 2, 1.5, 0, 3, 0, 5, 9]
peaks, props = find_peaks(x,
height=open_interval, threshold=open_interval,
prominence=open_interval, width=open_interval)
assert_(len(props) == len(self.property_keys))
for key in self.property_keys:
assert_(peaks.size == props[key].size)
def test_raises(self):
"""
Test exceptions raised by function.
"""
with raises(ValueError, match="1-D array"):
find_peaks(np.array(1))
with raises(ValueError, match="1-D array"):
find_peaks(np.ones((2, 2)))
with raises(ValueError, match="distance"):
find_peaks(np.arange(10), distance=-1)
@pytest.mark.filterwarnings("ignore:some peaks have a prominence of 0",
"ignore:some peaks have a width of 0")
def test_wlen_smaller_plateau(self):
"""
Test behavior of prominence and width calculation if the given window
length is smaller than a peak's plateau size.
Regression test for gh-9110.
"""
peaks, props = find_peaks([0, 1, 1, 1, 0], prominence=(None, None),
width=(None, None), wlen=2)
assert_equal(peaks, 2)
assert_equal(props["prominences"], 0)
assert_equal(props["widths"], 0)
assert_equal(props["width_heights"], 1)
for key in ("left_bases", "right_bases", "left_ips", "right_ips"):
assert_equal(props[key], peaks)
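# A minimal usage sketch of the interface exercised above (illustrative, not
# asserted anywhere): find_peaks returns the indices of the local maxima that
# satisfy every given condition, plus a dict of the matching properties, e.g.
#   peaks, props = find_peaks([0, 2, 0, 1, 0, 3, 0], height=1.5)
#   # peaks -> array([1, 5]), props['peak_heights'] -> array([2., 3.])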
class TestFindPeaksCwt(object):
def test_find_peaks_exact(self):
"""
Generate a series of gaussians and attempt to find the peak locations.
"""
sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
num_points = 500
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
widths = np.arange(0.1, max(sigmas))
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0,
min_length=None)
np.testing.assert_array_equal(found_locs, act_locs,
"Found maximum locations did not equal those expected")
def test_find_peaks_withnoise(self):
"""
Verify that peak locations are (approximately) found
for a series of gaussians with added noise.
"""
sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
num_points = 500
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
widths = np.arange(0.1, max(sigmas))
noise_amp = 0.07
np.random.seed(18181911)
test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
found_locs = find_peaks_cwt(test_data, widths, min_length=15,
gap_thresh=1, min_snr=noise_amp / 5)
        np.testing.assert_equal(len(found_locs), len(act_locs), 'Different number '
                                'of peaks found than expected')
diffs = np.abs(found_locs - act_locs)
max_diffs = np.array(sigmas) / 5
        np.testing.assert_array_less(diffs, max_diffs, 'Maximum location differed '
                                     'by more than %s' % (max_diffs))
def test_find_peaks_nopeak(self):
"""
Verify that no peak is found in
data that's just noise.
"""
noise_amp = 1.0
num_points = 100
np.random.seed(181819141)
test_data = (np.random.rand(num_points) - 0.5)*(2*noise_amp)
widths = np.arange(10, 50)
found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30)
np.testing.assert_equal(len(found_locs), 0)
def test_find_peaks_window_size(self):
"""
Verify that window_size is passed correctly to private function and
affects the result.
"""
sigmas = [2.0, 2.0]
num_points = 1000
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
widths = np.arange(0.1, max(sigmas), 0.2)
noise_amp = 0.05
np.random.seed(18181911)
test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
# Possibly contrived negative region to throw off peak finding
# when window_size is too large
test_data[250:320] -= 1
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3,
min_length=None, window_size=None)
with pytest.raises(AssertionError):
assert found_locs.size == act_locs.size
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3,
min_length=None, window_size=20)
assert found_locs.size == act_locs.size

View file

@ -0,0 +1,301 @@
import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
assert_almost_equal, assert_array_equal,
assert_array_almost_equal)
from scipy.ndimage import convolve1d
from scipy.signal import savgol_coeffs, savgol_filter
from scipy.signal._savitzky_golay import _polyder
def check_polyder(p, m, expected):
dp = _polyder(p, m)
assert_array_equal(dp, expected)
def test_polyder():
cases = [
([5], 0, [5]),
([5], 1, [0]),
([3, 2, 1], 0, [3, 2, 1]),
([3, 2, 1], 1, [6, 2]),
([3, 2, 1], 2, [6]),
([3, 2, 1], 3, [0]),
([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),
([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),
([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),
([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),
]
for p, m, expected in cases:
check_polyder(np.array(p).T, m, np.array(expected).T)
#--------------------------------------------------------------------
# savgol_coeffs tests
#--------------------------------------------------------------------
def alt_sg_coeffs(window_length, polyorder, pos):
"""This is an alternative implementation of the SG coefficients.
It uses numpy.polyfit and numpy.polyval. The results should be
equivalent to those of savgol_coeffs(), but this implementation
is slower.
window_length should be odd.
"""
if pos is None:
pos = window_length // 2
t = np.arange(window_length)
unit = (t == pos).astype(int)
h = np.polyval(np.polyfit(t, unit, polyorder), t)
return h
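# A quick concrete check of the helper above (illustrative): the classical
# centred 5-point quadratic Savitzky-Golay smoother has coefficients
# (-3, 12, 17, 12, -3)/35, so both
#   alt_sg_coeffs(5, 2, pos=None)
#   savgol_coeffs(5, 2, use='dot')
# should return approximately [-0.0857, 0.3429, 0.4857, 0.3429, -0.0857].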
def test_sg_coeffs_trivial():
# Test a trivial case of savgol_coeffs: polyorder = window_length - 1
h = savgol_coeffs(1, 0)
assert_allclose(h, [1])
h = savgol_coeffs(3, 2)
assert_allclose(h, [0, 1, 0], atol=1e-10)
h = savgol_coeffs(5, 4)
assert_allclose(h, [0, 0, 1, 0, 0], atol=1e-10)
h = savgol_coeffs(5, 4, pos=1)
assert_allclose(h, [0, 0, 0, 1, 0], atol=1e-10)
h = savgol_coeffs(5, 4, pos=1, use='dot')
assert_allclose(h, [0, 1, 0, 0, 0], atol=1e-10)
def compare_coeffs_to_alt(window_length, order):
# For the given window_length and order, compare the results
# of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1.
# Also include pos=None.
for pos in [None] + list(range(window_length)):
h1 = savgol_coeffs(window_length, order, pos=pos, use='dot')
h2 = alt_sg_coeffs(window_length, order, pos=pos)
assert_allclose(h1, h2, atol=1e-10,
err_msg=("window_length = %d, order = %d, pos = %s" %
(window_length, order, pos)))
def test_sg_coeffs_compare():
# Compare savgol_coeffs() to alt_sg_coeffs().
for window_length in range(1, 8, 2):
for order in range(window_length):
compare_coeffs_to_alt(window_length, order)
def test_sg_coeffs_exact():
polyorder = 4
window_length = 9
halflen = window_length // 2
x = np.linspace(0, 21, 43)
delta = x[1] - x[0]
# The data is a cubic polynomial. We'll use an order 4
# SG filter, so the filtered values should equal the input data
# (except within half window_length of the edges).
y = 0.5 * x ** 3 - x
h = savgol_coeffs(window_length, polyorder)
y0 = convolve1d(y, h)
assert_allclose(y0[halflen:-halflen], y[halflen:-halflen])
# Check the same input, but use deriv=1. dy is the exact result.
dy = 1.5 * x ** 2 - 1
h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta)
y1 = convolve1d(y, h)
assert_allclose(y1[halflen:-halflen], dy[halflen:-halflen])
# Check the same input, but use deriv=2. d2y is the exact result.
d2y = 3.0 * x
h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta)
y2 = convolve1d(y, h)
assert_allclose(y2[halflen:-halflen], d2y[halflen:-halflen])
def test_sg_coeffs_deriv():
# The data in `x` is a sampled parabola, so using savgol_coeffs with an
# order 2 or higher polynomial should give exact results.
i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])
x = i ** 2 / 4
dx = i / 2
d2x = np.full_like(i, 0.5)
for pos in range(x.size):
coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot')
assert_allclose(coeffs0.dot(x), x[pos], atol=1e-10)
coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1)
assert_allclose(coeffs1.dot(x), dx[pos], atol=1e-10)
coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2)
assert_allclose(coeffs2.dot(x), d2x[pos], atol=1e-10)
def test_sg_coeffs_deriv_gt_polyorder():
"""
If deriv > polyorder, the coefficients should be all 0.
This is a regression test for a bug where, e.g.,
savgol_coeffs(5, polyorder=1, deriv=2)
raised an error.
"""
coeffs = savgol_coeffs(5, polyorder=1, deriv=2)
assert_array_equal(coeffs, np.zeros(5))
coeffs = savgol_coeffs(7, polyorder=4, deriv=6)
assert_array_equal(coeffs, np.zeros(7))
def test_sg_coeffs_large():
# Test that for large values of window_length and polyorder the array of
# coefficients returned is symmetric. The aim is to ensure that
# no potential numeric overflow occurs.
coeffs0 = savgol_coeffs(31, 9)
assert_array_almost_equal(coeffs0, coeffs0[::-1])
coeffs1 = savgol_coeffs(31, 9, deriv=1)
assert_array_almost_equal(coeffs1, -coeffs1[::-1])
#--------------------------------------------------------------------
# savgol_filter tests
#--------------------------------------------------------------------
def test_sg_filter_trivial():
""" Test some trivial edge cases for savgol_filter()."""
x = np.array([1.0])
y = savgol_filter(x, 1, 0)
assert_equal(y, [1.0])
# Input is a single value. With a window length of 3 and polyorder 1,
# the value in y is from the straight-line fit of (-1,0), (0,3) and
# (1, 0) at 0. This is just the average of the three values, hence 1.0.
x = np.array([3.0])
y = savgol_filter(x, 3, 1, mode='constant')
assert_almost_equal(y, [1.0], decimal=15)
x = np.array([3.0])
y = savgol_filter(x, 3, 1, mode='nearest')
assert_almost_equal(y, [3.0], decimal=15)
x = np.array([1.0] * 3)
y = savgol_filter(x, 3, 1, mode='wrap')
assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15)
def test_sg_filter_basic():
# Some basic test cases for savgol_filter().
x = np.array([1.0, 2.0, 1.0])
y = savgol_filter(x, 3, 1, mode='constant')
assert_allclose(y, [1.0, 4.0 / 3, 1.0])
y = savgol_filter(x, 3, 1, mode='mirror')
assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3])
y = savgol_filter(x, 3, 1, mode='wrap')
assert_allclose(y, [4.0 / 3, 4.0 / 3, 4.0 / 3])
def test_sg_filter_2d():
x = np.array([[1.0, 2.0, 1.0],
[2.0, 4.0, 2.0]])
expected = np.array([[1.0, 4.0 / 3, 1.0],
[2.0, 8.0 / 3, 2.0]])
y = savgol_filter(x, 3, 1, mode='constant')
assert_allclose(y, expected)
y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)
assert_allclose(y, expected.T)
def test_sg_filter_interp_edges():
# Another test with low degree polynomial data, for which we can easily
# give the exact results. In this test, we use mode='interp', so
# savgol_filter should match the exact solution for the entire data set,
# including the edges.
t = np.linspace(-5, 5, 21)
delta = t[1] - t[0]
# Polynomial test data.
x = np.array([t,
3 * t ** 2,
t ** 3 - t])
dx = np.array([np.ones_like(t),
6 * t,
3 * t ** 2 - 1.0])
d2x = np.array([np.zeros_like(t),
np.full_like(t, 6),
6 * t])
window_length = 7
y = savgol_filter(x, window_length, 3, axis=-1, mode='interp')
assert_allclose(y, x, atol=1e-12)
y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
deriv=1, delta=delta)
assert_allclose(y1, dx, atol=1e-12)
y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
deriv=2, delta=delta)
assert_allclose(y2, d2x, atol=1e-12)
# Transpose everything, and test again with axis=0.
x = x.T
dx = dx.T
d2x = d2x.T
y = savgol_filter(x, window_length, 3, axis=0, mode='interp')
assert_allclose(y, x, atol=1e-12)
y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
deriv=1, delta=delta)
assert_allclose(y1, dx, atol=1e-12)
y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
deriv=2, delta=delta)
assert_allclose(y2, d2x, atol=1e-12)
def test_sg_filter_interp_edges_3d():
# Test mode='interp' with a 3-D array.
t = np.linspace(-5, 5, 21)
delta = t[1] - t[0]
x1 = np.array([t, -t])
x2 = np.array([t ** 2, 3 * t ** 2 + 5])
x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t])
dx1 = np.array([np.ones_like(t), -np.ones_like(t)])
dx2 = np.array([2 * t, 6 * t])
dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5])
# z has shape (3, 2, 21)
z = np.array([x1, x2, x3])
dz = np.array([dx1, dx2, dx3])
y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta)
assert_allclose(y, z, atol=1e-10)
dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta)
assert_allclose(dy, dz, atol=1e-10)
# z has shape (3, 21, 2)
z = np.array([x1.T, x2.T, x3.T])
dz = np.array([dx1.T, dx2.T, dx3.T])
y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta)
assert_allclose(y, z, atol=1e-10)
dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta)
assert_allclose(dy, dz, atol=1e-10)
# z has shape (21, 3, 2)
z = z.swapaxes(0, 1).copy()
dz = dz.swapaxes(0, 1).copy()
y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta)
assert_allclose(y, z, atol=1e-10)
dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta)
assert_allclose(dy, dz, atol=1e-10)

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,273 @@
# Code adapted from "upfirdn" python library with permission:
#
# Copyright (c) 2009, Motorola, Inc
#
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Motorola nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from itertools import product
from numpy.testing import assert_equal, assert_allclose
from pytest import raises as assert_raises
import pytest
from scipy.signal import upfirdn, firwin
from scipy.signal._upfirdn import _output_len, _upfirdn_modes
from scipy.signal._upfirdn_apply import _pad_test
def upfirdn_naive(x, h, up=1, down=1):
"""Naive upfirdn processing in Python.
Note: arg order (x, h) differs to facilitate apply_along_axis use.
"""
h = np.asarray(h)
out = np.zeros(len(x) * up, x.dtype)
out[::up] = x
out = np.convolve(h, out)[::down][:_output_len(len(h), len(x), up, down)]
return out
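# Worked example of the naive algorithm above (illustrative helper, not used
# by the tests): upsampling [1, 2, 3] by 2 zero-stuffs it to
# [1, 0, 2, 0, 3, 0]; convolving with the two-tap boxcar [1, 1] and keeping
# every sample (down=1) yields [1, 1, 2, 2, 3, 3].
def _upfirdn_naive_demo():
    return upfirdn_naive(np.array([1., 2., 3.]), [1., 1.], up=2, down=1)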
class UpFIRDnCase(object):
"""Test _UpFIRDn object"""
def __init__(self, up, down, h, x_dtype):
self.up = up
self.down = down
self.h = np.atleast_1d(h)
self.x_dtype = x_dtype
self.rng = np.random.RandomState(17)
def __call__(self):
# tiny signal
self.scrub(np.ones(1, self.x_dtype))
# ones
self.scrub(np.ones(10, self.x_dtype)) # ones
# randn
x = self.rng.randn(10).astype(self.x_dtype)
if self.x_dtype in (np.complex64, np.complex128):
x += 1j * self.rng.randn(10)
self.scrub(x)
# ramp
self.scrub(np.arange(10).astype(self.x_dtype))
# 3D, random
size = (2, 3, 5)
x = self.rng.randn(*size).astype(self.x_dtype)
if self.x_dtype in (np.complex64, np.complex128):
x += 1j * self.rng.randn(*size)
for axis in range(len(size)):
self.scrub(x, axis=axis)
x = x[:, ::2, 1::3].T
for axis in range(len(size)):
self.scrub(x, axis=axis)
def scrub(self, x, axis=-1):
yr = np.apply_along_axis(upfirdn_naive, axis, x,
self.h, self.up, self.down)
want_len = _output_len(len(self.h), x.shape[axis], self.up, self.down)
assert yr.shape[axis] == want_len
y = upfirdn(self.h, x, self.up, self.down, axis=axis)
assert y.shape[axis] == want_len
assert y.shape == yr.shape
dtypes = (self.h.dtype, x.dtype)
if all(d == np.complex64 for d in dtypes):
assert_equal(y.dtype, np.complex64)
elif np.complex64 in dtypes and np.float32 in dtypes:
assert_equal(y.dtype, np.complex64)
elif all(d == np.float32 for d in dtypes):
assert_equal(y.dtype, np.float32)
elif np.complex128 in dtypes or np.complex64 in dtypes:
assert_equal(y.dtype, np.complex128)
else:
assert_equal(y.dtype, np.float64)
assert_allclose(yr, y)
_UPFIRDN_TYPES = (int, np.float32, np.complex64, float, complex)
class TestUpfirdn(object):
def test_valid_input(self):
assert_raises(ValueError, upfirdn, [1], [1], 1, 0) # up or down < 1
assert_raises(ValueError, upfirdn, [], [1], 1, 1) # h.ndim != 1
assert_raises(ValueError, upfirdn, [[1]], [1], 1, 1)
@pytest.mark.parametrize('len_h', [1, 2, 3, 4, 5])
@pytest.mark.parametrize('len_x', [1, 2, 3, 4, 5])
def test_singleton(self, len_h, len_x):
# gh-9844: lengths producing expected outputs
h = np.zeros(len_h)
h[len_h // 2] = 1. # make h a delta
x = np.ones(len_x)
y = upfirdn(h, x, 1, 1)
want = np.pad(x, (len_h // 2, (len_h - 1) // 2), 'constant')
assert_allclose(y, want)
def test_shift_x(self):
# gh-9844: shifted x can change values?
y = upfirdn([1, 1], [1.], 1, 1)
assert_allclose(y, [1, 1]) # was [0, 1] in the issue
y = upfirdn([1, 1], [0., 1.], 1, 1)
assert_allclose(y, [0, 1, 1])
# A bunch of lengths/factors chosen because they exposed differences
# between the "old way" and new way of computing length, and then
# got `expected` from MATLAB
@pytest.mark.parametrize('len_h, len_x, up, down, expected', [
(2, 2, 5, 2, [1, 0, 0, 0]),
(2, 3, 6, 3, [1, 0, 1, 0, 1]),
(2, 4, 4, 3, [1, 0, 0, 0, 1]),
(3, 2, 6, 2, [1, 0, 0, 1, 0]),
(4, 11, 3, 5, [1, 0, 0, 1, 0, 0, 1]),
])
def test_length_factors(self, len_h, len_x, up, down, expected):
# gh-9844: weird factors
h = np.zeros(len_h)
h[0] = 1.
x = np.ones(len_x)
y = upfirdn(h, x, up, down)
assert_allclose(y, expected)
@pytest.mark.parametrize('down, want_len', [ # lengths from MATLAB
(2, 5015),
(11, 912),
(79, 127),
])
def test_vs_convolve(self, down, want_len):
# Check that up=1.0 gives same answer as convolve + slicing
random_state = np.random.RandomState(17)
try_types = (int, np.float32, np.complex64, float, complex)
size = 10000
for dtype in try_types:
x = random_state.randn(size).astype(dtype)
if dtype in (np.complex64, np.complex128):
x += 1j * random_state.randn(size)
h = firwin(31, 1. / down, window='hamming')
yl = upfirdn_naive(x, h, 1, down)
y = upfirdn(h, x, up=1, down=down)
assert y.shape == (want_len,)
assert yl.shape[0] == y.shape[0]
assert_allclose(yl, y, atol=1e-7, rtol=1e-7)
@pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES)
@pytest.mark.parametrize('h', (1., 1j))
@pytest.mark.parametrize('up, down', [(1, 1), (2, 2), (3, 2), (2, 3)])
def test_vs_naive_delta(self, x_dtype, h, up, down):
UpFIRDnCase(up, down, h, x_dtype)()
@pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES)
@pytest.mark.parametrize('h_dtype', _UPFIRDN_TYPES)
@pytest.mark.parametrize('p_max, q_max',
list(product((10, 100), (10, 100))))
def test_vs_naive(self, x_dtype, h_dtype, p_max, q_max):
tests = self._random_factors(p_max, q_max, h_dtype, x_dtype)
for test in tests:
test()
def _random_factors(self, p_max, q_max, h_dtype, x_dtype):
n_rep = 3
longest_h = 25
random_state = np.random.RandomState(17)
tests = []
for _ in range(n_rep):
# Randomize the up/down factors somewhat
p_add = q_max if p_max > q_max else 1
q_add = p_max if q_max > p_max else 1
p = random_state.randint(p_max) + p_add
q = random_state.randint(q_max) + q_add
# Generate random FIR coefficients
len_h = random_state.randint(longest_h) + 1
h = np.atleast_1d(random_state.randint(len_h))
h = h.astype(h_dtype)
if h_dtype == complex:
h += 1j * random_state.randint(len_h)
tests.append(UpFIRDnCase(p, q, h, x_dtype))
return tests
@pytest.mark.parametrize('mode', _upfirdn_modes)
def test_extensions(self, mode):
"""Test vs. manually computed results for modes not in numpy's pad."""
x = np.array([1, 2, 3, 1], dtype=float)
npre, npost = 6, 6
y = _pad_test(x, npre=npre, npost=npost, mode=mode)
if mode == 'antisymmetric':
y_expected = np.asarray(
[3, 1, -1, -3, -2, -1, 1, 2, 3, 1, -1, -3, -2, -1, 1, 2])
elif mode == 'antireflect':
y_expected = np.asarray(
[1, 2, 3, 1, -1, 0, 1, 2, 3, 1, -1, 0, 1, 2, 3, 1])
elif mode == 'smooth':
y_expected = np.asarray(
[-5, -4, -3, -2, -1, 0, 1, 2, 3, 1, -1, -3, -5, -7, -9, -11])
elif mode == "line":
lin_slope = (x[-1] - x[0]) / (len(x) - 1)
left = x[0] + np.arange(-npre, 0, 1) * lin_slope
right = x[-1] + np.arange(1, npost + 1) * lin_slope
y_expected = np.concatenate((left, x, right))
else:
y_expected = np.pad(x, (npre, npost), mode=mode)
assert_allclose(y, y_expected)
@pytest.mark.parametrize(
'size, h_len, mode, dtype',
product(
[8],
[4, 5, 26], # include cases with h_len > 2*size
_upfirdn_modes,
[np.float32, np.float64, np.complex64, np.complex128],
)
)
def test_modes(self, size, h_len, mode, dtype):
random_state = np.random.RandomState(5)
x = random_state.randn(size).astype(dtype)
if dtype in (np.complex64, np.complex128):
x += 1j * random_state.randn(size)
h = np.arange(1, 1 + h_len, dtype=x.real.dtype)
y = upfirdn(h, x, up=1, down=1, mode=mode)
# expected result: pad the input, filter with zero padding, then crop
npad = h_len - 1
if mode in ['antisymmetric', 'antireflect', 'smooth', 'line']:
# use _pad_test test function for modes not supported by np.pad.
xpad = _pad_test(x, npre=npad, npost=npad, mode=mode)
else:
xpad = np.pad(x, npad, mode=mode)
ypad = upfirdn(h, xpad, up=1, down=1, mode='constant')
y_expected = ypad[npad:-npad]
atol = rtol = np.finfo(dtype).eps * 1e2
assert_allclose(y, y_expected, atol=atol, rtol=rtol)

View file

@ -0,0 +1,351 @@
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_, assert_allclose, assert_array_equal)
from pytest import raises as assert_raises
import scipy.signal.waveforms as waveforms
# These chirp_* functions are the instantaneous frequencies of the signals
# returned by chirp().
def chirp_linear(t, f0, f1, t1):
f = f0 + (f1 - f0) * t / t1
return f
def chirp_quadratic(t, f0, f1, t1, vertex_zero=True):
if vertex_zero:
f = f0 + (f1 - f0) * t**2 / t1**2
else:
f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2
return f
def chirp_geometric(t, f0, f1, t1):
f = f0 * (f1/f0)**(t/t1)
return f
def chirp_hyperbolic(t, f0, f1, t1):
f = f0*f1*t1 / ((f0 - f1)*t + f1*t1)
return f
def compute_frequency(t, theta):
"""
Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t).
"""
# Assume theta and t are 1-D NumPy arrays.
# Assume that t is uniformly spaced.
dt = t[1] - t[0]
f = np.diff(theta)/(2*np.pi) / dt
tf = 0.5*(t[1:] + t[:-1])
return tf, f
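# A minimal self-check tying the helpers above together (illustrative, not
# called by the tests): for the 'linear' method the phase is
# theta(t) = 2*pi*(f0*t + 0.5*(f1 - f0)*t**2/t1), so compute_frequency()
# should recover chirp_linear() up to discretization error.
def _linear_chirp_phase_demo(f0=1.0, f1=2.0, t1=1.0, n=1000):
    t = np.linspace(0, t1, n)
    theta = 2 * np.pi * (f0 * t + 0.5 * (f1 - f0) * t**2 / t1)
    tf, f = compute_frequency(t, theta)
    return np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))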
class TestChirp(object):
def test_linear_at_zero(self):
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear')
assert_almost_equal(w, 1.0)
def test_linear_freq_01(self):
method = 'linear'
f0 = 1.0
f1 = 2.0
t1 = 1.0
t = np.linspace(0, t1, 100)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_linear_freq_02(self):
method = 'linear'
f0 = 200.0
f1 = 100.0
t1 = 10.0
t = np.linspace(0, t1, 100)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_quadratic_at_zero(self):
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic')
assert_almost_equal(w, 1.0)
def test_quadratic_at_zero2(self):
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic',
vertex_zero=False)
assert_almost_equal(w, 1.0)
def test_quadratic_freq_01(self):
method = 'quadratic'
f0 = 1.0
f1 = 2.0
t1 = 1.0
t = np.linspace(0, t1, 2000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_quadratic_freq_02(self):
method = 'quadratic'
f0 = 20.0
f1 = 10.0
t1 = 10.0
t = np.linspace(0, t1, 2000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_logarithmic_at_zero(self):
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic')
assert_almost_equal(w, 1.0)
def test_logarithmic_freq_01(self):
method = 'logarithmic'
f0 = 1.0
f1 = 2.0
t1 = 1.0
t = np.linspace(0, t1, 10000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_logarithmic_freq_02(self):
method = 'logarithmic'
f0 = 200.0
f1 = 100.0
t1 = 10.0
t = np.linspace(0, t1, 10000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_logarithmic_freq_03(self):
method = 'logarithmic'
f0 = 100.0
f1 = 100.0
t1 = 10.0
t = np.linspace(0, t1, 10000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_hyperbolic_at_zero(self):
w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic')
assert_almost_equal(w, 1.0)
def test_hyperbolic_freq_01(self):
method = 'hyperbolic'
t1 = 1.0
t = np.linspace(0, t1, 10000)
# f0 f1
cases = [[10.0, 1.0],
[1.0, 10.0],
[-10.0, -1.0],
[-1.0, -10.0]]
for f0, f1 in cases:
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
expected = chirp_hyperbolic(tf, f0, f1, t1)
assert_allclose(f, expected)
def test_hyperbolic_zero_freq(self):
# f0=0 or f1=0 must raise a ValueError.
method = 'hyperbolic'
t1 = 1.0
t = np.linspace(0, t1, 5)
assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method)
assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method)
def test_unknown_method(self):
method = "foo"
f0 = 10.0
f1 = 20.0
t1 = 1.0
t = np.linspace(0, t1, 10)
assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method)
def test_integer_t1(self):
f0 = 10.0
f1 = 20.0
t = np.linspace(-1, 1, 11)
t1 = 3.0
float_result = waveforms.chirp(t, f0, t1, f1)
t1 = 3
int_result = waveforms.chirp(t, f0, t1, f1)
err_msg = "Integer input 't1=3' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_f0(self):
f1 = 20.0
t1 = 3.0
t = np.linspace(-1, 1, 11)
f0 = 10.0
float_result = waveforms.chirp(t, f0, t1, f1)
f0 = 10
int_result = waveforms.chirp(t, f0, t1, f1)
err_msg = "Integer input 'f0=10' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_f1(self):
f0 = 10.0
t1 = 3.0
t = np.linspace(-1, 1, 11)
f1 = 20.0
float_result = waveforms.chirp(t, f0, t1, f1)
f1 = 20
int_result = waveforms.chirp(t, f0, t1, f1)
err_msg = "Integer input 'f1=20' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_all(self):
f0 = 10
t1 = 3
f1 = 20
t = np.linspace(-1, 1, 11)
float_result = waveforms.chirp(t, float(f0), float(t1), float(f1))
int_result = waveforms.chirp(t, f0, t1, f1)
err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
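def _chirp_at_zero_sketch():
    # Illustrative sketch only (hypothetical helper, not part of the original
    # test module): the *_at_zero checks above all expect 1.0 because chirp
    # returns the cosine of a phase that starts at zero, with a default phase
    # offset phi of 0 degrees, so the sample at t = 0 is cos(0) = 1 for every
    # method.
    for method in ('linear', 'quadratic', 'logarithmic', 'hyperbolic'):
        w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method=method)
        assert_almost_equal(w, 1.0)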
class TestSweepPoly(object):
def test_sweep_poly_quad1(self):
p = np.poly1d([1.0, 0.0, 1.0])
t = np.linspace(0, 3.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_const(self):
p = np.poly1d(2.0)
t = np.linspace(0, 3.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_linear(self):
p = np.poly1d([-1.0, 10.0])
t = np.linspace(0, 3.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_quad2(self):
p = np.poly1d([1.0, 0.0, -2.0])
t = np.linspace(0, 3.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_cubic(self):
p = np.poly1d([2.0, 1.0, 0.0, -2.0])
t = np.linspace(0, 2.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_cubic2(self):
"""Use an array of coefficients instead of a poly1d."""
p = np.array([2.0, 1.0, 0.0, -2.0])
t = np.linspace(0, 2.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = np.poly1d(p)(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_cubic3(self):
"""Use a list of coefficients instead of a poly1d."""
p = [2.0, 1.0, 0.0, -2.0]
t = np.linspace(0, 2.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = np.poly1d(p)(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
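def _sweep_poly_usage_sketch():
    # Illustrative usage sketch only (hypothetical helper, not part of the
    # original test module): the public waveforms.sweep_poly generates the
    # cosine of a phase whose derivative / (2*pi) follows the given
    # polynomial, which is the property TestSweepPoly verifies above through
    # the private _sweep_poly_phase.
    p = np.poly1d([1.0, 0.0, 1.0])          # instantaneous frequency t**2 + 1
    t = np.linspace(0, 3.0, 10000)
    w = waveforms.sweep_poly(t, p)
    assert w.shape == t.shape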
class TestGaussPulse(object):
def test_integer_fc(self):
float_result = waveforms.gausspulse('cutoff', fc=1000.0)
int_result = waveforms.gausspulse('cutoff', fc=1000)
err_msg = "Integer input 'fc=1000' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_bw(self):
float_result = waveforms.gausspulse('cutoff', bw=1.0)
int_result = waveforms.gausspulse('cutoff', bw=1)
err_msg = "Integer input 'bw=1' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_bwr(self):
float_result = waveforms.gausspulse('cutoff', bwr=-6.0)
int_result = waveforms.gausspulse('cutoff', bwr=-6)
err_msg = "Integer input 'bwr=-6' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_tpr(self):
float_result = waveforms.gausspulse('cutoff', tpr=-60.0)
int_result = waveforms.gausspulse('cutoff', tpr=-60)
err_msg = "Integer input 'tpr=-60' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
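def _gausspulse_usage_sketch():
    # Illustrative usage sketch only (hypothetical helper, not part of the
    # original test module): the string 'cutoff' used in the tests above asks
    # gausspulse for the time at which the pulse envelope drops to the tpr
    # level, which can then be used to build a symmetric time grid for the
    # actual Gaussian-modulated sinusoid.
    tc = waveforms.gausspulse('cutoff', fc=1000.0)
    t = np.linspace(-tc, tc, 201)
    y = waveforms.gausspulse(t, fc=1000.0)
    assert y.shape == t.shape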
class TestUnitImpulse(object):
def test_no_index(self):
assert_array_equal(waveforms.unit_impulse(7), [1, 0, 0, 0, 0, 0, 0])
assert_array_equal(waveforms.unit_impulse((3, 3)),
[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
def test_index(self):
assert_array_equal(waveforms.unit_impulse(10, 3),
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
assert_array_equal(waveforms.unit_impulse((3, 3), (1, 1)),
[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
# Broadcasting
imp = waveforms.unit_impulse((4, 4), 2)
assert_array_equal(imp, np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]]))
def test_mid(self):
assert_array_equal(waveforms.unit_impulse((3, 3), 'mid'),
[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
assert_array_equal(waveforms.unit_impulse(9, 'mid'),
[0, 0, 0, 0, 1, 0, 0, 0, 0])
def test_dtype(self):
imp = waveforms.unit_impulse(7)
assert_(np.issubdtype(imp.dtype, np.floating))
imp = waveforms.unit_impulse(5, 3, dtype=int)
assert_(np.issubdtype(imp.dtype, np.integer))
imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex)
assert_(np.issubdtype(imp.dtype, np.complexfloating))
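def _unit_impulse_usage_sketch():
    # Illustrative usage sketch only (hypothetical helper, not part of the
    # original test module): a common use of unit_impulse is probing a
    # filter's impulse response, here the first-order recursion
    # y[n] = x[n] + 0.5*y[n-1], whose response to a discrete delta is 0.5**n.
    from scipy.signal import lfilter  # local import, not used by the tests
    imp = waveforms.unit_impulse(8)
    resp = lfilter([1.0], [1.0, -0.5], imp)
    assert_allclose(resp, 0.5 ** np.arange(8))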

View file

@@ -0,0 +1,152 @@
import numpy as np
from numpy.testing import assert_equal, \
assert_array_equal, assert_array_almost_equal, assert_array_less, assert_
from scipy.signal import wavelets
class TestWavelets(object):
def test_qmf(self):
assert_array_equal(wavelets.qmf([1, 1]), [1, -1])
def test_daub(self):
for i in range(1, 15):
assert_equal(len(wavelets.daub(i)), i * 2)
def test_cascade(self):
for J in range(1, 7):
for i in range(1, 5):
lpcoef = wavelets.daub(i)
k = len(lpcoef)
x, phi, psi = wavelets.cascade(lpcoef, J)
assert_(len(x) == len(phi) == len(psi))
assert_equal(len(x), (k - 1) * 2 ** J)
def test_morlet(self):
x = wavelets.morlet(50, 4.1, complete=True)
y = wavelets.morlet(50, 4.1, complete=False)
# Test if complete and incomplete wavelet have same lengths:
assert_equal(len(x), len(y))
# Test if complete wavelet is less than incomplete wavelet:
assert_array_less(x, y)
x = wavelets.morlet(10, 50, complete=False)
y = wavelets.morlet(10, 50, complete=True)
# For large widths complete and incomplete wavelets should be
# identical within numerical precision:
assert_equal(x, y)
# miscellaneous tests:
x = np.array([1.73752399e-09 + 9.84327394e-25j,
6.49471756e-01 + 0.00000000e+00j,
1.73752399e-09 - 9.84327394e-25j])
y = wavelets.morlet(3, w=2, complete=True)
assert_array_almost_equal(x, y)
x = np.array([2.00947715e-09 + 9.84327394e-25j,
7.51125544e-01 + 0.00000000e+00j,
2.00947715e-09 - 9.84327394e-25j])
y = wavelets.morlet(3, w=2, complete=False)
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, s=4, complete=True)
y = wavelets.morlet(20000, s=8, complete=True)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, s=4, complete=False)
assert_array_almost_equal(y, x, decimal=2)
y = wavelets.morlet(20000, s=8, complete=False)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=3, s=5, complete=True)
y = wavelets.morlet(20000, w=3, s=10, complete=True)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=3, s=5, complete=False)
assert_array_almost_equal(y, x, decimal=2)
y = wavelets.morlet(20000, w=3, s=10, complete=False)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=7, s=10, complete=True)
y = wavelets.morlet(20000, w=7, s=20, complete=True)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=7, s=10, complete=False)
assert_array_almost_equal(x, y, decimal=2)
y = wavelets.morlet(20000, w=7, s=20, complete=False)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
def test_morlet2(self):
w = wavelets.morlet2(1.0, 0.5)
expected = (np.pi**(-0.25) * np.sqrt(1/0.5)).astype(complex)
assert_array_equal(w, expected)
lengths = [5, 11, 15, 51, 101]
for length in lengths:
w = wavelets.morlet2(length, 1.0)
assert_(len(w) == length)
max_loc = np.argmax(w)
assert_(max_loc == (length // 2))
points = 100
w = abs(wavelets.morlet2(points, 2.0))
half_vec = np.arange(0, points // 2)
assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])
x = np.array([5.03701224e-09 + 2.46742437e-24j,
1.88279253e+00 + 0.00000000e+00j,
5.03701224e-09 - 2.46742437e-24j])
y = wavelets.morlet2(3, s=1/(2*np.pi), w=2)
assert_array_almost_equal(x, y)
def test_ricker(self):
w = wavelets.ricker(1.0, 1)
expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25))
assert_array_equal(w, expected)
lengths = [5, 11, 15, 51, 101]
for length in lengths:
w = wavelets.ricker(length, 1.0)
assert_(len(w) == length)
max_loc = np.argmax(w)
assert_(max_loc == (length // 2))
points = 100
w = wavelets.ricker(points, 2.0)
half_vec = np.arange(0, points // 2)
        # Wavelet should be symmetric
        assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])
        # Check zeros
aas = [5, 10, 15, 20, 30]
points = 99
for a in aas:
w = wavelets.ricker(points, a)
vec = np.arange(0, points) - (points - 1.0) / 2
exp_zero1 = np.argmin(np.abs(vec - a))
exp_zero2 = np.argmin(np.abs(vec + a))
assert_array_almost_equal(w[exp_zero1], 0)
assert_array_almost_equal(w[exp_zero2], 0)
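    def _ricker_closed_form_sketch(self):
        # Illustrative sketch only (hypothetical helper, not collected by
        # pytest and not part of the original test suite): the amplitude and
        # zero checks above follow from the closed form
        # A * (1 - (x/a)**2) * exp(-x**2 / (2*a**2)) with
        # A = 2 / (sqrt(3*a) * pi**0.25), which vanishes exactly at x = +/-a.
        a = 5.0
        points = 99
        x = np.arange(0, points) - (points - 1.0) / 2
        A = 2 / (np.sqrt(3 * a) * np.pi ** 0.25)
        closed_form = A * (1 - (x / a) ** 2) * np.exp(-x ** 2 / (2 * a ** 2))
        assert_array_almost_equal(wavelets.ricker(points, a), closed_form)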
def test_cwt(self):
widths = [1.0]
delta_wavelet = lambda s, t: np.array([1])
len_data = 100
test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0)
        # Test delta function input gives same data as output
cwt_dat = wavelets.cwt(test_data, delta_wavelet, widths)
assert_(cwt_dat.shape == (len(widths), len_data))
assert_array_almost_equal(test_data, cwt_dat.flatten())
        # Check proper shape on output
widths = [1, 3, 4, 5, 10]
cwt_dat = wavelets.cwt(test_data, wavelets.ricker, widths)
assert_(cwt_dat.shape == (len(widths), len_data))
widths = [len_data * 10]
        # Note: this wavelet isn't defined quite right, but is fine for this test
flat_wavelet = lambda l, w: np.full(w, 1 / w)
cwt_dat = wavelets.cwt(test_data, flat_wavelet, widths)
assert_array_almost_equal(cwt_dat, np.mean(test_data))
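    def _cwt_usage_sketch(self):
        # Illustrative usage sketch only (hypothetical helper, not collected
        # by pytest and not part of the original test suite): a typical cwt
        # call convolves the data with wavelets.ricker at several widths and
        # returns an array of shape (len(widths), len(data)), the contract
        # checked in test_cwt above.
        data = np.sin(np.pi * np.arange(0, 100) / 10.0)
        widths = np.arange(1, 11)
        cwt_mat = wavelets.cwt(data, wavelets.ricker, widths)
        assert_(cwt_mat.shape == (len(widths), len(data)))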
