Fixed database typo and removed unnecessary class identifier.
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions
361
venv/Lib/site-packages/scipy/signal/__init__.py
Normal file
@@ -0,0 +1,361 @@
"""
|
||||
=======================================
|
||||
Signal processing (:mod:`scipy.signal`)
|
||||
=======================================
|
||||
|
||||
Convolution
|
||||
===========
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
convolve -- N-D convolution.
|
||||
correlate -- N-D correlation.
|
||||
fftconvolve -- N-D convolution using the FFT.
|
||||
oaconvolve -- N-D convolution using the overlap-add method.
|
||||
convolve2d -- 2-D convolution (more options).
|
||||
correlate2d -- 2-D correlation (more options).
|
||||
sepfir2d -- Convolve with a 2-D separable FIR filter.
|
||||
choose_conv_method -- Chooses faster of FFT and direct convolution methods.
|
||||
|
||||
B-splines
|
||||
=========
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
bspline -- B-spline basis function of order n.
|
||||
cubic -- B-spline basis function of order 3.
|
||||
quadratic -- B-spline basis function of order 2.
|
||||
gauss_spline -- Gaussian approximation to the B-spline basis function.
|
||||
cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
|
||||
qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
|
||||
cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
|
||||
qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
|
||||
cspline1d_eval -- Evaluate a cubic spline at the given points.
|
||||
qspline1d_eval -- Evaluate a quadratic spline at the given points.
|
||||
spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
|
||||
|
||||
Filtering
|
||||
=========
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
order_filter -- N-D order filter.
|
||||
medfilt -- N-D median filter.
|
||||
medfilt2d -- 2-D median filter (faster).
|
||||
wiener -- N-D Wiener filter.
|
||||
|
||||
symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
|
||||
symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
|
||||
lfilter -- 1-D FIR and IIR digital linear filtering.
|
||||
lfiltic -- Construct initial conditions for `lfilter`.
|
||||
lfilter_zi -- Compute an initial state zi for the lfilter function that
|
||||
-- corresponds to the steady state of the step response.
|
||||
filtfilt -- A forward-backward filter.
|
||||
savgol_filter -- Filter a signal using the Savitzky-Golay filter.
|
||||
|
||||
deconvolve -- 1-D deconvolution using lfilter.
|
||||
|
||||
sosfilt -- 1-D IIR digital linear filtering using
|
||||
-- a second-order sections filter representation.
|
||||
sosfilt_zi -- Compute an initial state zi for the sosfilt function that
|
||||
-- corresponds to the steady state of the step response.
|
||||
sosfiltfilt -- A forward-backward filter for second-order sections.
|
||||
hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
|
||||
hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
|
||||
|
||||
decimate -- Downsample a signal.
|
||||
detrend -- Remove linear and/or constant trends from data.
|
||||
resample -- Resample using Fourier method.
|
||||
resample_poly -- Resample using polyphase filtering method.
|
||||
upfirdn -- Upsample, apply FIR filter, downsample.
|
||||
|
||||
Filter design
|
||||
=============
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
bilinear -- Digital filter from an analog filter using
|
||||
-- the bilinear transform.
|
||||
bilinear_zpk -- Digital filter from an analog filter using
|
||||
-- the bilinear transform.
|
||||
findfreqs -- Find array of frequencies for computing filter response.
|
||||
firls -- FIR filter design using least-squares error minimization.
|
||||
firwin -- Windowed FIR filter design, with frequency response
|
||||
-- defined as pass and stop bands.
|
||||
firwin2 -- Windowed FIR filter design, with arbitrary frequency
|
||||
-- response.
|
||||
freqs -- Analog filter frequency response from TF coefficients.
|
||||
freqs_zpk -- Analog filter frequency response from ZPK coefficients.
|
||||
freqz -- Digital filter frequency response from TF coefficients.
|
||||
freqz_zpk -- Digital filter frequency response from ZPK coefficients.
|
||||
sosfreqz -- Digital filter frequency response for SOS format filter.
|
||||
group_delay -- Digital filter group delay.
|
||||
iirdesign -- IIR filter design given bands and gains.
|
||||
iirfilter -- IIR filter design given order and critical frequencies.
|
||||
kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
|
||||
-- the number of taps and the transition width at
|
||||
-- discontinuities in the frequency response.
|
||||
kaiser_beta -- Compute the Kaiser parameter beta, given the desired
|
||||
-- FIR filter attenuation.
|
||||
kaiserord -- Design a Kaiser window to limit ripple and width of
|
||||
-- transition region.
|
||||
minimum_phase -- Convert a linear phase FIR filter to minimum phase.
|
||||
savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
|
||||
-- filter.
|
||||
remez -- Optimal FIR filter design.
|
||||
|
||||
unique_roots -- Unique roots and their multiplicities.
|
||||
residue -- Partial fraction expansion of b(s) / a(s).
|
||||
residuez -- Partial fraction expansion of b(z) / a(z).
|
||||
invres -- Inverse partial fraction expansion for analog filter.
|
||||
invresz -- Inverse partial fraction expansion for digital filter.
|
||||
BadCoefficients -- Warning on badly conditioned filter coefficients.
|
||||
|
||||
Lower-level filter design functions:
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
abcd_normalize -- Check state-space matrices and ensure they are rank-2.
|
||||
band_stop_obj -- Band Stop Objective Function for order minimization.
|
||||
besselap -- Return (z,p,k) for analog prototype of Bessel filter.
|
||||
buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
|
||||
cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
|
||||
cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
|
||||
cmplx_sort -- Sort roots based on magnitude.
|
||||
ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
|
||||
lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
|
||||
lp2bp_zpk -- Transform a lowpass filter prototype to a bandpass filter.
|
||||
lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
|
||||
lp2bs_zpk -- Transform a lowpass filter prototype to a bandstop filter.
|
||||
lp2hp -- Transform a lowpass filter prototype to a highpass filter.
|
||||
lp2hp_zpk -- Transform a lowpass filter prototype to a highpass filter.
|
||||
lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
|
||||
lp2lp_zpk -- Transform a lowpass filter prototype to a lowpass filter.
|
||||
normalize -- Normalize polynomial representation of a transfer function.
|
||||
|
||||
|
||||
|
||||
Matlab-style IIR filter design
|
||||
==============================
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
butter -- Butterworth
|
||||
buttord
|
||||
cheby1 -- Chebyshev Type I
|
||||
cheb1ord
|
||||
cheby2 -- Chebyshev Type II
|
||||
cheb2ord
|
||||
ellip -- Elliptic (Cauer)
|
||||
ellipord
|
||||
bessel -- Bessel (no order selection available -- try buttord)
|
||||
iirnotch -- Design second-order IIR notch digital filter.
|
||||
iirpeak -- Design second-order IIR peak (resonant) digital filter.
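For illustration, a minimal lowpass design (a sketch assuming only that
``scipy.signal`` is importable; the shape check keeps the output
independent of floating-point repr):

>>> from scipy import signal
>>> sos = signal.butter(4, 0.2, output='sos')  # 4th-order lowpass, cutoff at 0.2 of Nyquist
>>> sos.shape  # two second-order sections, six coefficients each
(2, 6)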
|
||||
|
||||
Continuous-time linear systems
|
||||
==============================
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
lti -- Continuous-time linear time invariant system base class.
|
||||
StateSpace -- Linear time invariant system in state space form.
|
||||
TransferFunction -- Linear time invariant system in transfer function form.
|
||||
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
|
||||
lsim -- Continuous-time simulation of output to linear system.
|
||||
lsim2 -- Like lsim, but `scipy.integrate.odeint` is used.
|
||||
impulse -- Impulse response of linear, time-invariant (LTI) system.
|
||||
impulse2 -- Like impulse, but `scipy.integrate.odeint` is used.
|
||||
step -- Step response of continuous-time LTI system.
|
||||
step2 -- Like step, but `scipy.integrate.odeint` is used.
|
||||
freqresp -- Frequency response of a continuous-time LTI system.
|
||||
bode -- Bode magnitude and phase data (continuous-time LTI).
|
||||
|
||||
Discrete-time linear systems
|
||||
============================
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
dlti -- Discrete-time linear time invariant system base class.
|
||||
StateSpace -- Linear time invariant system in state space form.
|
||||
TransferFunction -- Linear time invariant system in transfer function form.
|
||||
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
|
||||
dlsim -- Simulation of output to a discrete-time linear system.
|
||||
dimpulse -- Impulse response of a discrete-time LTI system.
|
||||
dstep -- Step response of a discrete-time LTI system.
|
||||
dfreqresp -- Frequency response of a discrete-time LTI system.
|
||||
dbode -- Bode magnitude and phase data (discrete-time LTI).
|
||||
|
||||
LTI representations
|
||||
===================
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
tf2zpk -- Transfer function to zero-pole-gain.
|
||||
tf2sos -- Transfer function to second-order sections.
|
||||
tf2ss -- Transfer function to state-space.
|
||||
zpk2tf -- Zero-pole-gain to transfer function.
|
||||
zpk2sos -- Zero-pole-gain to second-order sections.
|
||||
zpk2ss -- Zero-pole-gain to state-space.
|
||||
ss2tf -- State-space to transfer function.
|
||||
ss2zpk -- State-space to zero-pole-gain.
|
||||
sos2zpk -- Second-order sections to zero-pole-gain.
|
||||
sos2tf -- Second-order sections to transfer function.
|
||||
cont2discrete -- Continuous-time to discrete-time LTI conversion.
|
||||
place_poles -- Pole placement.
|
||||
|
||||
Waveforms
|
||||
=========
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
chirp -- Frequency swept cosine signal, with several freq functions.
|
||||
gausspulse -- Gaussian modulated sinusoid.
|
||||
max_len_seq -- Maximum length sequence.
|
||||
sawtooth -- Periodic sawtooth.
|
||||
square -- Square wave.
|
||||
sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial.
|
||||
unit_impulse -- Discrete unit impulse.
|
||||
|
||||
Window functions
|
||||
================
|
||||
|
||||
For window functions, see the `scipy.signal.windows` namespace.
|
||||
|
||||
In the `scipy.signal` namespace, there is a convenience function to
|
||||
obtain these windows by name:
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
get_window -- Return a window of a given length and type.
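For example (a minimal sketch; only the shape is shown to stay
independent of floating-point repr):

>>> from scipy.signal import get_window
>>> w = get_window('hann', 8)  # periodic Hann window of length 8
>>> w.shape
(8,)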
|
||||
|
||||
Wavelets
|
||||
========
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
cascade -- Compute scaling function and wavelet from coefficients.
|
||||
daub -- Return low-pass filter coefficients for Daubechies wavelets.
|
||||
morlet -- Complex Morlet wavelet.
|
||||
qmf -- Return quadrature mirror filter from low-pass.
|
||||
ricker -- Return a Ricker wavelet.
|
||||
morlet2 -- Return Morlet wavelet, compatible with cwt.
|
||||
cwt -- Perform continuous wavelet transform.
|
||||
|
||||
Peak finding
|
||||
============
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
argrelmin -- Calculate the relative minima of data.
|
||||
argrelmax -- Calculate the relative maxima of data.
|
||||
argrelextrema -- Calculate the relative extrema of data.
|
||||
find_peaks -- Find a subset of peaks inside a signal.
|
||||
find_peaks_cwt -- Find peaks in a 1-D array with wavelet transformation.
|
||||
peak_prominences -- Calculate the prominence of each peak in a signal.
|
||||
peak_widths -- Calculate the width of each peak in a signal.
|
||||
|
||||
Spectral analysis
|
||||
=================
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
periodogram -- Compute a (modified) periodogram.
|
||||
welch -- Compute a periodogram using Welch's method.
|
||||
csd -- Compute the cross spectral density, using Welch's method.
|
||||
coherence -- Compute the magnitude squared coherence, using Welch's method.
|
||||
spectrogram -- Compute the spectrogram.
|
||||
lombscargle -- Compute the Lomb-Scargle periodogram.
|
||||
vectorstrength -- Compute the vector strength.
|
||||
stft -- Compute the Short Time Fourier Transform.
|
||||
istft -- Compute the Inverse Short Time Fourier Transform.
|
||||
check_COLA -- Check the COLA constraint for iSTFT reconstruction.
|
||||
check_NOLA -- Check the NOLA constraint for iSTFT reconstruction.
|
||||
|
||||
"""
|
||||
from . import sigtools, windows
|
||||
from .waveforms import *
|
||||
from ._max_len_seq import max_len_seq
|
||||
from ._upfirdn import upfirdn
|
||||
|
||||
# The spline module (a C extension) provides:
|
||||
# cspline2d, qspline2d, sepfir2d, symiirorder1, symiirorder2
|
||||
from .spline import *
|
||||
|
||||
from .bsplines import *
|
||||
from .filter_design import *
|
||||
from .fir_filter_design import *
|
||||
from .ltisys import *
|
||||
from .lti_conversion import *
|
||||
from .signaltools import *
|
||||
from ._savitzky_golay import savgol_coeffs, savgol_filter
|
||||
from .spectral import *
|
||||
from .wavelets import *
|
||||
from ._peak_finding import *
|
||||
from .windows import get_window # keep this one in signal namespace
|
||||
|
||||
|
||||
# deal with * -> windows.* doc-only soft-deprecation
|
||||
deprecated_windows = ('boxcar', 'triang', 'parzen', 'bohman', 'blackman',
|
||||
'nuttall', 'blackmanharris', 'flattop', 'bartlett',
|
||||
'barthann', 'hamming', 'kaiser', 'gaussian',
|
||||
'general_gaussian', 'chebwin', 'slepian', 'cosine',
|
||||
'hann', 'exponential', 'tukey')
|
||||
|
||||
# backward compatibility imports for actually deprecated windows not
|
||||
# in the above list
|
||||
from .windows import hanning
|
||||
|
||||
|
||||
def deco(name):
|
||||
f = getattr(windows, name)
|
||||
# Add deprecation to docstring
|
||||
|
||||
def wrapped(*args, **kwargs):
|
||||
return f(*args, **kwargs)
|
||||
|
||||
wrapped.__name__ = name
|
||||
wrapped.__module__ = 'scipy.signal'
|
||||
if hasattr(f, '__qualname__'):
|
||||
wrapped.__qualname__ = f.__qualname__
|
||||
|
||||
if f.__doc__:
|
||||
lines = f.__doc__.splitlines()
|
||||
for li, line in enumerate(lines):
|
||||
if line.strip() == 'Parameters':
|
||||
break
|
||||
else:
|
||||
raise RuntimeError('dev error: badly formatted doc')
|
||||
spacing = ' ' * line.find('P')
|
||||
lines.insert(li, ('{0}.. warning:: scipy.signal.{1} is deprecated,\n'
|
||||
'{0} use scipy.signal.windows.{1} '
|
||||
'instead.\n'.format(spacing, name)))
|
||||
wrapped.__doc__ = '\n'.join(lines)
|
||||
|
||||
return wrapped
|
||||
|
||||
|
||||
for name in deprecated_windows:
|
||||
locals()[name] = deco(name)
|
||||
|
||||
del deprecated_windows, name, deco
|
||||
|
||||
|
||||
__all__ = [s for s in dir() if not s.startswith('_')]
|
||||
|
||||
from scipy._lib._testutils import PytestTester
|
||||
test = PytestTester(__name__)
|
||||
del PytestTester
|
241
venv/Lib/site-packages/scipy/signal/_arraytools.py
Normal file
@@ -0,0 +1,241 @@
"""
|
||||
Functions for acting on an axis of an array.
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
|
||||
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
|
||||
"""Take a slice along axis 'axis' from 'a'.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
a : numpy.ndarray
|
||||
The array to be sliced.
|
||||
start, stop, step : int or None
|
||||
The slice parameters.
|
||||
axis : int, optional
|
||||
The axis of `a` to be sliced.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
>>> from scipy.signal._arraytools import axis_slice
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
|
||||
>>> axis_slice(a, start=0, stop=1, axis=1)
|
||||
array([[1],
|
||||
[4],
|
||||
[7]])
|
||||
>>> axis_slice(a, start=1, axis=0)
|
||||
array([[4, 5, 6],
|
||||
[7, 8, 9]])
|
||||
|
||||
Notes
|
||||
-----
|
||||
The keyword arguments start, stop and step are used by calling
|
||||
slice(start, stop, step). This implies axis_slice() does not
|
||||
handle its arguments exactly the same as indexing. To select
|
||||
a single index k, for example, use
|
||||
axis_slice(a, start=k, stop=k+1)
|
||||
In this case, the length of the axis 'axis' in the result will
|
||||
be 1; the trivial dimension is not removed. (Use numpy.squeeze()
|
||||
to remove trivial axes.)
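For example, selecting the single column ``k = 2`` of the array ``a``
defined above keeps the trivial axis:

>>> axis_slice(a, start=2, stop=3, axis=1)
array([[3],
[6],
[9]])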
|
||||
"""
|
||||
a_slice = [slice(None)] * a.ndim
|
||||
a_slice[axis] = slice(start, stop, step)
|
||||
b = a[tuple(a_slice)]
|
||||
return b
|
||||
|
||||
|
||||
def axis_reverse(a, axis=-1):
|
||||
"""Reverse the 1-D slices of `a` along axis `axis`.
|
||||
|
||||
Returns axis_slice(a, step=-1, axis=axis).
|
||||
"""
|
||||
return axis_slice(a, step=-1, axis=axis)
|
||||
|
||||
|
||||
def odd_ext(x, n, axis=-1):
|
||||
"""
|
||||
Odd extension at the boundaries of an array
|
||||
|
||||
Generate a new ndarray by making an odd extension of `x` along an axis.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : ndarray
|
||||
The array to be extended.
|
||||
n : int
|
||||
The number of elements by which to extend `x` at each end of the axis.
|
||||
axis : int, optional
|
||||
The axis along which to extend `x`. Default is -1.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.signal._arraytools import odd_ext
|
||||
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
|
||||
>>> odd_ext(a, 2)
|
||||
array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
|
||||
[-4, -1, 0, 1, 4, 9, 16, 23, 28]])
|
||||
|
||||
Odd extension is a "180 degree rotation" at the endpoints of the original
|
||||
array:
|
||||
|
||||
>>> t = np.linspace(0, 1.5, 100)
|
||||
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
|
||||
>>> b = odd_ext(a, 40)
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='odd extension')
|
||||
>>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
|
||||
>>> plt.legend(loc='best')
|
||||
>>> plt.show()
|
||||
"""
|
||||
if n < 1:
|
||||
return x
|
||||
if n > x.shape[axis] - 1:
|
||||
raise ValueError(("The extension length n (%d) is too big. " +
|
||||
"It must not exceed x.shape[axis]-1, which is %d.")
|
||||
% (n, x.shape[axis] - 1))
|
||||
left_end = axis_slice(x, start=0, stop=1, axis=axis)
|
||||
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
|
||||
right_end = axis_slice(x, start=-1, axis=axis)
|
||||
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
|
||||
ext = np.concatenate((2 * left_end - left_ext,
|
||||
x,
|
||||
2 * right_end - right_ext),
|
||||
axis=axis)
|
||||
return ext
|
||||
|
||||
|
||||
def even_ext(x, n, axis=-1):
|
||||
"""
|
||||
Even extension at the boundaries of an array
|
||||
|
||||
Generate a new ndarray by making an even extension of `x` along an axis.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : ndarray
|
||||
The array to be extended.
|
||||
n : int
|
||||
The number of elements by which to extend `x` at each end of the axis.
|
||||
axis : int, optional
|
||||
The axis along which to extend `x`. Default is -1.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.signal._arraytools import even_ext
|
||||
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
|
||||
>>> even_ext(a, 2)
|
||||
array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3],
|
||||
[ 4, 1, 0, 1, 4, 9, 16, 9, 4]])
|
||||
|
||||
Even extension is a "mirror image" at the boundaries of the original array:
|
||||
|
||||
>>> t = np.linspace(0, 1.5, 100)
|
||||
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
|
||||
>>> b = even_ext(a, 40)
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='even extension')
|
||||
>>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
|
||||
>>> plt.legend(loc='best')
|
||||
>>> plt.show()
|
||||
"""
|
||||
if n < 1:
|
||||
return x
|
||||
if n > x.shape[axis] - 1:
|
||||
raise ValueError(("The extension length n (%d) is too big. " +
|
||||
"It must not exceed x.shape[axis]-1, which is %d.")
|
||||
% (n, x.shape[axis] - 1))
|
||||
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
|
||||
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
|
||||
ext = np.concatenate((left_ext,
|
||||
x,
|
||||
right_ext),
|
||||
axis=axis)
|
||||
return ext
|
||||
|
||||
|
||||
def const_ext(x, n, axis=-1):
|
||||
"""
|
||||
Constant extension at the boundaries of an array
|
||||
|
||||
Generate a new ndarray that is a constant extension of `x` along an axis.
|
||||
|
||||
The extension repeats the values at the first and last element of
|
||||
the axis.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : ndarray
|
||||
The array to be extended.
|
||||
n : int
|
||||
The number of elements by which to extend `x` at each end of the axis.
|
||||
axis : int, optional
|
||||
The axis along which to extend `x`. Default is -1.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.signal._arraytools import const_ext
|
||||
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
|
||||
>>> const_ext(a, 2)
|
||||
array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5],
|
||||
[ 0, 0, 0, 1, 4, 9, 16, 16, 16]])
|
||||
|
||||
Constant extension continues with the same values as the endpoints of the
|
||||
array:
|
||||
|
||||
>>> t = np.linspace(0, 1.5, 100)
|
||||
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
|
||||
>>> b = const_ext(a, 40)
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='constant extension')
|
||||
>>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
|
||||
>>> plt.legend(loc='best')
|
||||
>>> plt.show()
|
||||
"""
|
||||
if n < 1:
|
||||
return x
|
||||
left_end = axis_slice(x, start=0, stop=1, axis=axis)
|
||||
ones_shape = [1] * x.ndim
|
||||
ones_shape[axis] = n
|
||||
ones = np.ones(ones_shape, dtype=x.dtype)
|
||||
left_ext = ones * left_end
|
||||
right_end = axis_slice(x, start=-1, axis=axis)
|
||||
right_ext = ones * right_end
|
||||
ext = np.concatenate((left_ext,
|
||||
x,
|
||||
right_ext),
|
||||
axis=axis)
|
||||
return ext
|
||||
|
||||
|
||||
def zero_ext(x, n, axis=-1):
|
||||
"""
|
||||
Zero padding at the boundaries of an array
|
||||
|
||||
Generate a new ndarray that is a zero-padded extension of `x` along
|
||||
an axis.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : ndarray
|
||||
The array to be extended.
|
||||
n : int
|
||||
The number of elements by which to extend `x` at each end of the
|
||||
axis.
|
||||
axis : int, optional
|
||||
The axis along which to extend `x`. Default is -1.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.signal._arraytools import zero_ext
|
||||
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
|
||||
>>> zero_ext(a, 2)
|
||||
array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0],
|
||||
[ 0, 0, 0, 1, 4, 9, 16, 0, 0]])
|
||||
"""
|
||||
if n < 1:
|
||||
return x
|
||||
zeros_shape = list(x.shape)
|
||||
zeros_shape[axis] = n
|
||||
zeros = np.zeros(zeros_shape, dtype=x.dtype)
|
||||
ext = np.concatenate((zeros, x, zeros), axis=axis)
|
||||
return ext
|
137
venv/Lib/site-packages/scipy/signal/_max_len_seq.py
Normal file
@@ -0,0 +1,137 @@
# Author: Eric Larson
|
||||
# 2014
|
||||
|
||||
"""Tools for MLS generation"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ._max_len_seq_inner import _max_len_seq_inner
|
||||
|
||||
__all__ = ['max_len_seq']
|
||||
|
||||
|
||||
# These are definitions of linear shift register taps for use in max_len_seq()
|
||||
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
|
||||
9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
|
||||
14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
|
||||
18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
|
||||
23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
|
||||
27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
|
||||
31: [28], 32: [31, 30, 10]}
|
||||
|
||||
def max_len_seq(nbits, state=None, length=None, taps=None):
|
||||
"""
|
||||
Maximum length sequence (MLS) generator.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
nbits : int
|
||||
Number of bits to use. Length of the resulting sequence will
|
||||
be ``(2**nbits) - 1``. Note that generating long sequences
|
||||
(e.g., greater than ``nbits == 16``) can take a long time.
|
||||
state : array_like, optional
|
||||
If array, must be of length ``nbits``, and will be cast to binary
|
||||
(bool) representation. If None, a seed of ones will be used,
|
||||
producing a repeatable representation. If ``state`` is all
|
||||
zeros, an error is raised as this is invalid. Default: None.
|
||||
length : int, optional
|
||||
Number of samples to compute. If None, the entire length
|
||||
``(2**nbits) - 1`` is computed.
|
||||
taps : array_like, optional
|
||||
Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
|
||||
If None, taps will be automatically selected (for up to
|
||||
``nbits == 32``).
|
||||
|
||||
Returns
|
||||
-------
|
||||
seq : array
|
||||
Resulting MLS sequence of 0's and 1's.
|
||||
state : array
|
||||
The final state of the shift register.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The algorithm for MLS generation is generically described in:
|
||||
|
||||
https://en.wikipedia.org/wiki/Maximum_length_sequence
|
||||
|
||||
The default values for taps are specifically taken from the first
|
||||
option listed for each value of ``nbits`` in:
|
||||
|
||||
http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm
|
||||
|
||||
.. versionadded:: 0.15.0
|
||||
|
||||
Examples
|
||||
--------
|
||||
MLS uses binary convention:
|
||||
|
||||
>>> from scipy.signal import max_len_seq
|
||||
>>> max_len_seq(4)[0]
|
||||
array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
|
||||
|
||||
MLS has a white spectrum (except for DC):
|
||||
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> from numpy.fft import fft, ifft, fftshift, fftfreq
|
||||
>>> seq = max_len_seq(6)[0]*2-1 # +1 and -1
|
||||
>>> spec = fft(seq)
|
||||
>>> N = len(seq)
|
||||
>>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
|
||||
>>> plt.margins(0.1, 0.1)
|
||||
>>> plt.grid(True)
|
||||
>>> plt.show()
|
||||
|
||||
Circular autocorrelation of MLS is an impulse:
|
||||
|
||||
>>> acorrcirc = ifft(spec * np.conj(spec)).real
|
||||
>>> plt.figure()
|
||||
>>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
|
||||
>>> plt.margins(0.1, 0.1)
|
||||
>>> plt.grid(True)
|
||||
>>> plt.show()
|
||||
|
||||
Linear autocorrelation of MLS is approximately an impulse:
|
||||
|
||||
>>> acorr = np.correlate(seq, seq, 'full')
|
||||
>>> plt.figure()
|
||||
>>> plt.plot(np.arange(-N+1, N), acorr, '.-')
|
||||
>>> plt.margins(0.1, 0.1)
|
||||
>>> plt.grid(True)
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
if taps is None:
|
||||
if nbits not in _mls_taps:
|
||||
known_taps = np.array(list(_mls_taps.keys()))
|
||||
raise ValueError('nbits must be between %s and %s if taps is None'
|
||||
% (known_taps.min(), known_taps.max()))
|
||||
taps = np.array(_mls_taps[nbits], np.intp)
|
||||
else:
|
||||
taps = np.unique(np.array(taps, np.intp))[::-1]
|
||||
if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
|
||||
raise ValueError('taps must be non-empty with values between '
|
||||
'zero and nbits (inclusive)')
|
||||
taps = np.ascontiguousarray(taps) # needed for Cython
|
||||
n_max = (2**nbits) - 1
|
||||
if length is None:
|
||||
length = n_max
|
||||
else:
|
||||
length = int(length)
|
||||
if length < 0:
|
||||
raise ValueError('length must be greater than or equal to 0')
|
||||
# We use int8 instead of bool here because NumPy arrays of bools
|
||||
# don't seem to work nicely with Cython
|
||||
if state is None:
|
||||
state = np.ones(nbits, dtype=np.int8, order='c')
|
||||
else:
|
||||
# makes a copy if need be, ensuring it's 0's and 1's
|
||||
state = np.array(state, dtype=bool, order='c').astype(np.int8)
|
||||
if state.ndim != 1 or state.size != nbits:
|
||||
raise ValueError('state must be a 1-D array of size nbits')
|
||||
if np.all(state == 0):
|
||||
raise ValueError('state must not be all zeros')
|
||||
|
||||
seq = np.empty(length, dtype=np.int8, order='c')
|
||||
state = _max_len_seq_inner(taps, state, nbits, length, seq)
|
||||
return seq, state
|
1299
venv/Lib/site-packages/scipy/signal/_peak_finding.py
Normal file
File diff suppressed because it is too large
351
venv/Lib/site-packages/scipy/signal/_savitzky_golay.py
Normal file
@@ -0,0 +1,351 @@
import numpy as np
|
||||
from scipy.linalg import lstsq
|
||||
from math import factorial
|
||||
from scipy.ndimage import convolve1d
|
||||
from ._arraytools import axis_slice
|
||||
|
||||
|
||||
def savgol_coeffs(window_length, polyorder, deriv=0, delta=1.0, pos=None,
|
||||
use="conv"):
|
||||
"""Compute the coefficients for a 1-D Savitzky-Golay FIR filter.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
window_length : int
|
||||
The length of the filter window (i.e., the number of coefficients).
|
||||
`window_length` must be an odd positive integer.
|
||||
polyorder : int
|
||||
The order of the polynomial used to fit the samples.
|
||||
`polyorder` must be less than `window_length`.
|
||||
deriv : int, optional
|
||||
The order of the derivative to compute. This must be a
|
||||
nonnegative integer. The default is 0, which means to filter
|
||||
the data without differentiating.
|
||||
delta : float, optional
|
||||
The spacing of the samples to which the filter will be applied.
|
||||
This is only used if deriv > 0.
|
||||
pos : int or None, optional
|
||||
If pos is not None, it specifies evaluation position within the
|
||||
window. The default is the middle of the window.
|
||||
use : str, optional
|
||||
Either 'conv' or 'dot'. This argument chooses the order of the
|
||||
coefficients. The default is 'conv', which means that the
|
||||
coefficients are ordered to be used in a convolution. With
|
||||
use='dot', the order is reversed, so the filter is applied by
|
||||
dotting the coefficients with the data set.
|
||||
|
||||
Returns
|
||||
-------
|
||||
coeffs : 1-D ndarray
|
||||
The filter coefficients.
|
||||
|
||||
References
|
||||
----------
|
||||
A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by
|
||||
Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8),
|
||||
pp 1627-1639.
|
||||
|
||||
See Also
|
||||
--------
|
||||
savgol_filter
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
.. versionadded:: 0.14.0
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.signal import savgol_coeffs
|
||||
>>> savgol_coeffs(5, 2)
|
||||
array([-0.08571429, 0.34285714, 0.48571429, 0.34285714, -0.08571429])
|
||||
>>> savgol_coeffs(5, 2, deriv=1)
|
||||
array([ 2.00000000e-01, 1.00000000e-01, 2.07548111e-16, -1.00000000e-01,
|
||||
-2.00000000e-01])
|
||||
|
||||
Note that use='dot' simply reverses the coefficients.
|
||||
|
||||
>>> savgol_coeffs(5, 2, pos=3)
|
||||
array([ 0.25714286, 0.37142857, 0.34285714, 0.17142857, -0.14285714])
|
||||
>>> savgol_coeffs(5, 2, pos=3, use='dot')
|
||||
array([-0.14285714, 0.17142857, 0.34285714, 0.37142857, 0.25714286])
|
||||
|
||||
`x` contains data from the parabola x = t**2, sampled at
|
||||
t = -1, 0, 1, 2, 3. `c` holds the coefficients that will compute the
|
||||
derivative at the last position. When dotted with `x` the result should
|
||||
be 6.
|
||||
|
||||
>>> x = np.array([1, 0, 1, 4, 9])
|
||||
>>> c = savgol_coeffs(5, 2, pos=4, deriv=1, use='dot')
|
||||
>>> c.dot(x)
|
||||
6.0
|
||||
"""
|
||||
|
||||
# An alternative method for finding the coefficients when deriv=0 is
|
||||
# t = np.arange(window_length)
|
||||
# unit = (t == pos).astype(int)
|
||||
# coeffs = np.polyval(np.polyfit(t, unit, polyorder), t)
|
||||
# The method implemented here is faster.
|
||||
|
||||
# To recreate the table of sample coefficients shown in the chapter on
|
||||
# the Savitzky-Golay filter in the Numerical Recipes book, use
|
||||
# window_length = nL + nR + 1
|
||||
# pos = nL + 1
|
||||
# c = savgol_coeffs(window_length, M, pos=pos, use='dot')
|
||||
|
||||
if polyorder >= window_length:
|
||||
raise ValueError("polyorder must be less than window_length.")
|
||||
|
||||
halflen, rem = divmod(window_length, 2)
|
||||
|
||||
if rem == 0:
|
||||
raise ValueError("window_length must be odd.")
|
||||
|
||||
if pos is None:
|
||||
pos = halflen
|
||||
|
||||
if not (0 <= pos < window_length):
|
||||
raise ValueError("pos must be nonnegative and less than "
|
||||
"window_length.")
|
||||
|
||||
if use not in ['conv', 'dot']:
|
||||
raise ValueError("`use` must be 'conv' or 'dot'")
|
||||
|
||||
if deriv > polyorder:
|
||||
coeffs = np.zeros(window_length)
|
||||
return coeffs
|
||||
|
||||
# Form the design matrix A. The columns of A are powers of the integers
|
||||
# from -pos to window_length - pos - 1. The powers (i.e., rows) range
|
||||
# from 0 to polyorder. (That is, A is a Vandermonde matrix, but not
|
||||
# necessarily square.)
|
||||
x = np.arange(-pos, window_length - pos, dtype=float)
|
||||
if use == "conv":
|
||||
# Reverse so that result can be used in a convolution.
|
||||
x = x[::-1]
|
||||
|
||||
order = np.arange(polyorder + 1).reshape(-1, 1)
|
||||
A = x ** order
|
||||
|
||||
# y determines which order derivative is returned.
|
||||
y = np.zeros(polyorder + 1)
|
||||
# The coefficient assigned to y[deriv] scales the result to take into
|
||||
# account the order of the derivative and the sample spacing.
|
||||
y[deriv] = factorial(deriv) / (delta ** deriv)
|
||||
|
||||
# Find the least-squares solution of A*c = y
|
||||
coeffs, _, _, _ = lstsq(A, y)
|
||||
|
||||
return coeffs
|
||||
|
||||
|
||||
def _polyder(p, m):
|
||||
"""Differentiate polynomials represented with coefficients.
|
||||
|
||||
p must be a 1-D or 2-D array. In the 2-D case, each column gives
|
||||
the coefficients of a polynomial; the first row holds the coefficients
|
||||
associated with the highest power. m must be a nonnegative integer.
|
||||
(numpy.polyder doesn't handle the 2-D case.)
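For example (a 1-D sketch): with ``p = [3., 2., 1.]``, i.e. the
polynomial ``3*x**2 + 2*x + 1``, and ``m = 1``, the result is
``[6., 2.]``, the coefficients of the derivative ``6*x + 2``.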
|
||||
"""
|
||||
|
||||
if m == 0:
|
||||
result = p
|
||||
else:
|
||||
n = len(p)
|
||||
if n <= m:
|
||||
result = np.zeros_like(p[:1, ...])
|
||||
else:
|
||||
dp = p[:-m].copy()
|
||||
for k in range(m):
|
||||
rng = np.arange(n - k - 1, m - k - 1, -1)
|
||||
dp *= rng.reshape((n - m,) + (1,) * (p.ndim - 1))
|
||||
result = dp
|
||||
return result
|
||||
|
||||
|
||||
def _fit_edge(x, window_start, window_stop, interp_start, interp_stop,
|
||||
axis, polyorder, deriv, delta, y):
|
||||
"""
|
||||
Given an N-d array `x` and the specification of a slice of `x` from
|
||||
`window_start` to `window_stop` along `axis`, create an interpolating
|
||||
polynomial of each 1-D slice, and evaluate that polynomial in the slice
|
||||
from `interp_start` to `interp_stop`. Put the result into the
|
||||
corresponding slice of `y`.
|
||||
"""
|
||||
|
||||
# Get the edge into a (window_length, -1) array.
|
||||
x_edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis)
|
||||
if axis == 0 or axis == -x.ndim:
|
||||
xx_edge = x_edge
|
||||
swapped = False
|
||||
else:
|
||||
xx_edge = x_edge.swapaxes(axis, 0)
|
||||
swapped = True
|
||||
xx_edge = xx_edge.reshape(xx_edge.shape[0], -1)
|
||||
|
||||
# Fit the edges. poly_coeffs has shape (polyorder + 1, -1),
|
||||
# where '-1' is the same as in xx_edge.
|
||||
poly_coeffs = np.polyfit(np.arange(0, window_stop - window_start),
|
||||
xx_edge, polyorder)
|
||||
|
||||
if deriv > 0:
|
||||
poly_coeffs = _polyder(poly_coeffs, deriv)
|
||||
|
||||
# Compute the interpolated values for the edge.
|
||||
i = np.arange(interp_start - window_start, interp_stop - window_start)
|
||||
values = np.polyval(poly_coeffs, i.reshape(-1, 1)) / (delta ** deriv)
|
||||
|
||||
# Now put the values into the appropriate slice of y.
|
||||
# First reshape values to match y.
|
||||
shp = list(y.shape)
|
||||
shp[0], shp[axis] = shp[axis], shp[0]
|
||||
values = values.reshape(interp_stop - interp_start, *shp[1:])
|
||||
if swapped:
|
||||
values = values.swapaxes(0, axis)
|
||||
# Get a view of the data to be replaced by values.
|
||||
y_edge = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis)
|
||||
y_edge[...] = values
|
||||
|
||||
|
||||
def _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y):
|
||||
"""
|
||||
Use polynomial interpolation of x at the low and high ends of the axis
|
||||
to fill in the halflen values in y.
|
||||
|
||||
This function just calls _fit_edge twice, once for each end of the axis.
|
||||
"""
|
||||
halflen = window_length // 2
|
||||
_fit_edge(x, 0, window_length, 0, halflen, axis,
|
||||
polyorder, deriv, delta, y)
|
||||
n = x.shape[axis]
|
||||
_fit_edge(x, n - window_length, n, n - halflen, n, axis,
|
||||
polyorder, deriv, delta, y)
|
||||
|
||||
|
||||
def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0,
|
||||
axis=-1, mode='interp', cval=0.0):
|
||||
""" Apply a Savitzky-Golay filter to an array.
|
||||
|
||||
This is a 1-D filter. If `x` has dimension greater than 1, `axis`
|
||||
determines the axis along which the filter is applied.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array_like
|
||||
The data to be filtered. If `x` is not a single or double precision
|
||||
floating point array, it will be converted to type ``numpy.float64``
|
||||
before filtering.
|
||||
window_length : int
|
||||
The length of the filter window (i.e., the number of coefficients).
|
||||
`window_length` must be a positive odd integer. If `mode` is 'interp',
|
||||
`window_length` must be less than or equal to the size of `x`.
|
||||
polyorder : int
|
||||
The order of the polynomial used to fit the samples.
|
||||
`polyorder` must be less than `window_length`.
|
||||
deriv : int, optional
|
||||
The order of the derivative to compute. This must be a
|
||||
nonnegative integer. The default is 0, which means to filter
|
||||
the data without differentiating.
|
||||
delta : float, optional
|
||||
The spacing of the samples to which the filter will be applied.
|
||||
This is only used if deriv > 0. Default is 1.0.
|
||||
axis : int, optional
|
||||
The axis of the array `x` along which the filter is to be applied.
|
||||
Default is -1.
|
||||
mode : str, optional
|
||||
Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'. This
|
||||
determines the type of extension to use for the padded signal to
|
||||
which the filter is applied. When `mode` is 'constant', the padding
|
||||
value is given by `cval`. See the Notes for more details on 'mirror',
|
||||
'constant', 'wrap', and 'nearest'.
|
||||
When the 'interp' mode is selected (the default), no extension
|
||||
is used. Instead, a degree `polyorder` polynomial is fit to the
|
||||
last `window_length` values of the edges, and this polynomial is
|
||||
used to evaluate the last `window_length // 2` output values.
|
||||
cval : scalar, optional
|
||||
Value to fill past the edges of the input if `mode` is 'constant'.
|
||||
Default is 0.0.
|
||||
|
||||
Returns
|
||||
-------
|
||||
y : ndarray, same shape as `x`
|
||||
The filtered data.
|
||||
|
||||
See Also
|
||||
--------
|
||||
savgol_coeffs
|
||||
|
||||
Notes
|
||||
-----
|
||||
Details on the `mode` options:
|
||||
|
||||
'mirror':
|
||||
Repeats the values at the edges in reverse order. The value
|
||||
closest to the edge is not included.
|
||||
'nearest':
|
||||
The extension contains the nearest input value.
|
||||
'constant':
|
||||
The extension contains the value given by the `cval` argument.
|
||||
'wrap':
|
||||
The extension contains the values from the other end of the array.
|
||||
|
||||
For example, if the input is [1, 2, 3, 4, 5, 6, 7, 8], and
|
||||
`window_length` is 7, the following shows the extended data for
|
||||
the various `mode` options (assuming `cval` is 0)::
|
||||
|
||||
mode | Ext | Input | Ext
|
||||
-----------+---------+------------------------+---------
|
||||
'mirror' | 4 3 2 | 1 2 3 4 5 6 7 8 | 7 6 5
|
||||
'nearest' | 1 1 1 | 1 2 3 4 5 6 7 8 | 8 8 8
|
||||
'constant' | 0 0 0 | 1 2 3 4 5 6 7 8 | 0 0 0
|
||||
'wrap' | 6 7 8 | 1 2 3 4 5 6 7 8 | 1 2 3
|
||||
|
||||
.. versionadded:: 0.14.0
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.signal import savgol_filter
|
||||
>>> np.set_printoptions(precision=2) # For compact display.
|
||||
>>> x = np.array([2, 2, 5, 2, 1, 0, 1, 4, 9])
|
||||
|
||||
Filter with a window length of 5 and a degree 2 polynomial. Use
|
||||
the defaults for all other parameters.
|
||||
|
||||
>>> savgol_filter(x, 5, 2)
|
||||
array([1.66, 3.17, 3.54, 2.86, 0.66, 0.17, 1. , 4. , 9. ])
|
||||
|
||||
Note that the last five values in x are samples of a parabola, so
|
||||
when mode='interp' (the default) is used with polyorder=2, the last
|
||||
three values are unchanged. Compare that to, for example,
|
||||
`mode='nearest'`:
|
||||
|
||||
>>> savgol_filter(x, 5, 2, mode='nearest')
|
||||
array([1.74, 3.03, 3.54, 2.86, 0.66, 0.17, 1. , 4.6 , 7.97])
|
||||
|
||||
"""
|
||||
if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]:
|
||||
raise ValueError("mode must be 'mirror', 'constant', 'nearest' "
|
||||
"'wrap' or 'interp'.")
|
||||
|
||||
x = np.asarray(x)
|
||||
# Ensure that x is either single or double precision floating point.
|
||||
if x.dtype != np.float64 and x.dtype != np.float32:
|
||||
x = x.astype(np.float64)
|
||||
|
||||
coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)
|
||||
|
||||
if mode == "interp":
|
||||
if window_length > x.size:
|
||||
raise ValueError("If mode is 'interp', window_length must be less "
|
||||
"than or equal to the size of x.")
|
||||
|
||||
# Do not pad. Instead, for the elements within `window_length // 2`
|
||||
# of the ends of the sequence, use the polynomial that is fitted to
|
||||
# the last `window_length` elements.
|
||||
y = convolve1d(x, coeffs, axis=axis, mode="constant")
|
||||
_fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y)
|
||||
else:
|
||||
# Any mode other than 'interp' is passed on to ndimage.convolve1d.
|
||||
y = convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval)
|
||||
|
||||
return y
|
BIN
venv/Lib/site-packages/scipy/signal/_sosfilt.cp36-win32.pyd
Normal file
Binary file not shown.
BIN
venv/Lib/site-packages/scipy/signal/_spectral.cp36-win32.pyd
Normal file
Binary file not shown.
215
venv/Lib/site-packages/scipy/signal/_upfirdn.py
Normal file
@@ -0,0 +1,215 @@
# Code adapted from "upfirdn" python library with permission:
|
||||
#
|
||||
# Copyright (c) 2009, Motorola, Inc
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright notice,
|
||||
# this list of conditions and the following disclaimer.
|
||||
#
|
||||
# * Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
#
|
||||
# * Neither the name of Motorola nor the names of its contributors may be
|
||||
# used to endorse or promote products derived from this software without
|
||||
# specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
||||
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ._upfirdn_apply import _output_len, _apply, mode_enum
|
||||
|
||||
__all__ = ['upfirdn', '_output_len']
|
||||
|
||||
_upfirdn_modes = [
|
||||
'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect',
|
||||
'antisymmetric', 'antireflect', 'line',
|
||||
]
|
||||
|
||||
|
||||
def _pad_h(h, up):
|
||||
"""Store coefficients in a transposed, flipped arrangement.
|
||||
|
||||
For example, suppose upRate is 3, and the
|
||||
input number of coefficients is 10, represented as h[0], ..., h[9].
|
||||
|
||||
Then the internal buffer will look like this::
|
||||
|
||||
h[9], h[6], h[3], h[0], // flipped phase 0 coefs
|
||||
0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded)
|
||||
0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded)
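A doctest-style sketch of the same arrangement (assumes NumPy's
default array repr):

>>> import numpy as np
>>> _pad_h(np.arange(10.), up=3)
array([9., 6., 3., 0., 0., 7., 4., 1., 0., 8., 5., 2.])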
|
||||
|
||||
"""
|
||||
h_padlen = len(h) + (-len(h) % up)
|
||||
h_full = np.zeros(h_padlen, h.dtype)
|
||||
h_full[:len(h)] = h
|
||||
h_full = h_full.reshape(-1, up).T[:, ::-1].ravel()
|
||||
return h_full
|
||||
|
||||
|
||||
def _check_mode(mode):
|
||||
mode = mode.lower()
|
||||
enum = mode_enum(mode)
|
||||
return enum
|
||||
|
||||
|
||||
class _UpFIRDn(object):
|
||||
"""Helper for resampling."""
|
||||
|
||||
def __init__(self, h, x_dtype, up, down):
|
||||
h = np.asarray(h)
|
||||
if h.ndim != 1 or h.size == 0:
|
||||
raise ValueError('h must be 1-D with non-zero length')
|
||||
self._output_type = np.result_type(h.dtype, x_dtype, np.float32)
|
||||
h = np.asarray(h, self._output_type)
|
||||
self._up = int(up)
|
||||
self._down = int(down)
|
||||
if self._up < 1 or self._down < 1:
|
||||
raise ValueError('Both up and down must be >= 1')
|
||||
# This both transposes, and "flips" each phase for filtering
|
||||
self._h_trans_flip = _pad_h(h, self._up)
|
||||
self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip)
|
||||
self._h_len_orig = len(h)
|
||||
|
||||
def apply_filter(self, x, axis=-1, mode='constant', cval=0):
|
||||
"""Apply the prepared filter to the specified axis of N-D signal x."""
|
||||
output_len = _output_len(self._h_len_orig, x.shape[axis],
|
||||
self._up, self._down)
|
||||
# Explicit use of np.int64 for output_shape dtype avoids OverflowError
|
||||
# when allocating large array on platforms where np.int_ is 32 bits
|
||||
output_shape = np.asarray(x.shape, dtype=np.int64)
|
||||
output_shape[axis] = output_len
|
||||
out = np.zeros(output_shape, dtype=self._output_type, order='C')
|
||||
axis = axis % x.ndim
|
||||
mode = _check_mode(mode)
|
||||
_apply(np.asarray(x, self._output_type),
|
||||
self._h_trans_flip, out,
|
||||
self._up, self._down, axis, mode, cval)
|
||||
return out
|
||||
|
||||
|
||||
def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0):
|
||||
"""Upsample, FIR filter, and downsample.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
h : array_like
|
||||
1-D FIR (finite-impulse response) filter coefficients.
|
||||
x : array_like
|
||||
Input signal array.
|
||||
up : int, optional
|
||||
Upsampling rate. Default is 1.
|
||||
down : int, optional
|
||||
Downsampling rate. Default is 1.
|
||||
axis : int, optional
|
||||
The axis of the input data array along which to apply the
|
||||
linear filter. The filter is applied to each subarray along
|
||||
this axis. Default is -1.
|
||||
mode : str, optional
|
||||
The signal extension mode to use. The set
|
||||
``{"constant", "symmetric", "reflect", "edge", "wrap"}`` correspond to
|
||||
modes provided by `numpy.pad`. ``"smooth"`` implements a smooth
|
||||
extension by extending based on the slope of the last 2 points at each
|
||||
end of the array. ``"antireflect"`` and ``"antisymmetric"`` are
|
||||
anti-symmetric versions of ``"reflect"`` and ``"symmetric"``. The mode
|
||||
`"line"` extends the signal based on a linear trend defined by the
|
||||
first and last points along the ``axis``.
|
||||
|
||||
.. versionadded:: 1.4.0
|
||||
cval : float, optional
|
||||
The constant value to use when ``mode == "constant"``.
|
||||
|
||||
.. versionadded:: 1.4.0
|
||||
|
||||
Returns
|
||||
-------
|
||||
y : ndarray
|
||||
The output signal array. Dimensions will be the same as `x` except
|
||||
for along `axis`, which will change size according to the `h`,
|
||||
`up`, and `down` parameters.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The algorithm is an implementation of the block diagram shown on page 129
|
||||
of the Vaidyanathan text [1]_ (Figure 4.3-8d).
|
||||
|
||||
The direct approach of upsampling by factor of P with zero insertion,
|
||||
FIR filtering of length ``N``, and downsampling by factor of Q is
|
||||
O(N*Q) per output sample. The polyphase implementation used here is
|
||||
O(N/P).
|
||||
|
||||
.. versionadded:: 0.18
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks,
|
||||
Prentice Hall, 1993.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Simple operations:
|
||||
|
||||
>>> from scipy.signal import upfirdn
|
||||
>>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter
|
||||
array([ 1., 2., 3., 2., 1.])
|
||||
>>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion
|
||||
array([ 1., 0., 0., 2., 0., 0., 3., 0., 0.])
|
||||
>>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold
|
||||
array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.])
|
||||
>>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation
|
||||
array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5, 0. ])
|
||||
>>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3
|
||||
array([ 0., 3., 6., 9.])
|
||||
>>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3
|
||||
array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5, 0. ])
|
||||
|
||||
Apply a single filter to multiple signals:
|
||||
|
||||
>>> x = np.reshape(np.arange(8), (4, 2))
|
||||
>>> x
|
||||
array([[0, 1],
|
||||
[2, 3],
|
||||
[4, 5],
|
||||
[6, 7]])
|
||||
|
||||
Apply along the last dimension of ``x``:
|
||||
|
||||
>>> h = [1, 1]
|
||||
>>> upfirdn(h, x, 2)
|
||||
array([[ 0., 0., 1., 1.],
|
||||
[ 2., 2., 3., 3.],
|
||||
[ 4., 4., 5., 5.],
|
||||
[ 6., 6., 7., 7.]])
|
||||
|
||||
Apply along the 0th dimension of ``x``:
|
||||
|
||||
>>> upfirdn(h, x, 2, axis=0)
|
||||
array([[ 0., 1.],
|
||||
[ 0., 1.],
|
||||
[ 2., 3.],
|
||||
[ 2., 3.],
|
||||
[ 4., 5.],
|
||||
[ 4., 5.],
|
||||
[ 6., 7.],
|
||||
[ 6., 7.]])
|
||||
"""
|
||||
x = np.asarray(x)
|
||||
ufd = _UpFIRDn(h, x.dtype, up, down)
|
||||
# This is equivalent to (but faster than) using np.apply_along_axis
|
||||
return ufd.apply_filter(x, axis, mode, cval)
|
451
venv/Lib/site-packages/scipy/signal/bsplines.py
Normal file
@@ -0,0 +1,451 @@
from numpy import (logical_and, asarray, pi, zeros_like,
|
||||
piecewise, array, arctan2, tan, zeros, arange, floor)
|
||||
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
|
||||
less_equal, greater_equal)
|
||||
|
||||
# From splinemodule.c
|
||||
from .spline import cspline2d, sepfir2d
|
||||
|
||||
from scipy.special import comb, gamma
|
||||
|
||||
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
|
||||
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
|
||||
|
||||
|
||||
def factorial(n):
|
||||
return gamma(n + 1)
|
||||
|
||||
|
||||
def spline_filter(Iin, lmbda=5.0):
|
||||
"""Smoothing spline (cubic) filtering of a rank-2 array.
|
||||
|
||||
Filter an input data set, `Iin`, using a (cubic) smoothing spline of
|
||||
fall-off `lmbda`.
|
||||
"""
|
||||
intype = Iin.dtype.char
|
||||
hcol = array([1.0, 4.0, 1.0], 'f') / 6.0
|
||||
if intype in ['F', 'D']:
|
||||
Iin = Iin.astype('F')
|
||||
ckr = cspline2d(Iin.real, lmbda)
|
||||
cki = cspline2d(Iin.imag, lmbda)
|
||||
outr = sepfir2d(ckr, hcol, hcol)
|
||||
outi = sepfir2d(cki, hcol, hcol)
|
||||
out = (outr + 1j * outi).astype(intype)
|
||||
elif intype in ['f', 'd']:
|
||||
ckr = cspline2d(Iin, lmbda)
|
||||
out = sepfir2d(ckr, hcol, hcol)
|
||||
out = out.astype(intype)
|
||||
else:
|
||||
raise TypeError("Invalid data type for Iin")
|
||||
return out
|
||||
|
||||
|
||||
_splinefunc_cache = {}
|
||||
|
||||
|
||||
def _bspline_piecefunctions(order):
|
||||
"""Returns the function defined over the left-side pieces for a bspline of
|
||||
a given order.
|
||||
|
||||
The 0th piece is the first one less than 0. The last piece is a function
|
||||
identical to 0 (returned as the constant 0). (There are order//2 + 2 total
|
||||
pieces).
|
||||
|
||||
Also returns the condition functions that when evaluated return boolean
|
||||
arrays for use with `numpy.piecewise`.
|
||||
"""
|
||||
try:
|
||||
return _splinefunc_cache[order]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def condfuncgen(num, val1, val2):
|
||||
if num == 0:
|
||||
return lambda x: logical_and(less_equal(x, val1),
|
||||
greater_equal(x, val2))
|
||||
elif num == 2:
|
||||
return lambda x: less_equal(x, val2)
|
||||
else:
|
||||
return lambda x: logical_and(less(x, val1),
|
||||
greater_equal(x, val2))
|
||||
|
||||
last = order // 2 + 2
|
||||
if order % 2:
|
||||
startbound = -1.0
|
||||
else:
|
||||
startbound = -0.5
|
||||
condfuncs = [condfuncgen(0, 0, startbound)]
|
||||
bound = startbound
|
||||
for num in range(1, last - 1):
|
||||
condfuncs.append(condfuncgen(1, bound, bound - 1))
|
||||
bound = bound - 1
|
||||
condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
|
||||
|
||||
# final value of bound is used in piecefuncgen below
|
||||
|
||||
# the functions to evaluate are taken from the left-hand side
|
||||
# in the general expression derived from the central difference
|
||||
# operator (because they involve fewer terms).
|
||||
|
||||
fval = factorial(order)
|
||||
|
||||
def piecefuncgen(num):
|
||||
Mk = order // 2 - num
|
||||
if (Mk < 0):
|
||||
return 0 # final function is 0
|
||||
coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
|
||||
for k in range(Mk + 1)]
|
||||
shifts = [-bound - k for k in range(Mk + 1)]
|
||||
|
||||
def thefunc(x):
|
||||
res = 0.0
|
||||
for k in range(Mk + 1):
|
||||
res += coeffs[k] * (x + shifts[k]) ** order
|
||||
return res
|
||||
return thefunc
|
||||
|
||||
funclist = [piecefuncgen(k) for k in range(last)]
|
||||
|
||||
_splinefunc_cache[order] = (funclist, condfuncs)
|
||||
|
||||
return funclist, condfuncs
|
||||
|
||||
|
||||
def bspline(x, n):
|
||||
"""B-spline basis function of order n.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Uses numpy.piecewise and an automatic function generator.
|
||||
|
||||
"""
|
||||
ax = -abs(asarray(x))
|
||||
# number of pieces on the left-side is (n+1)/2
|
||||
funclist, condfuncs = _bspline_piecefunctions(n)
|
||||
condlist = [func(ax) for func in condfuncs]
|
||||
return piecewise(ax, condlist, funclist)
|
||||
|
||||
|
||||
def gauss_spline(x, n):
|
||||
"""Gaussian approximation to B-spline basis function of order n.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
n : int
|
||||
The order of the spline. Must be nonnegative, i.e., n >= 0
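
Notes
-----
The order-`n` B-spline basis is approximated here by a Gaussian whose
variance is ``signsq = (n + 1) / 12`` (this mirrors the expression
evaluated in the implementation below)::

    gauss_spline(x, n) = exp(-x**2 / (2 * signsq)) / sqrt(2 * pi * signsq)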
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen
|
||||
F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In:
|
||||
Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational
|
||||
Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer
|
||||
Science, vol 4485. Springer, Berlin, Heidelberg
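
Examples
--------
A rough numerical check (a minimal sketch; the implementation below uses a
Gaussian with variance ``(n + 1) / 12``, so the match with `bspline` is only
approximate):

>>> import numpy as np
>>> from scipy.signal import gauss_spline, bspline
>>> x = np.linspace(-4, 4, 400)
>>> np.allclose(gauss_spline(x, 3), bspline(x, 3), atol=0.05)
True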
|
||||
"""
|
||||
signsq = (n + 1) / 12.0
|
||||
return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
|
||||
|
||||
|
||||
def cubic(x):
|
||||
"""A cubic B-spline.
|
||||
|
||||
This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
|
||||
"""
|
||||
ax = abs(asarray(x))
|
||||
res = zeros_like(ax)
|
||||
cond1 = less(ax, 1)
|
||||
if cond1.any():
|
||||
ax1 = ax[cond1]
|
||||
res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1)
|
||||
cond2 = ~cond1 & less(ax, 2)
|
||||
if cond2.any():
|
||||
ax2 = ax[cond2]
|
||||
res[cond2] = 1.0 / 6 * (2 - ax2) ** 3
|
||||
return res
|
||||
|
||||
|
||||
def quadratic(x):
|
||||
"""A quadratic B-spline.
|
||||
|
||||
This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
|
||||
"""
|
||||
ax = abs(asarray(x))
|
||||
res = zeros_like(ax)
|
||||
cond1 = less(ax, 0.5)
|
||||
if cond1.any():
|
||||
ax1 = ax[cond1]
|
||||
res[cond1] = 0.75 - ax1 ** 2
|
||||
cond2 = ~cond1 & less(ax, 1.5)
|
||||
if cond2.any():
|
||||
ax2 = ax[cond2]
|
||||
res[cond2] = (ax2 - 1.5) ** 2 / 2.0
|
||||
return res
|
||||
|
||||
|
||||
def _coeff_smooth(lam):
|
||||
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
|
||||
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
|
||||
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
|
||||
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
|
||||
return rho, omeg
|
||||
|
||||
|
||||
def _hc(k, cs, rho, omega):
|
||||
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
|
||||
greater(k, -1))
|
||||
|
||||
|
||||
def _hs(k, cs, rho, omega):
|
||||
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
|
||||
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
|
||||
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
|
||||
ak = abs(k)
|
||||
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
|
||||
|
||||
|
||||
def _cubic_smooth_coeff(signal, lamb):
|
||||
rho, omega = _coeff_smooth(lamb)
|
||||
cs = 1 - 2 * rho * cos(omega) + rho * rho
|
||||
K = len(signal)
|
||||
yp = zeros((K,), signal.dtype.char)
|
||||
k = arange(K)
|
||||
yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
|
||||
add.reduce(_hc(k + 1, cs, rho, omega) * signal))
|
||||
|
||||
yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
|
||||
_hc(1, cs, rho, omega) * signal[1] +
|
||||
add.reduce(_hc(k + 2, cs, rho, omega) * signal))
|
||||
|
||||
for n in range(2, K):
|
||||
yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
|
||||
rho * rho * yp[n - 2])
|
||||
|
||||
y = zeros((K,), signal.dtype.char)
|
||||
|
||||
y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
|
||||
_hs(k + 1, cs, rho, omega)) * signal[::-1])
|
||||
y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
|
||||
_hs(k + 2, cs, rho, omega)) * signal[::-1])
|
||||
|
||||
for n in range(K - 3, -1, -1):
|
||||
y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
|
||||
rho * rho * y[n + 2])
|
||||
|
||||
return y
|
||||
|
||||
|
||||
def _cubic_coeff(signal):
|
||||
zi = -2 + sqrt(3)
|
||||
K = len(signal)
|
||||
yplus = zeros((K,), signal.dtype.char)
|
||||
powers = zi ** arange(K)
|
||||
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
|
||||
for k in range(1, K):
|
||||
yplus[k] = signal[k] + zi * yplus[k - 1]
|
||||
output = zeros((K,), signal.dtype)
|
||||
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
|
||||
for k in range(K - 2, -1, -1):
|
||||
output[k] = zi * (output[k + 1] - yplus[k])
|
||||
return output * 6.0
|
||||
|
||||
|
||||
def _quadratic_coeff(signal):
|
||||
zi = -3 + 2 * sqrt(2.0)
|
||||
K = len(signal)
|
||||
yplus = zeros((K,), signal.dtype.char)
|
||||
powers = zi ** arange(K)
|
||||
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
|
||||
for k in range(1, K):
|
||||
yplus[k] = signal[k] + zi * yplus[k - 1]
|
||||
output = zeros((K,), signal.dtype.char)
|
||||
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
|
||||
for k in range(K - 2, -1, -1):
|
||||
output[k] = zi * (output[k + 1] - yplus[k])
|
||||
return output * 8.0
|
||||
|
||||
|
||||
def cspline1d(signal, lamb=0.0):
|
||||
"""
|
||||
Compute cubic spline coefficients for rank-1 array.
|
||||
|
||||
Find the cubic spline coefficients for a 1-D signal assuming
|
||||
mirror-symmetric boundary conditions. To obtain the signal back from the
|
||||
spline representation, mirror-symmetric convolve these coefficients with a
length 3 FIR window ``[1.0, 4.0, 1.0] / 6.0``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
signal : ndarray
|
||||
A rank-1 array representing samples of a signal.
|
||||
lamb : float, optional
|
||||
Smoothing coefficient, default is 0.0.
|
||||
|
||||
Returns
|
||||
-------
|
||||
c : ndarray
|
||||
Cubic spline coefficients.
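
Examples
--------
A usage sketch mirroring the `qspline1d` example in this module (exact
values depend on the random noise):

>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import cspline1d, cspline1d_eval
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += np.random.randn(len(sig))*0.05  # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = cspline1d_eval(cspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()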
|
||||
|
||||
"""
|
||||
if lamb != 0.0:
|
||||
return _cubic_smooth_coeff(signal, lamb)
|
||||
else:
|
||||
return _cubic_coeff(signal)
|
||||
|
||||
|
||||
def qspline1d(signal, lamb=0.0):
|
||||
"""Compute quadratic spline coefficients for rank-1 array.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
signal : ndarray
|
||||
A rank-1 array representing samples of a signal.
|
||||
lamb : float, optional
|
||||
Smoothing coefficient (must be zero for now).
|
||||
|
||||
Returns
|
||||
-------
|
||||
c : ndarray
|
||||
Quadratic spline coefficients.
|
||||
|
||||
See Also
|
||||
--------
|
||||
qspline1d_eval : Evaluate a quadratic spline at the new set of points.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Find the quadratic spline coefficients for a 1-D signal assuming
|
||||
mirror-symmetric boundary conditions. To obtain the signal back from the
|
||||
spline representation, mirror-symmetric convolve these coefficients with a
length 3 FIR window ``[1.0, 6.0, 1.0] / 8.0``.
|
||||
|
||||
Examples
|
||||
--------
|
||||
We can filter a signal to reduce and smooth out high-frequency noise with
|
||||
a quadratic spline:
|
||||
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> from scipy.signal import qspline1d, qspline1d_eval
|
||||
>>> sig = np.repeat([0., 1., 0.], 100)
|
||||
>>> sig += np.random.randn(len(sig))*0.05 # add noise
|
||||
>>> time = np.linspace(0, len(sig))
|
||||
>>> filtered = qspline1d_eval(qspline1d(sig), time)
|
||||
>>> plt.plot(sig, label="signal")
|
||||
>>> plt.plot(time, filtered, label="filtered")
|
||||
>>> plt.legend()
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
if lamb != 0.0:
|
||||
raise ValueError("Smoothing quadratic splines not supported yet.")
|
||||
else:
|
||||
return _quadratic_coeff(signal)
|
||||
|
||||
|
||||
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
|
||||
"""Evaluate a spline at the new set of points.
|
||||
|
||||
`dx` is the old sample-spacing while `x0` was the old origin. In
|
||||
other words, the old sample points (knot points) for which the `cj`
|
||||
represent spline coefficients were at equally-spaced points of::
|
||||
|
||||
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
|
||||
|
||||
Edges are handled using mirror-symmetric boundary conditions.
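
A minimal usage sketch (here the coefficients come from `cspline1d`, with
the default knot spacing ``dx=1.0`` and origin ``x0=0``):

>>> import numpy as np
>>> from scipy.signal import cspline1d, cspline1d_eval
>>> y = np.array([1.0, 2.0, 0.5, 3.0, 4.0])
>>> cj = cspline1d(y)
>>> xnew = np.linspace(0, 4, 9)   # includes points between the knots
>>> yi = cspline1d_eval(cj, xnew, dx=1.0, x0=0)
>>> yi.shape
(9,)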
|
||||
|
||||
"""
|
||||
newx = (asarray(newx) - x0) / float(dx)
|
||||
res = zeros_like(newx, dtype=cj.dtype)
|
||||
if res.size == 0:
|
||||
return res
|
||||
N = len(cj)
|
||||
cond1 = newx < 0
|
||||
cond2 = newx > (N - 1)
|
||||
cond3 = ~(cond1 | cond2)
|
||||
# handle general mirror-symmetry
|
||||
res[cond1] = cspline1d_eval(cj, -newx[cond1])
|
||||
res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
|
||||
newx = newx[cond3]
|
||||
if newx.size == 0:
|
||||
return res
|
||||
result = zeros_like(newx, dtype=cj.dtype)
|
||||
jlower = floor(newx - 2).astype(int) + 1
|
||||
for i in range(4):
|
||||
thisj = jlower + i
|
||||
indj = thisj.clip(0, N - 1) # handle edge cases
|
||||
result += cj[indj] * cubic(newx - thisj)
|
||||
res[cond3] = result
|
||||
return res
|
||||
|
||||
|
||||
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
|
||||
"""Evaluate a quadratic spline at the new set of points.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
cj : ndarray
|
||||
Quadratic spline coefficients
|
||||
newx : ndarray
|
||||
New set of points.
|
||||
dx : float, optional
|
||||
Old sample-spacing, the default value is 1.0.
|
||||
x0 : int, optional
|
||||
Old origin, the default value is 0.
|
||||
|
||||
Returns
|
||||
-------
|
||||
res : ndarray
|
||||
Quadratic spline evaluated at the new set of points.
|
||||
|
||||
See Also
|
||||
--------
|
||||
qspline1d : Compute quadratic spline coefficients for rank-1 array.
|
||||
|
||||
Notes
|
||||
-----
|
||||
`dx` is the old sample-spacing while `x0` was the old origin. In
|
||||
other words, the old sample points (knot points) for which the `cj`
|
||||
represent spline coefficients were at equally-spaced points of::
|
||||
|
||||
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
|
||||
|
||||
Edges are handled using mirror-symmetric boundary conditions.
|
||||
|
||||
Examples
|
||||
--------
|
||||
We can filter a signal to reduce and smooth out high-frequency noise with
|
||||
a quadratic spline:
|
||||
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> from scipy.signal import qspline1d, qspline1d_eval
|
||||
>>> sig = np.repeat([0., 1., 0.], 100)
|
||||
>>> sig += np.random.randn(len(sig))*0.05 # add noise
|
||||
>>> time = np.linspace(0, len(sig))
|
||||
>>> filtered = qspline1d_eval(qspline1d(sig), time)
|
||||
>>> plt.plot(sig, label="signal")
|
||||
>>> plt.plot(time, filtered, label="filtered")
|
||||
>>> plt.legend()
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
newx = (asarray(newx) - x0) / dx
|
||||
res = zeros_like(newx)
|
||||
if res.size == 0:
|
||||
return res
|
||||
N = len(cj)
|
||||
cond1 = newx < 0
|
||||
cond2 = newx > (N - 1)
|
||||
cond3 = ~(cond1 | cond2)
|
||||
# handle general mirror-symmetry
|
||||
res[cond1] = qspline1d_eval(cj, -newx[cond1])
|
||||
res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
|
||||
newx = newx[cond3]
|
||||
if newx.size == 0:
|
||||
return res
|
||||
result = zeros_like(newx)
|
||||
jlower = floor(newx - 1.5).astype(int) + 1
|
||||
for i in range(3):
|
||||
thisj = jlower + i
|
||||
indj = thisj.clip(0, N - 1) # handle edge cases
|
||||
result += cj[indj] * quadratic(newx - thisj)
|
||||
res[cond3] = result
|
||||
return res
|
venv/Lib/site-packages/scipy/signal/filter_design.py (4891 lines, new file; diff suppressed because it is too large)
venv/Lib/site-packages/scipy/signal/fir_filter_design.py (1262 lines, new file; diff suppressed because it is too large)
venv/Lib/site-packages/scipy/signal/lti_conversion.py (502 lines, new file)
@@ -0,0 +1,502 @@
"""
|
||||
ltisys -- a collection of functions to convert linear time invariant systems
|
||||
from one representation to another.
|
||||
"""
|
||||
import numpy
|
||||
import numpy as np
|
||||
from numpy import (r_, eye, atleast_2d, poly, dot,
|
||||
asarray, prod, zeros, array, outer)
|
||||
from scipy import linalg
|
||||
|
||||
from .filter_design import tf2zpk, zpk2tf, normalize
|
||||
|
||||
|
||||
__all__ = ['tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk',
|
||||
'cont2discrete']
|
||||
|
||||
|
||||
def tf2ss(num, den):
|
||||
r"""Transfer function to state-space representation.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
num, den : array_like
|
||||
Sequences representing the coefficients of the numerator and
|
||||
denominator polynomials, in order of descending degree. The
|
||||
denominator needs to be at least as long as the numerator.
|
||||
|
||||
Returns
|
||||
-------
|
||||
A, B, C, D : ndarray
|
||||
State space representation of the system, in controller canonical
|
||||
form.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Convert the transfer function:
|
||||
|
||||
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
|
||||
|
||||
>>> num = [1, 3, 3]
|
||||
>>> den = [1, 2, 1]
|
||||
|
||||
to the state-space representation:
|
||||
|
||||
.. math::
|
||||
|
||||
\dot{\textbf{x}}(t) =
|
||||
\begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) +
|
||||
\begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\
|
||||
|
||||
\textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) +
|
||||
\begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t)
|
||||
|
||||
>>> from scipy.signal import tf2ss
|
||||
>>> A, B, C, D = tf2ss(num, den)
|
||||
>>> A
|
||||
array([[-2., -1.],
|
||||
[ 1., 0.]])
|
||||
>>> B
|
||||
array([[ 1.],
|
||||
[ 0.]])
|
||||
>>> C
|
||||
array([[ 1., 2.]])
|
||||
>>> D
|
||||
array([[ 1.]])
|
||||
"""
|
||||
# Controller canonical state-space representation.
|
||||
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
|
||||
# states are found by asserting that X(s) = U(s) / D(s)
|
||||
# then Y(s) = N(s) * X(s)
|
||||
#
|
||||
# A, B, C, and D follow quite naturally.
|
||||
#
|
||||
num, den = normalize(num, den) # Strips zeros, checks arrays
|
||||
nn = len(num.shape)
|
||||
if nn == 1:
|
||||
num = asarray([num], num.dtype)
|
||||
M = num.shape[1]
|
||||
K = len(den)
|
||||
if M > K:
|
||||
msg = "Improper transfer function. `num` is longer than `den`."
|
||||
raise ValueError(msg)
|
||||
if M == 0 or K == 0: # Null system
|
||||
return (array([], float), array([], float), array([], float),
|
||||
array([], float))
|
||||
|
||||
# pad numerator to have same number of columns as denominator
|
||||
num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]
|
||||
|
||||
if num.shape[-1] > 0:
|
||||
D = atleast_2d(num[:, 0])
|
||||
|
||||
else:
|
||||
# We don't assign it an empty array because this system
|
||||
# is not 'null'. It just doesn't have a non-zero D
|
||||
# matrix. Thus, it should have a non-zero shape so that
|
||||
# it can be operated on by functions like 'ss2tf'
|
||||
D = array([[0]], float)
|
||||
|
||||
if K == 1:
|
||||
D = D.reshape(num.shape)
|
||||
|
||||
return (zeros((1, 1)), zeros((1, D.shape[1])),
|
||||
zeros((D.shape[0], 1)), D)
|
||||
|
||||
frow = -array([den[1:]])
|
||||
A = r_[frow, eye(K - 2, K - 1)]
|
||||
B = eye(K - 1, 1)
|
||||
C = num[:, 1:] - outer(num[:, 0], den[1:])
|
||||
D = D.reshape((C.shape[0], B.shape[1]))
|
||||
|
||||
return A, B, C, D
|
||||
|
||||
|
||||
def _none_to_empty_2d(arg):
|
||||
if arg is None:
|
||||
return zeros((0, 0))
|
||||
else:
|
||||
return arg
|
||||
|
||||
|
||||
def _atleast_2d_or_none(arg):
|
||||
if arg is not None:
|
||||
return atleast_2d(arg)
|
||||
|
||||
|
||||
def _shape_or_none(M):
|
||||
if M is not None:
|
||||
return M.shape
|
||||
else:
|
||||
return (None,) * 2
|
||||
|
||||
|
||||
def _choice_not_none(*args):
|
||||
for arg in args:
|
||||
if arg is not None:
|
||||
return arg
|
||||
|
||||
|
||||
def _restore(M, shape):
|
||||
if M.shape == (0, 0):
|
||||
return zeros(shape)
|
||||
else:
|
||||
if M.shape != shape:
|
||||
raise ValueError("The input arrays have incompatible shapes.")
|
||||
return M
|
||||
|
||||
|
||||
def abcd_normalize(A=None, B=None, C=None, D=None):
|
||||
"""Check state-space matrices and ensure they are 2-D.
|
||||
|
||||
If enough information on the system is provided, that is, enough
|
||||
properly-shaped arrays are passed to the function, the missing ones
|
||||
are built from this information, ensuring the correct number of
|
||||
rows and columns. Otherwise a ValueError is raised.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
A, B, C, D : array_like, optional
|
||||
State-space matrices. All of them are None (missing) by default.
|
||||
See `ss2tf` for format.
|
||||
|
||||
Returns
|
||||
-------
|
||||
A, B, C, D : array
|
||||
Properly shaped state-space matrices.
|
||||
|
||||
Raises
|
||||
------
|
||||
ValueError
|
||||
If not enough information on the system was provided.
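
Examples
--------
A minimal sketch (the missing `D` is inferred as a 1-by-1 zero matrix from
the shapes of the other arrays):

>>> from scipy.signal import abcd_normalize
>>> A, B, C, D = abcd_normalize([[0.0, 1.0], [-2.0, -3.0]],
...                             [[0.0], [1.0]], [[1.0, 0.0]])
>>> D.shape
(1, 1)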
|
||||
|
||||
"""
|
||||
A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D))
|
||||
|
||||
MA, NA = _shape_or_none(A)
|
||||
MB, NB = _shape_or_none(B)
|
||||
MC, NC = _shape_or_none(C)
|
||||
MD, ND = _shape_or_none(D)
|
||||
|
||||
p = _choice_not_none(MA, MB, NC)
|
||||
q = _choice_not_none(NB, ND)
|
||||
r = _choice_not_none(MC, MD)
|
||||
if p is None or q is None or r is None:
|
||||
raise ValueError("Not enough information on the system.")
|
||||
|
||||
A, B, C, D = map(_none_to_empty_2d, (A, B, C, D))
|
||||
A = _restore(A, (p, p))
|
||||
B = _restore(B, (p, q))
|
||||
C = _restore(C, (r, p))
|
||||
D = _restore(D, (r, q))
|
||||
|
||||
return A, B, C, D
|
||||
|
||||
|
||||
def ss2tf(A, B, C, D, input=0):
|
||||
r"""State-space to transfer function.
|
||||
|
||||
A, B, C, D defines a linear state-space system with `p` inputs,
|
||||
`q` outputs, and `n` state variables.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
A : array_like
|
||||
State (or system) matrix of shape ``(n, n)``
|
||||
B : array_like
|
||||
Input matrix of shape ``(n, p)``
|
||||
C : array_like
|
||||
Output matrix of shape ``(q, n)``
|
||||
D : array_like
|
||||
Feedthrough (or feedforward) matrix of shape ``(q, p)``
|
||||
input : int, optional
|
||||
For multiple-input systems, the index of the input to use.
|
||||
|
||||
Returns
|
||||
-------
|
||||
num : 2-D ndarray
|
||||
Numerator(s) of the resulting transfer function(s). `num` has one row
|
||||
for each of the system's outputs. Each row is a sequence representation
|
||||
of the numerator polynomial.
|
||||
den : 1-D ndarray
|
||||
Denominator of the resulting transfer function(s). `den` is a sequence
|
||||
representation of the denominator polynomial.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Convert the state-space representation:
|
||||
|
||||
.. math::
|
||||
|
||||
\dot{\textbf{x}}(t) =
|
||||
\begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) +
|
||||
\begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\
|
||||
|
||||
\textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) +
|
||||
\begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t)
|
||||
|
||||
>>> A = [[-2, -1], [1, 0]]
|
||||
>>> B = [[1], [0]] # 2-D column vector
|
||||
>>> C = [[1, 2]] # 2-D row vector
|
||||
>>> D = 1
|
||||
|
||||
to the transfer function:
|
||||
|
||||
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
|
||||
|
||||
>>> from scipy.signal import ss2tf
|
||||
>>> ss2tf(A, B, C, D)
|
||||
(array([[1, 3, 3]]), array([ 1., 2., 1.]))
|
||||
"""
|
||||
# transfer function is C (sI - A)**(-1) B + D
|
||||
|
||||
# Check consistency and make them all rank-2 arrays
|
||||
A, B, C, D = abcd_normalize(A, B, C, D)
|
||||
|
||||
nout, nin = D.shape
|
||||
if input >= nin:
|
||||
raise ValueError("System does not have the input specified.")
|
||||
|
||||
# make SIMO from possibly MIMO system.
|
||||
B = B[:, input:input + 1]
|
||||
D = D[:, input:input + 1]
|
||||
|
||||
try:
|
||||
den = poly(A)
|
||||
except ValueError:
|
||||
den = 1
|
||||
|
||||
if (prod(B.shape, axis=0) == 0) and (prod(C.shape, axis=0) == 0):
|
||||
num = numpy.ravel(D)
|
||||
if (prod(D.shape, axis=0) == 0) and (prod(A.shape, axis=0) == 0):
|
||||
den = []
|
||||
return num, den
|
||||
|
||||
num_states = A.shape[0]
|
||||
type_test = A[:, 0] + B[:, 0] + C[0, :] + D
|
||||
num = numpy.zeros((nout, num_states + 1), type_test.dtype)
|
||||
for k in range(nout):
|
||||
Ck = atleast_2d(C[k, :])
|
||||
num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
|
||||
|
||||
return num, den
|
||||
|
||||
|
||||
def zpk2ss(z, p, k):
|
||||
"""Zero-pole-gain representation to state-space representation
|
||||
|
||||
Parameters
|
||||
----------
|
||||
z, p : sequence
|
||||
Zeros and poles.
|
||||
k : float
|
||||
System gain.
|
||||
|
||||
Returns
|
||||
-------
|
||||
A, B, C, D : ndarray
|
||||
State space representation of the system, in controller canonical
|
||||
form.
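
Examples
--------
A minimal sketch (an all-pole system, so the list of zeros is empty):

>>> from scipy.signal import zpk2ss
>>> A, B, C, D = zpk2ss([], [-1.0, -2.0], 3.0)  # H(s) = 3 / ((s+1)*(s+2))
>>> A.shape, B.shape, C.shape, D.shape
((2, 2), (2, 1), (1, 2), (1, 1))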
|
||||
|
||||
"""
|
||||
return tf2ss(*zpk2tf(z, p, k))
|
||||
|
||||
|
||||
def ss2zpk(A, B, C, D, input=0):
|
||||
"""State-space representation to zero-pole-gain representation.
|
||||
|
||||
A, B, C, D defines a linear state-space system with `p` inputs,
|
||||
`q` outputs, and `n` state variables.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
A : array_like
|
||||
State (or system) matrix of shape ``(n, n)``
|
||||
B : array_like
|
||||
Input matrix of shape ``(n, p)``
|
||||
C : array_like
|
||||
Output matrix of shape ``(q, n)``
|
||||
D : array_like
|
||||
Feedthrough (or feedforward) matrix of shape ``(q, p)``
|
||||
input : int, optional
|
||||
For multiple-input systems, the index of the input to use.
|
||||
|
||||
Returns
|
||||
-------
|
||||
z, p : sequence
|
||||
Zeros and poles.
|
||||
k : float
|
||||
System gain.
|
||||
|
||||
"""
|
||||
return tf2zpk(*ss2tf(A, B, C, D, input=input))
|
||||
|
||||
|
||||
def cont2discrete(system, dt, method="zoh", alpha=None):
|
||||
"""
|
||||
Transform a continuous to a discrete state-space system.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
system : a tuple describing the system or an instance of `lti`
|
||||
The following gives the number of elements in the tuple and
|
||||
the interpretation:
|
||||
|
||||
* 1: (instance of `lti`)
|
||||
* 2: (num, den)
|
||||
* 3: (zeros, poles, gain)
|
||||
* 4: (A, B, C, D)
|
||||
|
||||
dt : float
|
||||
The discretization time step.
|
||||
method : str, optional
|
||||
Which method to use:
|
||||
|
||||
* gbt: generalized bilinear transformation
|
||||
* bilinear: Tustin's approximation ("gbt" with alpha=0.5)
|
||||
* euler: Euler (or forward differencing) method ("gbt" with alpha=0)
|
||||
* backward_diff: Backwards differencing ("gbt" with alpha=1.0)
|
||||
* zoh: zero-order hold (default)
|
||||
* foh: first-order hold (*versionadded: 1.3.0*)
|
||||
* impulse: equivalent impulse response (*versionadded: 1.3.0*)
|
||||
|
||||
alpha : float within [0, 1], optional
|
||||
The generalized bilinear transformation weighting parameter, which
|
||||
should only be specified with method="gbt", and is ignored otherwise
|
||||
|
||||
Returns
|
||||
-------
|
||||
sysd : tuple containing the discrete system
|
||||
Based on the input type, the output will be of the form
|
||||
|
||||
* (num, den, dt) for transfer function input
|
||||
* (zeros, poles, gain, dt) for zeros-poles-gain input
|
||||
* (A, B, C, D, dt) for state-space system input
|
||||
|
||||
Notes
|
||||
-----
|
||||
By default, the routine uses a Zero-Order Hold (zoh) method to perform
|
||||
the transformation. Alternatively, a generalized bilinear transformation
|
||||
may be used, which includes the common Tustin's bilinear approximation,
|
||||
an Euler's method technique, or a backwards differencing technique.
|
||||
|
||||
The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear
|
||||
approximation is based on [2]_ and [3]_, the First-Order Hold (foh) method
|
||||
is based on [4]_.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models
|
||||
|
||||
.. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf
|
||||
|
||||
.. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized
|
||||
bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754,
|
||||
2009.
|
||||
(https://www.mypolyuweb.hk/~magzhang/Research/ZCC09_IJC.pdf)
|
||||
|
||||
.. [4] G. F. Franklin, J. D. Powell, and M. L. Workman, Digital control
|
||||
of dynamic systems, 3rd ed. Menlo Park, Calif: Addison-Wesley,
|
||||
pp. 204-206, 1998.
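
Examples
--------
A minimal usage sketch (discretizing a first-order lag with the default
zero-order hold; the resulting matrices are not reproduced here):

>>> from scipy.signal import cont2discrete, tf2ss
>>> A, B, C, D = tf2ss([1.0], [1.0, 1.0])  # H(s) = 1 / (s + 1)
>>> Ad, Bd, Cd, Dd, dt = cont2discrete((A, B, C, D), dt=0.1, method='zoh')
>>> dt
0.1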
|
||||
|
||||
"""
|
||||
if len(system) == 1:
|
||||
return system.to_discrete()
|
||||
if len(system) == 2:
|
||||
sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method,
|
||||
alpha=alpha)
|
||||
return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
|
||||
elif len(system) == 3:
|
||||
sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt,
|
||||
method=method, alpha=alpha)
|
||||
return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
|
||||
elif len(system) == 4:
|
||||
a, b, c, d = system
|
||||
else:
|
||||
raise ValueError("First argument must either be a tuple of 2 (tf), "
|
||||
"3 (zpk), or 4 (ss) arrays.")
|
||||
|
||||
if method == 'gbt':
|
||||
if alpha is None:
|
||||
raise ValueError("Alpha parameter must be specified for the "
|
||||
"generalized bilinear transform (gbt) method")
|
||||
elif alpha < 0 or alpha > 1:
|
||||
raise ValueError("Alpha parameter must be within the interval "
|
||||
"[0,1] for the gbt method")
|
||||
|
||||
if method == 'gbt':
|
||||
# This parameter is used repeatedly - compute once here
|
||||
ima = np.eye(a.shape[0]) - alpha*dt*a
|
||||
ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a)
|
||||
bd = linalg.solve(ima, dt*b)
|
||||
|
||||
# Similarly solve for the output equation matrices
|
||||
cd = linalg.solve(ima.transpose(), c.transpose())
|
||||
cd = cd.transpose()
|
||||
dd = d + alpha*np.dot(c, bd)
|
||||
|
||||
elif method == 'bilinear' or method == 'tustin':
|
||||
return cont2discrete(system, dt, method="gbt", alpha=0.5)
|
||||
|
||||
elif method == 'euler' or method == 'forward_diff':
|
||||
return cont2discrete(system, dt, method="gbt", alpha=0.0)
|
||||
|
||||
elif method == 'backward_diff':
|
||||
return cont2discrete(system, dt, method="gbt", alpha=1.0)
|
||||
|
||||
elif method == 'zoh':
|
||||
# Build an exponential matrix
|
||||
em_upper = np.hstack((a, b))
|
||||
|
||||
# Need to stack zeros under the a and b matrices
|
||||
em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])),
|
||||
np.zeros((b.shape[1], b.shape[1]))))
|
||||
|
||||
em = np.vstack((em_upper, em_lower))
|
||||
ms = linalg.expm(dt * em)
|
||||
|
||||
# Dispose of the lower rows
|
||||
ms = ms[:a.shape[0], :]
|
||||
|
||||
ad = ms[:, 0:a.shape[1]]
|
||||
bd = ms[:, a.shape[1]:]
|
||||
|
||||
cd = c
|
||||
dd = d
|
||||
|
||||
elif method == 'foh':
|
||||
# Size parameters for convenience
|
||||
n = a.shape[0]
|
||||
m = b.shape[1]
|
||||
|
||||
# Build an exponential matrix similar to 'zoh' method
|
||||
em_upper = linalg.block_diag(np.block([a, b]) * dt, np.eye(m))
|
||||
em_lower = zeros((m, n + 2 * m))
|
||||
em = np.block([[em_upper], [em_lower]])
|
||||
|
||||
ms = linalg.expm(em)
|
||||
|
||||
# Get the three blocks from upper rows
|
||||
ms11 = ms[:n, 0:n]
|
||||
ms12 = ms[:n, n:n + m]
|
||||
ms13 = ms[:n, n + m:]
|
||||
|
||||
ad = ms11
|
||||
bd = ms12 - ms13 + ms11 @ ms13
|
||||
cd = c
|
||||
dd = d + c @ ms13
|
||||
|
||||
elif method == 'impulse':
|
||||
if not np.allclose(d, 0):
|
||||
raise ValueError("Impulse method is only applicable"
|
||||
"to strictly proper systems")
|
||||
|
||||
ad = linalg.expm(a * dt)
|
||||
bd = ad @ b * dt
|
||||
cd = c
|
||||
dd = c @ b * dt
|
||||
|
||||
else:
|
||||
raise ValueError("Unknown transformation method '%s'" % method)
|
||||
|
||||
return ad, bd, cd, dd, dt
|
venv/Lib/site-packages/scipy/signal/ltisys.py (3840 lines, new file; diff suppressed because it is too large)
venv/Lib/site-packages/scipy/signal/setup.py (42 lines, new file)
@@ -0,0 +1,42 @@
from scipy._build_utils import numpy_nodepr_api
|
||||
|
||||
|
||||
def configuration(parent_package='', top_path=None):
|
||||
from numpy.distutils.misc_util import Configuration
|
||||
from scipy._build_utils.compiler_helper import set_c_flags_hook
|
||||
|
||||
config = Configuration('signal', parent_package, top_path)
|
||||
|
||||
config.add_data_dir('tests')
|
||||
|
||||
config.add_subpackage('windows')
|
||||
|
||||
sigtools = config.add_extension('sigtools',
|
||||
sources=['sigtoolsmodule.c', 'firfilter.c',
|
||||
'medianfilter.c', 'lfilter.c.src',
|
||||
'correlate_nd.c.src'],
|
||||
depends=['sigtools.h'],
|
||||
include_dirs=['.'],
|
||||
**numpy_nodepr_api)
|
||||
sigtools._pre_build_hook = set_c_flags_hook
|
||||
|
||||
config.add_extension(
|
||||
'_spectral', sources=['_spectral.c'])
|
||||
config.add_extension(
|
||||
'_max_len_seq_inner', sources=['_max_len_seq_inner.c'])
|
||||
config.add_extension(
|
||||
'_peak_finding_utils', sources=['_peak_finding_utils.c'])
|
||||
config.add_extension(
|
||||
'_sosfilt', sources=['_sosfilt.c'])
|
||||
config.add_extension(
|
||||
'_upfirdn_apply', sources=['_upfirdn_apply.c'])
|
||||
spline_src = ['splinemodule.c', 'S_bspline_util.c', 'D_bspline_util.c',
|
||||
'C_bspline_util.c', 'Z_bspline_util.c', 'bspline_util.c']
|
||||
config.add_extension('spline', sources=spline_src, **numpy_nodepr_api)
|
||||
|
||||
return config
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
from numpy.distutils.core import setup
|
||||
setup(**configuration(top_path='').todict())
|
venv/Lib/site-packages/scipy/signal/signaltools.py (4282 lines, new file; diff suppressed because it is too large)
venv/Lib/site-packages/scipy/signal/sigtools.cp36-win32.pyd (new binary file, not shown)
venv/Lib/site-packages/scipy/signal/spectral.py (1999 lines, new file; diff suppressed because it is too large)
venv/Lib/site-packages/scipy/signal/spline.cp36-win32.pyd (new binary file, not shown)
venv/Lib/site-packages/scipy/signal/tests/__init__.py (0 lines, new file)
(further binary test-data files not shown)
venv/Lib/site-packages/scipy/signal/tests/mpsig.py (122 lines, new file)
@@ -0,0 +1,122 @@
"""
|
||||
Some signal functions implemented using mpmath.
|
||||
"""
|
||||
|
||||
try:
|
||||
import mpmath # type: ignore[import]
|
||||
except ImportError:
|
||||
mpmath = None
|
||||
|
||||
|
||||
def _prod(seq):
|
||||
"""Returns the product of the elements in the sequence `seq`."""
|
||||
p = 1
|
||||
for elem in seq:
|
||||
p *= elem
|
||||
return p
|
||||
|
||||
|
||||
def _relative_degree(z, p):
|
||||
"""
|
||||
Return relative degree of transfer function from zeros and poles.
|
||||
|
||||
This is simply len(p) - len(z), which must be nonnegative.
|
||||
A ValueError is raised if len(p) < len(z).
|
||||
"""
|
||||
degree = len(p) - len(z)
|
||||
if degree < 0:
|
||||
raise ValueError("Improper transfer function. "
|
||||
"Must have at least as many poles as zeros.")
|
||||
return degree
|
||||
|
||||
|
||||
def _zpkbilinear(z, p, k, fs):
|
||||
"""Bilinear transformation to convert a filter from analog to digital."""
|
||||
|
||||
degree = _relative_degree(z, p)
|
||||
|
||||
fs2 = 2*fs
|
||||
|
||||
# Bilinear transform the poles and zeros
|
||||
z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z]
|
||||
p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p]
|
||||
|
||||
# Any zeros that were at infinity get moved to the Nyquist frequency
|
||||
z_z.extend([-1] * degree)
|
||||
|
||||
# Compensate for gain change
|
||||
numer = _prod(fs2 - z1 for z1 in z)
|
||||
denom = _prod(fs2 - p1 for p1 in p)
|
||||
k_z = k * numer / denom
|
||||
|
||||
return z_z, p_z, k_z.real
|
||||
|
||||
|
||||
def _zpklp2lp(z, p, k, wo=1):
|
||||
"""Transform a lowpass filter to a different cutoff frequency."""
|
||||
|
||||
degree = _relative_degree(z, p)
|
||||
|
||||
# Scale all points radially from origin to shift cutoff frequency
|
||||
z_lp = [wo * z1 for z1 in z]
|
||||
p_lp = [wo * p1 for p1 in p]
|
||||
|
||||
# Each shifted pole decreases gain by wo, each shifted zero increases it.
|
||||
# Cancel out the net change to keep overall gain the same
|
||||
k_lp = k * wo**degree
|
||||
|
||||
return z_lp, p_lp, k_lp
|
||||
|
||||
|
||||
def _butter_analog_poles(n):
|
||||
"""
|
||||
Poles of an analog Butterworth lowpass filter.
|
||||
|
||||
This is the same calculation as scipy.signal.buttap(n) or
|
||||
scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used,
|
||||
and only the poles are returned.
|
||||
"""
|
||||
poles = [-mpmath.exp(1j*mpmath.pi*k/(2*n)) for k in range(-n+1, n, 2)]
|
||||
return poles
|
||||
|
||||
|
||||
def butter_lp(n, Wn):
|
||||
"""
|
||||
Lowpass Butterworth digital filter design.
|
||||
|
||||
This computes the same result as scipy.signal.butter(n, Wn, output='zpk'),
|
||||
but it uses mpmath, and the results are returned in lists instead of NumPy
|
||||
arrays.
|
||||
"""
|
||||
zeros = []
|
||||
poles = _butter_analog_poles(n)
|
||||
k = 1
|
||||
fs = 2
|
||||
warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs)
|
||||
z, p, k = _zpklp2lp(zeros, poles, k, wo=warped)
|
||||
z, p, k = _zpkbilinear(z, p, k, fs=fs)
|
||||
return z, p, k
|
||||
|
||||
|
||||
def zpkfreqz(z, p, k, worN=None):
|
||||
"""
|
||||
Frequency response of a filter in zpk format, using mpmath.
|
||||
|
||||
This is the same calculation as scipy.signal.freqz, but the input is in
|
||||
zpk format, the calculation is performed using mpmath, and the results are
|
||||
returned in lists instead of NumPy arrays.
|
||||
"""
|
||||
if worN is None or isinstance(worN, int):
|
||||
N = worN or 512
|
||||
ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)]
|
||||
else:
|
||||
ws = worN
|
||||
|
||||
h = []
|
||||
for wk in ws:
|
||||
zm1 = mpmath.exp(1j * wk)
|
||||
numer = _prod([zm1 - t for t in z])
|
||||
denom = _prod([zm1 - t for t in p])
|
||||
hk = k * numer / denom
|
||||
h.append(hk)
|
||||
return ws, h
|
venv/Lib/site-packages/scipy/signal/tests/test_array_tools.py (111 lines, new file)
@@ -0,0 +1,111 @@
import numpy as np
|
||||
|
||||
from numpy.testing import assert_array_equal
|
||||
from pytest import raises as assert_raises
|
||||
|
||||
from scipy.signal._arraytools import (axis_slice, axis_reverse,
|
||||
odd_ext, even_ext, const_ext, zero_ext)
|
||||
|
||||
|
||||
class TestArrayTools(object):
|
||||
|
||||
def test_axis_slice(self):
|
||||
a = np.arange(12).reshape(3, 4)
|
||||
|
||||
s = axis_slice(a, start=0, stop=1, axis=0)
|
||||
assert_array_equal(s, a[0:1, :])
|
||||
|
||||
s = axis_slice(a, start=-1, axis=0)
|
||||
assert_array_equal(s, a[-1:, :])
|
||||
|
||||
s = axis_slice(a, start=0, stop=1, axis=1)
|
||||
assert_array_equal(s, a[:, 0:1])
|
||||
|
||||
s = axis_slice(a, start=-1, axis=1)
|
||||
assert_array_equal(s, a[:, -1:])
|
||||
|
||||
s = axis_slice(a, start=0, step=2, axis=0)
|
||||
assert_array_equal(s, a[::2, :])
|
||||
|
||||
s = axis_slice(a, start=0, step=2, axis=1)
|
||||
assert_array_equal(s, a[:, ::2])
|
||||
|
||||
def test_axis_reverse(self):
|
||||
a = np.arange(12).reshape(3, 4)
|
||||
|
||||
r = axis_reverse(a, axis=0)
|
||||
assert_array_equal(r, a[::-1, :])
|
||||
|
||||
r = axis_reverse(a, axis=1)
|
||||
assert_array_equal(r, a[:, ::-1])
|
||||
|
||||
def test_odd_ext(self):
|
||||
a = np.array([[1, 2, 3, 4, 5],
|
||||
[9, 8, 7, 6, 5]])
|
||||
|
||||
odd = odd_ext(a, 2, axis=1)
|
||||
expected = np.array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
|
||||
[11, 10, 9, 8, 7, 6, 5, 4, 3]])
|
||||
assert_array_equal(odd, expected)
|
||||
|
||||
odd = odd_ext(a, 1, axis=0)
|
||||
expected = np.array([[-7, -4, -1, 2, 5],
|
||||
[1, 2, 3, 4, 5],
|
||||
[9, 8, 7, 6, 5],
|
||||
[17, 14, 11, 8, 5]])
|
||||
assert_array_equal(odd, expected)
|
||||
|
||||
assert_raises(ValueError, odd_ext, a, 2, axis=0)
|
||||
assert_raises(ValueError, odd_ext, a, 5, axis=1)
|
||||
|
||||
def test_even_ext(self):
|
||||
a = np.array([[1, 2, 3, 4, 5],
|
||||
[9, 8, 7, 6, 5]])
|
||||
|
||||
even = even_ext(a, 2, axis=1)
|
||||
expected = np.array([[3, 2, 1, 2, 3, 4, 5, 4, 3],
|
||||
[7, 8, 9, 8, 7, 6, 5, 6, 7]])
|
||||
assert_array_equal(even, expected)
|
||||
|
||||
even = even_ext(a, 1, axis=0)
|
||||
expected = np.array([[9, 8, 7, 6, 5],
|
||||
[1, 2, 3, 4, 5],
|
||||
[9, 8, 7, 6, 5],
|
||||
[1, 2, 3, 4, 5]])
|
||||
assert_array_equal(even, expected)
|
||||
|
||||
assert_raises(ValueError, even_ext, a, 2, axis=0)
|
||||
assert_raises(ValueError, even_ext, a, 5, axis=1)
|
||||
|
||||
def test_const_ext(self):
|
||||
a = np.array([[1, 2, 3, 4, 5],
|
||||
[9, 8, 7, 6, 5]])
|
||||
|
||||
const = const_ext(a, 2, axis=1)
|
||||
expected = np.array([[1, 1, 1, 2, 3, 4, 5, 5, 5],
|
||||
[9, 9, 9, 8, 7, 6, 5, 5, 5]])
|
||||
assert_array_equal(const, expected)
|
||||
|
||||
const = const_ext(a, 1, axis=0)
|
||||
expected = np.array([[1, 2, 3, 4, 5],
|
||||
[1, 2, 3, 4, 5],
|
||||
[9, 8, 7, 6, 5],
|
||||
[9, 8, 7, 6, 5]])
|
||||
assert_array_equal(const, expected)
|
||||
|
||||
def test_zero_ext(self):
|
||||
a = np.array([[1, 2, 3, 4, 5],
|
||||
[9, 8, 7, 6, 5]])
|
||||
|
||||
zero = zero_ext(a, 2, axis=1)
|
||||
expected = np.array([[0, 0, 1, 2, 3, 4, 5, 0, 0],
|
||||
[0, 0, 9, 8, 7, 6, 5, 0, 0]])
|
||||
assert_array_equal(zero, expected)
|
||||
|
||||
zero = zero_ext(a, 1, axis=0)
|
||||
expected = np.array([[0, 0, 0, 0, 0],
|
||||
[1, 2, 3, 4, 5],
|
||||
[9, 8, 7, 6, 5],
|
||||
[0, 0, 0, 0, 0]])
|
||||
assert_array_equal(zero, expected)
|
||||
|
venv/Lib/site-packages/scipy/signal/tests/test_bsplines.py (222 lines, new file)
@@ -0,0 +1,222 @@
# pylint: disable=missing-docstring
|
||||
import numpy as np
|
||||
from numpy import array
|
||||
from numpy.testing import (assert_equal,
|
||||
assert_allclose, assert_array_equal,
|
||||
assert_almost_equal)
|
||||
from pytest import raises
|
||||
|
||||
import scipy.signal.bsplines as bsp
|
||||
|
||||
|
||||
class TestBSplines(object):
|
||||
"""Test behaviors of B-splines. The values tested against were returned as of
|
||||
SciPy 1.1.0 and are included for regression testing purposes"""
|
||||
|
||||
def test_factorial(self):
|
||||
# can't all be zero state
|
||||
assert_equal(bsp.factorial(1), 1)
|
||||
|
||||
def test_spline_filter(self):
|
||||
np.random.seed(12457)
|
||||
# Test the type-error branch
|
||||
raises(TypeError, bsp.spline_filter, array([0]), 0)
|
||||
# Test the complex branch
|
||||
data_array_complex = np.random.rand(7, 7) + np.random.rand(7, 7)*1j
|
||||
# make the magnitude exceed 1, and make some negative
|
||||
data_array_complex = 10*(1+1j-2*data_array_complex)
|
||||
result_array_complex = array(
|
||||
[[-4.61489230e-01-1.92994022j, 8.33332443+6.25519943j,
|
||||
6.96300745e-01-9.05576038j, 5.28294849+3.97541356j,
|
||||
5.92165565+7.68240595j, 6.59493160-1.04542804j,
|
||||
9.84503460-5.85946894j],
|
||||
[-8.78262329-8.4295969j, 7.20675516+5.47528982j,
|
||||
-8.17223072+2.06330729j, -4.38633347-8.65968037j,
|
||||
9.89916801-8.91720295j, 2.67755103+8.8706522j,
|
||||
6.24192142+3.76879835j],
|
||||
[-3.15627527+2.56303072j, 9.87658501-0.82838702j,
|
||||
-9.96930313+8.72288895j, 3.17193985+6.42474651j,
|
||||
-4.50919819-6.84576082j, 5.75423431+9.94723988j,
|
||||
9.65979767+6.90665293j],
|
||||
[-8.28993416-6.61064005j, 9.71416473e-01-9.44907284j,
|
||||
-2.38331890+9.25196648j, -7.08868170-0.77403212j,
|
||||
4.89887714+7.05371094j, -1.37062311-2.73505688j,
|
||||
7.70705748+2.5395329j],
|
||||
[2.51528406-1.82964492j, 3.65885472+2.95454836j,
|
||||
5.16786575-1.66362023j, -8.77737999e-03+5.72478867j,
|
||||
4.10533333-3.10287571j, 9.04761887+1.54017115j,
|
||||
-5.77960968e-01-7.87758923j],
|
||||
[9.86398506-3.98528528j, -4.71444130-2.44316983j,
|
||||
-1.68038976-1.12708664j, 2.84695053+1.01725709j,
|
||||
1.14315915-8.89294529j, -3.17127085-5.42145538j,
|
||||
1.91830420-6.16370344j],
|
||||
[7.13875294+2.91851187j, -5.35737514+9.64132309j,
|
||||
-9.66586399+0.70250005j, -9.87717438-2.0262239j,
|
||||
9.93160629+1.5630846j, 4.71948051-2.22050714j,
|
||||
9.49550819+7.8995142j]])
|
||||
# FIXME: for complex types, the computations are done in
|
||||
# single precision (reason unclear). When this is changed,
|
||||
# this test needs updating.
|
||||
assert_allclose(bsp.spline_filter(data_array_complex, 0),
|
||||
result_array_complex, rtol=1e-6)
|
||||
# Test the real branch
|
||||
np.random.seed(12457)
|
||||
data_array_real = np.random.rand(12, 12)
|
||||
# make the magnitude exceed 1, and make some negative
|
||||
data_array_real = 10*(1-2*data_array_real)
|
||||
result_array_real = array(
|
||||
[[-.463312621, 8.33391222, .697290949, 5.28390836,
|
||||
5.92066474, 6.59452137, 9.84406950, -8.78324188,
|
||||
7.20675750, -8.17222994, -4.38633345, 9.89917069],
|
||||
[2.67755154, 6.24192170, -3.15730578, 9.87658581,
|
||||
-9.96930425, 3.17194115, -4.50919947, 5.75423446,
|
||||
9.65979824, -8.29066885, .971416087, -2.38331897],
|
||||
[-7.08868346, 4.89887705, -1.37062289, 7.70705838,
|
||||
2.51526461, 3.65885497, 5.16786604, -8.77715342e-03,
|
||||
4.10533325, 9.04761993, -.577960351, 9.86382519],
|
||||
[-4.71444301, -1.68038985, 2.84695116, 1.14315938,
|
||||
-3.17127091, 1.91830461, 7.13779687, -5.35737482,
|
||||
-9.66586425, -9.87717456, 9.93160672, 4.71948144],
|
||||
[9.49551194, -1.92958436, 6.25427993, -9.05582911,
|
||||
3.97562282, 7.68232426, -1.04514824, -5.86021443,
|
||||
-8.43007451, 5.47528997, 2.06330736, -8.65968112],
|
||||
[-8.91720100, 8.87065356, 3.76879937, 2.56222894,
|
||||
-.828387146, 8.72288903, 6.42474741, -6.84576083,
|
||||
9.94724115, 6.90665380, -6.61084494, -9.44907391],
|
||||
[9.25196790, -.774032030, 7.05371046, -2.73505725,
|
||||
2.53953305, -1.82889155, 2.95454824, -1.66362046,
|
||||
5.72478916, -3.10287679, 1.54017123, -7.87759020],
|
||||
[-3.98464539, -2.44316992, -1.12708657, 1.01725672,
|
||||
-8.89294671, -5.42145629, -6.16370321, 2.91775492,
|
||||
9.64132208, .702499998, -2.02622392, 1.56308431],
|
||||
[-2.22050773, 7.89951554, 5.98970713, -7.35861835,
|
||||
5.45459283, -7.76427957, 3.67280490, -4.05521315,
|
||||
4.51967507, -3.22738749, -3.65080177, 3.05630155],
|
||||
[-6.21240584, -.296796126, -8.34800163, 9.21564563,
|
||||
-3.61958784, -4.77120006, -3.99454057, 1.05021988e-03,
|
||||
-6.95982829, 6.04380797, 8.43181250, -2.71653339],
|
||||
[1.19638037, 6.99718842e-02, 6.72020394, -2.13963198,
|
||||
3.75309875, -5.70076744, 5.92143551, -7.22150575,
|
||||
-3.77114594, -1.11903194, -5.39151466, 3.06620093],
|
||||
[9.86326886, 1.05134482, -7.75950607, -3.64429655,
|
||||
7.81848957, -9.02270373, 3.73399754, -4.71962549,
|
||||
-7.71144306, 3.78263161, 6.46034818, -4.43444731]])
|
||||
assert_allclose(bsp.spline_filter(data_array_real, 0),
|
||||
result_array_real)
|
||||
|
||||
def test_bspline(self):
|
||||
np.random.seed(12458)
|
||||
assert_allclose(bsp.bspline(np.random.rand(1, 1), 2),
|
||||
array([[0.73694695]]))
|
||||
data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j
|
||||
data_array_complex = 0.1*data_array_complex
|
||||
result_array_complex = array(
|
||||
[[0.40882362, 0.41021151, 0.40886708, 0.40905103],
|
||||
[0.40829477, 0.41021230, 0.40966097, 0.40939871],
|
||||
[0.41036803, 0.40901724, 0.40965331, 0.40879513],
|
||||
[0.41032862, 0.40925287, 0.41037754, 0.41027477]])
|
||||
assert_allclose(bsp.bspline(data_array_complex, 10),
|
||||
result_array_complex)
|
||||
|
||||
def test_gauss_spline(self):
|
||||
np.random.seed(12459)
|
||||
assert_almost_equal(bsp.gauss_spline(0, 0), 1.381976597885342)
|
||||
assert_allclose(bsp.gauss_spline(array([1.]), 1), array([0.04865217]))
|
||||
|
||||
def test_cubic(self):
|
||||
np.random.seed(12460)
|
||||
assert_array_equal(bsp.cubic([0]), array([0]))
|
||||
data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j
|
||||
data_array_complex = 1+1j-2*data_array_complex
|
||||
# scaling the magnitude by 10 makes the results close enough to zero,
|
||||
# that the assertion fails, so just make the elements have a mix of
|
||||
# positive and negative imaginary components...
|
||||
result_array_complex = array(
|
||||
[[0.23056563, 0.38414406, 0.08342987, 0.06904847],
|
||||
[0.17240848, 0.47055447, 0.63896278, 0.39756424],
|
||||
[0.12672571, 0.65862632, 0.1116695, 0.09700386],
|
||||
[0.3544116, 0.17856518, 0.1528841, 0.17285762]])
|
||||
assert_allclose(bsp.cubic(data_array_complex), result_array_complex)
|
||||
|
||||
def test_quadratic(self):
|
||||
np.random.seed(12461)
|
||||
assert_array_equal(bsp.quadratic([0]), array([0]))
|
||||
data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j
|
||||
# scaling the magnitude by 10 makes the results all zero,
|
||||
# so just make the elements have a mix of positive and negative
|
||||
# imaginary components...
|
||||
data_array_complex = (1+1j-2*data_array_complex)
|
||||
result_array_complex = array(
|
||||
[[0.23062746, 0.06338176, 0.34902312, 0.31944105],
|
||||
[0.14701256, 0.13277773, 0.29428615, 0.09814697],
|
||||
[0.52873842, 0.06484157, 0.09517566, 0.46420389],
|
||||
[0.09286829, 0.09371954, 0.1422526, 0.16007024]])
|
||||
assert_allclose(bsp.quadratic(data_array_complex),
|
||||
result_array_complex)
|
||||
|
||||
def test_cspline1d(self):
|
||||
np.random.seed(12462)
|
||||
assert_array_equal(bsp.cspline1d(array([0])), [0.])
|
||||
c1d = array([1.21037185, 1.86293902, 2.98834059, 4.11660378,
|
||||
4.78893826])
|
||||
# test lamb != 0
|
||||
assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5]), 1), c1d)
|
||||
c1d0 = array([0.78683946, 2.05333735, 2.99981113, 3.94741812,
|
||||
5.21051638])
|
||||
assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5])), c1d0)
|
||||
|
||||
def test_qspline1d(self):
|
||||
np.random.seed(12463)
|
||||
assert_array_equal(bsp.qspline1d(array([0])), [0.])
|
||||
# test lamb != 0
|
||||
raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), 1.)
|
||||
raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), -1.)
|
||||
q1d0 = array([0.85350007, 2.02441743, 2.99999534, 3.97561055,
|
||||
5.14634135])
|
||||
assert_allclose(bsp.qspline1d(array([1., 2, 3, 4, 5])), q1d0)
|
||||
|
||||
def test_cspline1d_eval(self):
|
||||
np.random.seed(12464)
|
||||
assert_allclose(bsp.cspline1d_eval(array([0., 0]), [0.]), array([0.]))
|
||||
assert_array_equal(bsp.cspline1d_eval(array([1., 0, 1]), []),
|
||||
array([]))
|
||||
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
|
||||
dx = x[1]-x[0]
|
||||
newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
|
||||
-0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
|
||||
6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
|
||||
12.5]
|
||||
y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
|
||||
1.396, 4.094])
|
||||
cj = bsp.cspline1d(y)
|
||||
newy = array([6.203, 4.41570658, 3.514, 5.16924703, 6.864, 6.04643068,
|
||||
4.21600281, 6.04643068, 6.864, 5.16924703, 3.514,
|
||||
4.41570658, 6.203, 6.80717667, 6.759, 6.98971173, 7.433,
|
||||
7.79560142, 7.874, 7.41525761, 5.879, 3.18686814, 1.396,
|
||||
2.24889482, 4.094, 2.24889482, 1.396, 3.18686814, 5.879,
|
||||
7.41525761, 7.874, 7.79560142, 7.433, 6.98971173, 6.759,
|
||||
6.80717667, 6.203, 4.41570658])
|
||||
assert_allclose(bsp.cspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy)
|
||||
|
||||
def test_qspline1d_eval(self):
|
||||
np.random.seed(12465)
|
||||
assert_allclose(bsp.qspline1d_eval(array([0., 0]), [0.]), array([0.]))
|
||||
assert_array_equal(bsp.qspline1d_eval(array([1., 0, 1]), []),
|
||||
array([]))
|
||||
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
|
||||
dx = x[1]-x[0]
|
||||
newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
|
||||
-0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
|
||||
6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
|
||||
12.5]
|
||||
y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
|
||||
1.396, 4.094])
|
||||
cj = bsp.qspline1d(y)
|
||||
newy = array([6.203, 4.49418159, 3.514, 5.18390821, 6.864, 5.91436915,
|
||||
4.21600002, 5.91436915, 6.864, 5.18390821, 3.514,
|
||||
4.49418159, 6.203, 6.71900226, 6.759, 7.03980488, 7.433,
|
||||
7.81016848, 7.874, 7.32718426, 5.879, 3.23872593, 1.396,
|
||||
2.34046013, 4.094, 2.34046013, 1.396, 3.23872593, 5.879,
|
||||
7.32718426, 7.874, 7.81016848, 7.433, 7.03980488, 6.759,
|
||||
6.71900226, 6.203, 4.49418159])
|
||||
assert_allclose(bsp.qspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy)
|
venv/Lib/site-packages/scipy/signal/tests/test_cont2discrete.py (420 lines, new file)
@@ -0,0 +1,420 @@
import numpy as np
|
||||
from numpy.testing import \
|
||||
assert_array_almost_equal, assert_almost_equal, \
|
||||
assert_allclose, assert_equal
|
||||
|
||||
import pytest
|
||||
from scipy.signal import cont2discrete as c2d
|
||||
from scipy.signal import dlsim, ss2tf, ss2zpk, lsim2, lti
|
||||
from scipy.signal import tf2ss, impulse2, dimpulse, step2, dstep
|
||||
|
||||
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
|
||||
# March 29, 2011
|
||||
|
||||
|
||||
class TestC2D(object):
|
||||
def test_zoh(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
ad_truth = 1.648721270700128 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.324360635350064)
|
||||
# c and d in discrete should be equal to their continuous counterparts
|
||||
dt_requested = 0.5
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cc, cd)
|
||||
assert_array_almost_equal(dc, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_foh(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
# True values are verified with Matlab
|
||||
ad_truth = 1.648721270700128 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.420839287058789)
|
||||
cd_truth = cc
|
||||
dd_truth = np.array([[0.260262223725224],
|
||||
[0.297442541400256],
|
||||
[-0.144098411624840]])
|
||||
dt_requested = 0.5
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='foh')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_impulse(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [0.0]])
|
||||
|
||||
# True values are verified with Matlab
|
||||
ad_truth = 1.648721270700128 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.412180317675032)
|
||||
cd_truth = cc
|
||||
dd_truth = np.array([[0.4375], [0.5], [0.3125]])
|
||||
dt_requested = 0.5
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='impulse')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_gbt(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
dt_requested = 0.5
|
||||
alpha = 1.0 / 3.0
|
||||
|
||||
ad_truth = 1.6 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.3)
|
||||
cd_truth = np.array([[0.9, 1.2],
|
||||
[1.2, 1.2],
|
||||
[1.2, 0.3]])
|
||||
dd_truth = np.array([[0.175],
|
||||
[0.2],
|
||||
[-0.205]])
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='gbt', alpha=alpha)
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
|
||||
def test_euler(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
ad_truth = 1.5 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.25)
|
||||
cd_truth = np.array([[0.75, 1.0],
|
||||
[1.0, 1.0],
|
||||
[1.0, 0.25]])
|
||||
dd_truth = dc
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='euler')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_backward_diff(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
ad_truth = 2.0 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.5)
|
||||
cd_truth = np.array([[1.5, 2.0],
|
||||
[2.0, 2.0],
|
||||
[2.0, 0.5]])
|
||||
dd_truth = np.array([[0.875],
|
||||
[1.0],
|
||||
[0.295]])
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='backward_diff')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
|
||||
def test_bilinear(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
ad_truth = (5.0 / 3.0) * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 1.0 / 3.0)
|
||||
cd_truth = np.array([[1.0, 4.0 / 3.0],
|
||||
[4.0 / 3.0, 4.0 / 3.0],
|
||||
[4.0 / 3.0, 1.0 / 3.0]])
|
||||
dd_truth = np.array([[0.291666666666667],
|
||||
[1.0 / 3.0],
|
||||
[-0.121666666666667]])
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='bilinear')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
# Same continuous system again, but change sampling rate
|
||||
|
||||
ad_truth = 1.4 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.2)
|
||||
cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]])
|
||||
dd_truth = np.array([[0.175], [0.2], [-0.205]])
|
||||
|
||||
dt_requested = 1.0 / 3.0
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='bilinear')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_transferfunction(self):
|
||||
numc = np.array([0.25, 0.25, 0.5])
|
||||
denc = np.array([0.75, 0.75, 1.0])
|
||||
|
||||
numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]])
|
||||
dend = np.array([1.0, -1.351394049721225, 0.606530659712634])
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
num, den, dt = c2d((numc, denc), dt_requested, method='zoh')
|
||||
|
||||
assert_array_almost_equal(numd, num)
|
||||
assert_array_almost_equal(dend, den)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_zerospolesgain(self):
|
||||
zeros_c = np.array([0.5, -0.5])
|
||||
poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
|
||||
k_c = 1.0
|
||||
|
||||
zeros_d = [1.23371727305860, 0.735356894461267]
|
||||
poles_d = [0.938148335039729 + 0.346233593780536j,
|
||||
0.938148335039729 - 0.346233593780536j]
|
||||
k_d = 1.0
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
zeros, poles, k, dt = c2d((zeros_c, poles_c, k_c), dt_requested,
|
||||
method='zoh')
|
||||
|
||||
assert_array_almost_equal(zeros_d, zeros)
|
||||
assert_array_almost_equal(poles_d, poles)
|
||||
assert_almost_equal(k_d, k)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_gbt_with_sio_tf_and_zpk(self):
|
||||
"""Test method='gbt' with alpha=0.25 for tf and zpk cases."""
|
||||
# State space coefficients for the continuous SIO system.
|
||||
A = -1.0
|
||||
B = 1.0
|
||||
C = 1.0
|
||||
D = 0.5
|
||||
|
||||
# The continuous transfer function coefficients.
|
||||
cnum, cden = ss2tf(A, B, C, D)
|
||||
|
||||
# Continuous zpk representation
|
||||
cz, cp, ck = ss2zpk(A, B, C, D)
|
||||
|
||||
h = 1.0
|
||||
alpha = 0.25
|
||||
|
||||
# Explicit formulas, in the scalar case.
|
||||
Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A)
|
||||
Bd = h * B / (1 - alpha * h * A)
|
||||
Cd = C / (1 - alpha * h * A)
|
||||
Dd = D + alpha * C * Bd
|
||||
|
||||
# Convert the explicit solution to tf
|
||||
dnum, dden = ss2tf(Ad, Bd, Cd, Dd)
|
||||
|
||||
# Compute the discrete tf using cont2discrete.
|
||||
c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha)
|
||||
|
||||
assert_allclose(dnum, c2dnum)
|
||||
assert_allclose(dden, c2dden)
|
||||
|
||||
# Convert explicit solution to zpk.
|
||||
dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd)
|
||||
|
||||
# Compute the discrete zpk using cont2discrete.
|
||||
c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha)
|
||||
|
||||
assert_allclose(dz, c2dz)
|
||||
assert_allclose(dp, c2dp)
|
||||
assert_allclose(dk, c2dk)
|
||||
|
||||
def test_discrete_approx(self):
|
||||
"""
|
||||
Test that the solution to the discrete approximation of a continuous
|
||||
system actually approximates the solution to the continuous system.
|
||||
This is an indirect test of the correctness of the implementation
|
||||
of cont2discrete.
|
||||
"""
|
||||
|
||||
def u(t):
|
||||
return np.sin(2.5 * t)
|
||||
|
||||
a = np.array([[-0.01]])
|
||||
b = np.array([[1.0]])
|
||||
c = np.array([[1.0]])
|
||||
d = np.array([[0.2]])
|
||||
x0 = 1.0
|
||||
|
||||
t = np.linspace(0, 10.0, 101)
|
||||
dt = t[1] - t[0]
|
||||
u1 = u(t)
|
||||
|
||||
# Use lsim2 to compute the solution to the continuous system.
|
||||
t, yout, xout = lsim2((a, b, c, d), T=t, U=u1, X0=x0,
|
||||
rtol=1e-9, atol=1e-11)
|
||||
|
||||
# Convert the continuous system to a discrete approximation.
|
||||
dsys = c2d((a, b, c, d), dt, method='bilinear')
|
||||
|
||||
# Use dlsim with the pairwise averaged input to compute the output
|
||||
# of the discrete system.
|
||||
u2 = 0.5 * (u1[:-1] + u1[1:])
|
||||
t2 = t[:-1]
|
||||
td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0)
|
||||
|
||||
# ymid is the average of consecutive terms of the "exact" output
|
||||
# computed by lsim2. This is what the discrete approximation
|
||||
# actually approximates.
|
||||
ymid = 0.5 * (yout[:-1] + yout[1:])
|
||||
|
||||
assert_allclose(yd2.ravel(), ymid, rtol=1e-4)
|
||||
|
||||
def test_simo_tf(self):
|
||||
# See gh-5753
|
||||
tf = ([[1, 0], [1, 1]], [1, 1])
|
||||
num, den, dt = c2d(tf, 0.01)
|
||||
|
||||
assert_equal(dt, 0.01) # sanity check
|
||||
assert_allclose(den, [1, -0.99004983], rtol=1e-3)
|
||||
assert_allclose(num, [[1, -1], [1, -0.99004983]], rtol=1e-3)
|
||||
|
||||
def test_multioutput(self):
|
||||
ts = 0.01 # time step
|
||||
|
||||
tf = ([[1, -3], [1, 5]], [1, 1])
|
||||
num, den, dt = c2d(tf, ts)
|
||||
|
||||
tf1 = (tf[0][0], tf[1])
|
||||
num1, den1, dt1 = c2d(tf1, ts)
|
||||
|
||||
tf2 = (tf[0][1], tf[1])
|
||||
num2, den2, dt2 = c2d(tf2, ts)
|
||||
|
||||
# Sanity checks
|
||||
assert_equal(dt, dt1)
|
||||
assert_equal(dt, dt2)
|
||||
|
||||
# Check that we get the same results
|
||||
assert_allclose(num, np.vstack((num1, num2)), rtol=1e-13)
|
||||
|
||||
# Single input, so the denominator should
|
||||
# not be multidimensional like the numerator
|
||||
assert_allclose(den, den1, rtol=1e-13)
|
||||
assert_allclose(den, den2, rtol=1e-13)
|
||||
|
||||
class TestC2dLti(object):
|
||||
def test_c2d_ss(self):
|
||||
# StateSpace
|
||||
A = np.array([[-0.3, 0.1], [0.2, -0.7]])
|
||||
B = np.array([[0], [1]])
|
||||
C = np.array([[1, 0]])
|
||||
D = 0
|
||||
|
||||
A_res = np.array([[0.985136404135682, 0.004876671474795],
|
||||
[0.009753342949590, 0.965629718236502]])
|
||||
B_res = np.array([[0.000122937599964], [0.049135527547844]])
|
||||
|
||||
sys_ssc = lti(A, B, C, D)
|
||||
sys_ssd = sys_ssc.to_discrete(0.05)
|
||||
|
||||
assert_allclose(sys_ssd.A, A_res)
|
||||
assert_allclose(sys_ssd.B, B_res)
|
||||
assert_allclose(sys_ssd.C, C)
|
||||
assert_allclose(sys_ssd.D, D)
|
||||
|
||||
def test_c2d_tf(self):
|
||||
|
||||
sys = lti([0.5, 0.3], [1.0, 0.4])
|
||||
sys = sys.to_discrete(0.005)
|
||||
|
||||
# Matlab results
|
||||
num_res = np.array([0.5, -0.485149004980066])
|
||||
den_res = np.array([1.0, -0.980198673306755])
|
||||
|
||||
# The conversion accumulates noticeable numerical error, so use loose tolerances
|
||||
assert_allclose(sys.den, den_res, atol=0.02)
|
||||
assert_allclose(sys.num, num_res, atol=0.02)
|
||||
|
||||
|
||||
class TestC2dInvariants:
|
||||
# Some test cases for checking the invariances.
|
||||
# Array of triplets: (system, sample time, number of samples)
|
||||
cases = [
|
||||
(tf2ss([1, 1], [1, 1.5, 1]), 0.25, 10),
|
||||
(tf2ss([1, 2], [1, 1.5, 3, 1]), 0.5, 10),
|
||||
(tf2ss(0.1, [1, 1, 2, 1]), 0.5, 10),
|
||||
]
|
||||
|
||||
# Some options for lsim2 and derived routines
|
||||
tolerances = {'rtol': 1e-9, 'atol': 1e-11}
|
||||
|
||||
# Check that systems discretized with the impulse-invariant
|
||||
# method really hold the invariant
|
||||
@pytest.mark.parametrize("sys,sample_time,samples_number", cases)
|
||||
def test_impulse_invariant(self, sys, sample_time, samples_number):
|
||||
time = np.arange(samples_number) * sample_time
|
||||
_, yout_cont = impulse2(sys, T=time, **self.tolerances)
|
||||
_, yout_disc = dimpulse(c2d(sys, sample_time, method='impulse'),
|
||||
n=len(time))
|
||||
assert_allclose(sample_time * yout_cont.ravel(), yout_disc[0].ravel())
|
||||
|
||||
# Step invariant should hold for ZOH discretized systems
|
||||
@pytest.mark.parametrize("sys,sample_time,samples_number", cases)
|
||||
def test_step_invariant(self, sys, sample_time, samples_number):
|
||||
time = np.arange(samples_number) * sample_time
|
||||
_, yout_cont = step2(sys, T=time, **self.tolerances)
|
||||
_, yout_disc = dstep(c2d(sys, sample_time, method='zoh'), n=len(time))
|
||||
assert_allclose(yout_cont.ravel(), yout_disc[0].ravel())
|
||||
|
||||
# Linear invariant should hold for FOH discretized systems
|
||||
@pytest.mark.parametrize("sys,sample_time,samples_number", cases)
|
||||
def test_linear_invariant(self, sys, sample_time, samples_number):
|
||||
time = np.arange(samples_number) * sample_time
|
||||
_, yout_cont, _ = lsim2(sys, T=time, U=time, **self.tolerances)
|
||||
_, yout_disc, _ = dlsim(c2d(sys, sample_time, method='foh'), u=time)
|
||||
assert_allclose(yout_cont.ravel(), yout_disc.ravel())
598
venv/Lib/site-packages/scipy/signal/tests/test_dltisys.py
Normal file
@@ -0,0 +1,598 @@
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
|
||||
# April 4, 2011
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (assert_equal,
|
||||
assert_array_almost_equal, assert_array_equal,
|
||||
assert_allclose, assert_, assert_almost_equal,
|
||||
suppress_warnings)
|
||||
from pytest import raises as assert_raises
|
||||
from scipy.signal import (dlsim, dstep, dimpulse, tf2zpk, lti, dlti,
|
||||
StateSpace, TransferFunction, ZerosPolesGain,
|
||||
dfreqresp, dbode, BadCoefficients)
|
||||
|
||||
|
||||
class TestDLTI(object):
|
||||
|
||||
def test_dlsim(self):
|
||||
|
||||
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
|
||||
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
|
||||
c = np.asarray([[0.1, 0.3]])
|
||||
d = np.asarray([[0.0, -0.1, 0.0]])
|
||||
dt = 0.5
|
||||
|
||||
# Create an input matrix with inputs down the columns (3 cols) and its
|
||||
# respective time input vector
|
||||
u = np.hstack((np.linspace(0, 4.0, num=5)[:, np.newaxis],
|
||||
np.full((5, 1), 0.01),
|
||||
np.full((5, 1), -0.002)))
|
||||
t_in = np.linspace(0, 2.0, num=5)
|
||||
|
||||
# Define the known result
|
||||
yout_truth = np.array([[-0.001,
|
||||
-0.00073,
|
||||
0.039446,
|
||||
0.0915387,
|
||||
0.13195948]]).T
|
||||
xout_truth = np.asarray([[0, 0],
|
||||
[0.0012, 0.0005],
|
||||
[0.40233, 0.00071],
|
||||
[1.163368, -0.079327],
|
||||
[2.2402985, -0.3035679]])
|
||||
|
||||
tout, yout, xout = dlsim((a, b, c, d, dt), u, t_in)
|
||||
|
||||
assert_array_almost_equal(yout_truth, yout)
|
||||
assert_array_almost_equal(xout_truth, xout)
|
||||
assert_array_almost_equal(t_in, tout)
|
||||
|
||||
# Make sure input with single-dimension doesn't raise error
|
||||
dlsim((1, 2, 3), 4)
|
||||
|
||||
# Interpolated control - inputs should have different time steps
|
||||
# than the discrete model uses internally
|
||||
u_sparse = u[[0, 4], :]
|
||||
t_sparse = np.asarray([0.0, 2.0])
|
||||
|
||||
tout, yout, xout = dlsim((a, b, c, d, dt), u_sparse, t_sparse)
|
||||
|
||||
assert_array_almost_equal(yout_truth, yout)
|
||||
assert_array_almost_equal(xout_truth, xout)
|
||||
assert_equal(len(tout), yout.shape[0])
|
||||
|
||||
# Transfer functions (assume dt = 0.5)
|
||||
num = np.asarray([1.0, -0.1])
|
||||
den = np.asarray([0.3, 1.0, 0.2])
|
||||
yout_truth = np.array([[0.0,
|
||||
0.0,
|
||||
3.33333333333333,
|
||||
-4.77777777777778,
|
||||
23.0370370370370]]).T
|
||||
|
||||
# Assume use of the first column of the control input built earlier
|
||||
tout, yout = dlsim((num, den, 0.5), u[:, 0], t_in)
|
||||
|
||||
assert_array_almost_equal(yout, yout_truth)
|
||||
assert_array_almost_equal(t_in, tout)
|
||||
|
||||
# Retest the same with a 1-D input vector
|
||||
uflat = np.asarray(u[:, 0])
|
||||
uflat = uflat.reshape((5,))
|
||||
tout, yout = dlsim((num, den, 0.5), uflat, t_in)
|
||||
|
||||
assert_array_almost_equal(yout, yout_truth)
|
||||
assert_array_almost_equal(t_in, tout)
|
||||
|
||||
# zeros-poles-gain representation
|
||||
zd = np.array([0.5, -0.5])
|
||||
pd = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
|
||||
k = 1.0
|
||||
yout_truth = np.array([[0.0, 1.0, 2.0, 2.25, 2.5]]).T
|
||||
|
||||
tout, yout = dlsim((zd, pd, k, 0.5), u[:, 0], t_in)
|
||||
|
||||
assert_array_almost_equal(yout, yout_truth)
|
||||
assert_array_almost_equal(t_in, tout)
|
||||
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dlsim, system, u)
|
||||
|
||||
def test_dstep(self):
|
||||
|
||||
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
|
||||
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
|
||||
c = np.asarray([[0.1, 0.3]])
|
||||
d = np.asarray([[0.0, -0.1, 0.0]])
|
||||
dt = 0.5
|
||||
|
||||
# Because b.shape[1] == 3, dstep should result in a tuple of three
|
||||
# result vectors
|
||||
yout_step_truth = (np.asarray([0.0, 0.04, 0.052, 0.0404, 0.00956,
|
||||
-0.036324, -0.093318, -0.15782348,
|
||||
-0.226628324, -0.2969374948]),
|
||||
np.asarray([-0.1, -0.075, -0.058, -0.04815,
|
||||
-0.04453, -0.0461895, -0.0521812,
|
||||
-0.061588875, -0.073549579,
|
||||
-0.08727047595]),
|
||||
np.asarray([0.0, -0.01, -0.013, -0.0101, -0.00239,
|
||||
0.009081, 0.0233295, 0.03945587,
|
||||
0.056657081, 0.0742343737]))
|
||||
|
||||
tout, yout = dstep((a, b, c, d, dt), n=10)
|
||||
|
||||
assert_equal(len(yout), 3)
|
||||
|
||||
for i in range(0, len(yout)):
|
||||
assert_equal(yout[i].shape[0], 10)
|
||||
assert_array_almost_equal(yout[i].flatten(), yout_step_truth[i])
|
||||
|
||||
# Check that the other two inputs (tf, zpk) will work as well
|
||||
tfin = ([1.0], [1.0, 1.0], 0.5)
|
||||
yout_tfstep = np.asarray([0.0, 1.0, 0.0])
|
||||
tout, yout = dstep(tfin, n=3)
|
||||
assert_equal(len(yout), 1)
|
||||
assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
|
||||
|
||||
zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
|
||||
tout, yout = dstep(zpkin, n=3)
|
||||
assert_equal(len(yout), 1)
|
||||
assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
|
||||
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dstep, system)
|
||||
|
||||
def test_dimpulse(self):
|
||||
|
||||
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
|
||||
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
|
||||
c = np.asarray([[0.1, 0.3]])
|
||||
d = np.asarray([[0.0, -0.1, 0.0]])
|
||||
dt = 0.5
|
||||
|
||||
# Because b.shape[1] == 3, dimpulse should result in a tuple of three
|
||||
# result vectors
|
||||
yout_imp_truth = (np.asarray([0.0, 0.04, 0.012, -0.0116, -0.03084,
|
||||
-0.045884, -0.056994, -0.06450548,
|
||||
-0.068804844, -0.0703091708]),
|
||||
np.asarray([-0.1, 0.025, 0.017, 0.00985, 0.00362,
|
||||
-0.0016595, -0.0059917, -0.009407675,
|
||||
-0.011960704, -0.01372089695]),
|
||||
np.asarray([0.0, -0.01, -0.003, 0.0029, 0.00771,
|
||||
0.011471, 0.0142485, 0.01612637,
|
||||
0.017201211, 0.0175772927]))
|
||||
|
||||
tout, yout = dimpulse((a, b, c, d, dt), n=10)
|
||||
|
||||
assert_equal(len(yout), 3)
|
||||
|
||||
for i in range(0, len(yout)):
|
||||
assert_equal(yout[i].shape[0], 10)
|
||||
assert_array_almost_equal(yout[i].flatten(), yout_imp_truth[i])
|
||||
|
||||
# Check that the other two inputs (tf, zpk) will work as well
|
||||
tfin = ([1.0], [1.0, 1.0], 0.5)
|
||||
yout_tfimpulse = np.asarray([0.0, 1.0, -1.0])
|
||||
tout, yout = dimpulse(tfin, n=3)
|
||||
assert_equal(len(yout), 1)
|
||||
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
|
||||
|
||||
zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
|
||||
tout, yout = dimpulse(zpkin, n=3)
|
||||
assert_equal(len(yout), 1)
|
||||
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
|
||||
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dimpulse, system)
|
||||
|
||||
def test_dlsim_trivial(self):
|
||||
a = np.array([[0.0]])
|
||||
b = np.array([[0.0]])
|
||||
c = np.array([[0.0]])
|
||||
d = np.array([[0.0]])
|
||||
n = 5
|
||||
u = np.zeros(n).reshape(-1, 1)
|
||||
tout, yout, xout = dlsim((a, b, c, d, 1), u)
|
||||
assert_array_equal(tout, np.arange(float(n)))
|
||||
assert_array_equal(yout, np.zeros((n, 1)))
|
||||
assert_array_equal(xout, np.zeros((n, 1)))
|
||||
|
||||
def test_dlsim_simple1d(self):
|
||||
a = np.array([[0.5]])
|
||||
b = np.array([[0.0]])
|
||||
c = np.array([[1.0]])
|
||||
d = np.array([[0.0]])
|
||||
n = 5
|
||||
u = np.zeros(n).reshape(-1, 1)
|
||||
tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
|
||||
assert_array_equal(tout, np.arange(float(n)))
|
||||
expected = (0.5 ** np.arange(float(n))).reshape(-1, 1)
|
||||
assert_array_equal(yout, expected)
|
||||
assert_array_equal(xout, expected)
|
||||
|
||||
def test_dlsim_simple2d(self):
|
||||
lambda1 = 0.5
|
||||
lambda2 = 0.25
|
||||
a = np.array([[lambda1, 0.0],
|
||||
[0.0, lambda2]])
|
||||
b = np.array([[0.0],
|
||||
[0.0]])
|
||||
c = np.array([[1.0, 0.0],
|
||||
[0.0, 1.0]])
|
||||
d = np.array([[0.0],
|
||||
[0.0]])
|
||||
n = 5
|
||||
u = np.zeros(n).reshape(-1, 1)
|
||||
tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
|
||||
assert_array_equal(tout, np.arange(float(n)))
|
||||
# The analytical solution:
|
||||
expected = (np.array([lambda1, lambda2]) **
|
||||
np.arange(float(n)).reshape(-1, 1))
|
||||
assert_array_equal(yout, expected)
|
||||
assert_array_equal(xout, expected)
|
||||
|
||||
def test_more_step_and_impulse(self):
|
||||
lambda1 = 0.5
|
||||
lambda2 = 0.75
|
||||
a = np.array([[lambda1, 0.0],
|
||||
[0.0, lambda2]])
|
||||
b = np.array([[1.0, 0.0],
|
||||
[0.0, 1.0]])
|
||||
c = np.array([[1.0, 1.0]])
|
||||
d = np.array([[0.0, 0.0]])
|
||||
|
||||
n = 10
|
||||
|
||||
# Check a step response.
|
||||
ts, ys = dstep((a, b, c, d, 1), n=n)
|
||||
|
||||
# Create the exact step response.
|
||||
stp0 = (1.0 / (1 - lambda1)) * (1.0 - lambda1 ** np.arange(n))
|
||||
stp1 = (1.0 / (1 - lambda2)) * (1.0 - lambda2 ** np.arange(n))
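# (For x[k+1] = lam*x[k] + u[k] with a unit step input and x[0] = 0, the
# state is the geometric sum x[k] = (1 - lam**k) / (1 - lam), which is
# exactly stp0/stp1 above.)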
|
||||
|
||||
assert_allclose(ys[0][:, 0], stp0)
|
||||
assert_allclose(ys[1][:, 0], stp1)
|
||||
|
||||
# Check an impulse response with an initial condition.
|
||||
x0 = np.array([1.0, 1.0])
|
||||
ti, yi = dimpulse((a, b, c, d, 1), n=n, x0=x0)
|
||||
|
||||
# Create the exact impulse response.
|
||||
imp = (np.array([lambda1, lambda2]) **
|
||||
np.arange(-1, n + 1).reshape(-1, 1))
|
||||
imp[0, :] = 0.0
|
||||
# Analytical solution to impulse response
|
||||
y0 = imp[:n, 0] + np.dot(imp[1:n + 1, :], x0)
|
||||
y1 = imp[:n, 1] + np.dot(imp[1:n + 1, :], x0)
|
||||
|
||||
assert_allclose(yi[0][:, 0], y0)
|
||||
assert_allclose(yi[1][:, 0], y1)
|
||||
|
||||
# Check that dt=0.1, n=3 gives 3 time values.
|
||||
system = ([1.0], [1.0, -0.5], 0.1)
|
||||
t, (y,) = dstep(system, n=3)
|
||||
assert_allclose(t, [0, 0.1, 0.2])
|
||||
assert_array_equal(y.T, [[0, 1.0, 1.5]])
|
||||
t, (y,) = dimpulse(system, n=3)
|
||||
assert_allclose(t, [0, 0.1, 0.2])
|
||||
assert_array_equal(y.T, [[0, 1, 0.5]])
|
||||
|
||||
|
||||
class TestDlti(object):
|
||||
def test_dlti_instantiation(self):
|
||||
# Test that dlti can be instantiated.
|
||||
|
||||
dt = 0.05
|
||||
# TransferFunction
|
||||
s = dlti([1], [-1], dt=dt)
|
||||
assert_(isinstance(s, TransferFunction))
|
||||
assert_(isinstance(s, dlti))
|
||||
assert_(not isinstance(s, lti))
|
||||
assert_equal(s.dt, dt)
|
||||
|
||||
# ZerosPolesGain
|
||||
s = dlti(np.array([]), np.array([-1]), 1, dt=dt)
|
||||
assert_(isinstance(s, ZerosPolesGain))
|
||||
assert_(isinstance(s, dlti))
|
||||
assert_(not isinstance(s, lti))
|
||||
assert_equal(s.dt, dt)
|
||||
|
||||
# StateSpace
|
||||
s = dlti([1], [-1], 1, 3, dt=dt)
|
||||
assert_(isinstance(s, StateSpace))
|
||||
assert_(isinstance(s, dlti))
|
||||
assert_(not isinstance(s, lti))
|
||||
assert_equal(s.dt, dt)
|
||||
|
||||
# Number of inputs
|
||||
assert_raises(ValueError, dlti, 1)
|
||||
assert_raises(ValueError, dlti, 1, 1, 1, 1, 1)
|
||||
|
||||
|
||||
class TestStateSpaceDisc(object):
|
||||
def test_initialization(self):
|
||||
# Check that all initializations work
|
||||
dt = 0.05
|
||||
StateSpace(1, 1, 1, 1, dt=dt)
|
||||
StateSpace([1], [2], [3], [4], dt=dt)
|
||||
StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]),
|
||||
np.array([[1, 0]]), np.array([[0]]), dt=dt)
|
||||
StateSpace(1, 1, 1, 1, dt=True)
|
||||
|
||||
def test_conversion(self):
|
||||
# Check the conversion functions
|
||||
s = StateSpace(1, 2, 3, 4, dt=0.05)
|
||||
assert_(isinstance(s.to_ss(), StateSpace))
|
||||
assert_(isinstance(s.to_tf(), TransferFunction))
|
||||
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
|
||||
|
||||
# Make sure copies work
|
||||
assert_(StateSpace(s) is not s)
|
||||
assert_(s.to_ss() is not s)
|
||||
|
||||
def test_properties(self):
|
||||
# Test setters/getters for cross class properties.
|
||||
# This implicitly tests to_tf() and to_zpk()
|
||||
|
||||
# Getters
|
||||
s = StateSpace(1, 1, 1, 1, dt=0.05)
|
||||
assert_equal(s.poles, [1])
|
||||
assert_equal(s.zeros, [0])
|
||||
|
||||
|
||||
class TestTransferFunction(object):
|
||||
def test_initialization(self):
|
||||
# Check that all initializations work
|
||||
dt = 0.05
|
||||
TransferFunction(1, 1, dt=dt)
|
||||
TransferFunction([1], [2], dt=dt)
|
||||
TransferFunction(np.array([1]), np.array([2]), dt=dt)
|
||||
TransferFunction(1, 1, dt=True)
|
||||
|
||||
def test_conversion(self):
|
||||
# Check the conversion functions
|
||||
s = TransferFunction([1, 0], [1, -1], dt=0.05)
|
||||
assert_(isinstance(s.to_ss(), StateSpace))
|
||||
assert_(isinstance(s.to_tf(), TransferFunction))
|
||||
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
|
||||
|
||||
# Make sure copies work
|
||||
assert_(TransferFunction(s) is not s)
|
||||
assert_(s.to_tf() is not s)
|
||||
|
||||
def test_properties(self):
|
||||
# Test setters/getters for cross class properties.
|
||||
# This implicitly tests to_ss() and to_zpk()
|
||||
|
||||
# Getters
|
||||
s = TransferFunction([1, 0], [1, -1], dt=0.05)
|
||||
assert_equal(s.poles, [1])
|
||||
assert_equal(s.zeros, [0])
|
||||
|
||||
|
||||
class TestZerosPolesGain(object):
|
||||
def test_initialization(self):
|
||||
# Check that all initializations work
|
||||
dt = 0.05
|
||||
ZerosPolesGain(1, 1, 1, dt=dt)
|
||||
ZerosPolesGain([1], [2], 1, dt=dt)
|
||||
ZerosPolesGain(np.array([1]), np.array([2]), 1, dt=dt)
|
||||
ZerosPolesGain(1, 1, 1, dt=True)
|
||||
|
||||
def test_conversion(self):
|
||||
# Check the conversion functions
|
||||
s = ZerosPolesGain(1, 2, 3, dt=0.05)
|
||||
assert_(isinstance(s.to_ss(), StateSpace))
|
||||
assert_(isinstance(s.to_tf(), TransferFunction))
|
||||
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
|
||||
|
||||
# Make sure copies work
|
||||
assert_(ZerosPolesGain(s) is not s)
|
||||
assert_(s.to_zpk() is not s)
|
||||
|
||||
|
||||
class Test_dfreqresp(object):
|
||||
|
||||
def test_manual(self):
|
||||
# Test dfreqresp() real part calculation (manual sanity check).
|
||||
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
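# e.g. the first expected value below is H(exp(0.1j)) =
# 1 / (cos(0.1) + 1j*sin(0.1) - 0.2) ~= 1.2383 - 0.1555j.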
|
||||
system = TransferFunction(1, [1, -0.2], dt=0.1)
|
||||
w = [0.1, 1, 10]
|
||||
w, H = dfreqresp(system, w=w)
|
||||
|
||||
# test real
|
||||
expected_re = [1.2383, 0.4130, -0.7553]
|
||||
assert_almost_equal(H.real, expected_re, decimal=4)
|
||||
|
||||
# test imag
|
||||
expected_im = [-0.1555, -1.0214, 0.3955]
|
||||
assert_almost_equal(H.imag, expected_im, decimal=4)
|
||||
|
||||
def test_auto(self):
|
||||
# Test dfreqresp() real part calculation.
|
||||
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
|
||||
system = TransferFunction(1, [1, -0.2], dt=0.1)
|
||||
w = [0.1, 1, 10, 100]
|
||||
w, H = dfreqresp(system, w=w)
|
||||
jw = np.exp(w * 1j)
|
||||
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
|
||||
|
||||
# test real
|
||||
expected_re = y.real
|
||||
assert_almost_equal(H.real, expected_re)
|
||||
|
||||
# test imag
|
||||
expected_im = y.imag
|
||||
assert_almost_equal(H.imag, expected_im)
|
||||
|
||||
def test_freq_range(self):
|
||||
# Test that freqresp() finds a reasonable frequency range.
|
||||
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
|
||||
# Expected range is from 0 to pi (exclusive).
|
||||
system = TransferFunction(1, [1, -0.2], dt=0.1)
|
||||
n = 10
|
||||
expected_w = np.linspace(0, np.pi, 10, endpoint=False)
|
||||
w, H = dfreqresp(system, n=n)
|
||||
assert_almost_equal(w, expected_w)
|
||||
|
||||
def test_pole_one(self):
|
||||
# Test that dfreqresp() doesn't fail on a system with a pole at z = 1.
# accumulator, pole at one: H(z) = 1 / (z - 1)
system = TransferFunction([1], [1, -1], dt=0.1)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(RuntimeWarning, message="divide by zero")
|
||||
sup.filter(RuntimeWarning, message="invalid value encountered")
|
||||
w, H = dfreqresp(system, n=2)
|
||||
assert_equal(w[0], 0.) # a fail would give not-a-number
|
||||
|
||||
def test_error(self):
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dfreqresp, system)
|
||||
|
||||
def test_from_state_space(self):
|
||||
# H(z) = 2 / (z**3 - 0.5 * z**2)
|
||||
|
||||
system_TF = dlti([2], [1, -0.5, 0, 0])
|
||||
|
||||
A = np.array([[0.5, 0, 0],
|
||||
[1, 0, 0],
|
||||
[0, 1, 0]])
|
||||
B = np.array([[1, 0, 0]]).T
|
||||
C = np.array([[0, 0, 2]])
|
||||
D = 0
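# (A, B, C, D) above is a companion-form realization of the same H(z):
# x1 carries the first-order pole 1/(z - 0.5) driven by u, x2 and x3 are
# two delays of x1, and y = 2*x3, giving 2 / (z**3 - 0.5*z**2).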
|
||||
|
||||
system_SS = dlti(A, B, C, D)
|
||||
w = 10.0**np.arange(-3,0,.5)
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(BadCoefficients)
|
||||
w1, H1 = dfreqresp(system_TF, w=w)
|
||||
w2, H2 = dfreqresp(system_SS, w=w)
|
||||
|
||||
assert_almost_equal(H1, H2)
|
||||
|
||||
def test_from_zpk(self):
|
||||
# 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
|
||||
system_ZPK = dlti([],[0.2],0.3)
|
||||
system_TF = dlti(0.3, [1, -0.2])
|
||||
w = [0.1, 1, 10, 100]
|
||||
w1, H1 = dfreqresp(system_ZPK, w=w)
|
||||
w2, H2 = dfreqresp(system_TF, w=w)
|
||||
assert_almost_equal(H1, H2)
|
||||
|
||||
|
||||
class Test_bode(object):
|
||||
|
||||
def test_manual(self):
|
||||
# Test bode() magnitude calculation (manual sanity check).
|
||||
# 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
|
||||
dt = 0.1
|
||||
system = TransferFunction(0.3, [1, -0.2], dt=dt)
|
||||
w = [0.1, 0.5, 1, np.pi]
|
||||
w2, mag, phase = dbode(system, w=w)
|
||||
|
||||
# Test mag
|
||||
expected_mag = [-8.5329, -8.8396, -9.6162, -12.0412]
|
||||
assert_almost_equal(mag, expected_mag, decimal=4)
|
||||
|
||||
# Test phase
|
||||
expected_phase = [-7.1575, -35.2814, -67.9809, -180.0000]
|
||||
assert_almost_equal(phase, expected_phase, decimal=4)
|
||||
|
||||
# Test frequency
|
||||
assert_equal(np.array(w) / dt, w2)
|
||||
|
||||
def test_auto(self):
|
||||
# Test bode() magnitude calculation.
|
||||
# 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
|
||||
system = TransferFunction(0.3, [1, -0.2], dt=0.1)
|
||||
w = np.array([0.1, 0.5, 1, np.pi])
|
||||
w2, mag, phase = dbode(system, w=w)
|
||||
jw = np.exp(w * 1j)
|
||||
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
|
||||
|
||||
# Test mag
|
||||
expected_mag = 20.0 * np.log10(abs(y))
|
||||
assert_almost_equal(mag, expected_mag)
|
||||
|
||||
# Test phase
|
||||
expected_phase = np.rad2deg(np.angle(y))
|
||||
assert_almost_equal(phase, expected_phase)
|
||||
|
||||
def test_range(self):
|
||||
# Test that bode() finds a reasonable frequency range.
|
||||
# 1st order low-pass filter: H(z) = 0.3 / (z - 0.2)
|
||||
dt = 0.1
|
||||
system = TransferFunction(0.3, [1, -0.2], dt=0.1)
|
||||
n = 10
|
||||
# Expected range is from 0 to pi/dt (exclusive).
|
||||
expected_w = np.linspace(0, np.pi, n, endpoint=False) / dt
|
||||
w, mag, phase = dbode(system, n=n)
|
||||
assert_almost_equal(w, expected_w)
|
||||
|
||||
def test_pole_one(self):
|
||||
# Test that dbode() doesn't fail on a system with a pole at z = 1.
# accumulator, pole at one: H(z) = 1 / (z - 1)
system = TransferFunction([1], [1, -1], dt=0.1)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(RuntimeWarning, message="divide by zero")
|
||||
sup.filter(RuntimeWarning, message="invalid value encountered")
|
||||
w, mag, phase = dbode(system, n=2)
|
||||
assert_equal(w[0], 0.) # a fail would give not-a-number
|
||||
|
||||
def test_imaginary(self):
|
||||
# bode() should not fail on a system with pure imaginary poles.
|
||||
# The test passes if bode doesn't raise an exception.
|
||||
system = TransferFunction([1], [1, 0, 100], dt=0.1)
|
||||
dbode(system, n=2)
|
||||
|
||||
def test_error(self):
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dbode, system)
|
||||
|
||||
|
||||
class TestTransferFunctionZConversion(object):
|
||||
"""Test private conversions between 'z' and 'z**-1' polynomials."""
|
||||
|
||||
def test_full(self):
|
||||
# Numerator and denominator same order
|
||||
num = [2, 3, 4]
|
||||
den = [5, 6, 7]
|
||||
num2, den2 = TransferFunction._z_to_zinv(num, den)
|
||||
assert_equal(num, num2)
|
||||
assert_equal(den, den2)
|
||||
|
||||
num2, den2 = TransferFunction._zinv_to_z(num, den)
|
||||
assert_equal(num, num2)
|
||||
assert_equal(den, den2)
|
||||
|
||||
def test_numerator(self):
|
||||
# Numerator lower order than denominator
|
||||
num = [2, 3]
|
||||
den = [5, 6, 7]
|
||||
num2, den2 = TransferFunction._z_to_zinv(num, den)
|
||||
assert_equal([0, 2, 3], num2)
|
||||
assert_equal(den, den2)
|
||||
|
||||
num2, den2 = TransferFunction._zinv_to_z(num, den)
|
||||
assert_equal([2, 3, 0], num2)
|
||||
assert_equal(den, den2)
|
||||
|
||||
def test_denominator(self):
|
||||
# Numerator higher order than denominator
|
||||
num = [2, 3, 4]
|
||||
den = [5, 6]
|
||||
num2, den2 = TransferFunction._z_to_zinv(num, den)
|
||||
assert_equal(num, num2)
|
||||
assert_equal([0, 5, 6], den2)
|
||||
|
||||
num2, den2 = TransferFunction._zinv_to_z(num, den)
|
||||
assert_equal(num, num2)
|
||||
assert_equal([5, 6, 0], den2)
|
||||
3736
venv/Lib/site-packages/scipy/signal/tests/test_filter_design.py
Normal file
File diff suppressed because it is too large
641
venv/Lib/site-packages/scipy/signal/tests/test_fir_filter_design.py
Normal file
@@ -0,0 +1,641 @@
import numpy as np
|
||||
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
|
||||
assert_equal, assert_,
|
||||
assert_allclose, assert_warns)
|
||||
from pytest import raises as assert_raises
|
||||
import pytest
|
||||
|
||||
from scipy.fft import fft
|
||||
from scipy.special import sinc
|
||||
from scipy.signal import kaiser_beta, kaiser_atten, kaiserord, \
|
||||
firwin, firwin2, freqz, remez, firls, minimum_phase
|
||||
|
||||
|
||||
def test_kaiser_beta():
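# kaiser_beta implements Kaiser's empirical formula for attenuation A (dB):
#   beta = 0.1102*(A - 8.7)                        for A > 50
#        = 0.5842*(A - 21)**0.4 + 0.07886*(A - 21) for 21 < A <= 50
#        = 0.0                                     for A <= 21
# which is what the expected values below evaluate by hand.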
|
||||
b = kaiser_beta(58.7)
|
||||
assert_almost_equal(b, 0.1102 * 50.0)
|
||||
b = kaiser_beta(22.0)
|
||||
assert_almost_equal(b, 0.5842 + 0.07886)
|
||||
b = kaiser_beta(21.0)
|
||||
assert_equal(b, 0.0)
|
||||
b = kaiser_beta(10.0)
|
||||
assert_equal(b, 0.0)
|
||||
|
||||
|
||||
def test_kaiser_atten():
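# kaiser_atten returns A = 2.285 * (numtaps - 1) * pi * width + 7.95,
# e.g. numtaps=2, width=1/pi gives 2.285 + 7.95 as asserted below.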
|
||||
a = kaiser_atten(1, 1.0)
|
||||
assert_equal(a, 7.95)
|
||||
a = kaiser_atten(2, 1/np.pi)
|
||||
assert_equal(a, 2.285 + 7.95)
|
||||
|
||||
|
||||
def test_kaiserord():
|
||||
assert_raises(ValueError, kaiserord, 1.0, 1.0)
|
||||
numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi)
|
||||
assert_equal((numtaps, beta), (2, 0.0))
|
||||
|
||||
|
||||
class TestFirwin(object):
|
||||
|
||||
def check_response(self, h, expected_response, tol=.05):
|
||||
N = len(h)
|
||||
alpha = 0.5 * (N-1)
|
||||
m = np.arange(0,N) - alpha # time indices of taps
|
||||
for freq, expected in expected_response:
|
||||
actual = abs(np.sum(h*np.exp(-1.j*np.pi*m*freq)))
|
||||
mse = abs(actual-expected)**2
|
||||
assert_(mse < tol, 'response not as expected, mse=%g > %g'
|
||||
% (mse, tol))
|
||||
|
||||
def test_response(self):
|
||||
N = 51
|
||||
f = .5
|
||||
# increase length just to try even/odd
|
||||
h = firwin(N, f) # low-pass from 0 to f
|
||||
self.check_response(h, [(.25,1), (.75,0)])
|
||||
|
||||
h = firwin(N+1, f, window='nuttall') # specific window
|
||||
self.check_response(h, [(.25,1), (.75,0)])
|
||||
|
||||
h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass
|
||||
self.check_response(h, [(.25,0), (.75,1)])
|
||||
|
||||
f1, f2, f3, f4 = .2, .4, .6, .8
|
||||
h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter
|
||||
self.check_response(h, [(.1,0), (.3,1), (.5,0)])
|
||||
|
||||
h = firwin(N+4, [f1, f2]) # band-stop filter
|
||||
self.check_response(h, [(.1,1), (.3,0), (.5,1)])
|
||||
|
||||
h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False)
|
||||
self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)])
|
||||
|
||||
h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter
|
||||
self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)])
|
||||
|
||||
h = firwin(N+7, 0.1, width=.03) # low-pass
|
||||
self.check_response(h, [(.05,1), (.75,0)])
|
||||
|
||||
h = firwin(N+8, 0.1, pass_zero=False) # high-pass
|
||||
self.check_response(h, [(.05,0), (.75,1)])
|
||||
|
||||
def mse(self, h, bands):
|
||||
"""Compute mean squared error versus ideal response across frequency
|
||||
band.
|
||||
h -- coefficients
|
||||
bands -- list of (left, right) tuples relative to 1==Nyquist of
|
||||
passbands
|
||||
"""
|
||||
w, H = freqz(h, worN=1024)
|
||||
f = w/np.pi
|
||||
passIndicator = np.zeros(len(w), bool)
|
||||
for left, right in bands:
|
||||
passIndicator |= (f >= left) & (f < right)
|
||||
Hideal = np.where(passIndicator, 1, 0)
|
||||
mse = np.mean(abs(abs(H)-Hideal)**2)
|
||||
return mse
|
||||
|
||||
def test_scaling(self):
|
||||
"""
|
||||
For one lowpass, bandpass, and highpass example filter, this test
|
||||
checks two things:
|
||||
- the mean squared error over the frequency domain of the unscaled
|
||||
filter is smaller than the scaled filter (true for rectangular
|
||||
window)
|
||||
- the response of the scaled filter is exactly unity at the center
|
||||
of the first passband
|
||||
"""
|
||||
N = 11
|
||||
cases = [
|
||||
([.5], True, (0, 1)),
|
||||
([0.2, .6], False, (.4, 1)),
|
||||
([.5], False, (1, 1)),
|
||||
]
|
||||
for cutoff, pass_zero, expected_response in cases:
|
||||
h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones')
|
||||
hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones')
|
||||
if len(cutoff) == 1:
|
||||
if pass_zero:
|
||||
cutoff = [0] + cutoff
|
||||
else:
|
||||
cutoff = cutoff + [1]
|
||||
assert_(self.mse(h, [cutoff]) < self.mse(hs, [cutoff]),
|
||||
'least squares violation')
|
||||
self.check_response(hs, [expected_response], 1e-12)
|
||||
|
||||
|
||||
class TestFirWinMore(object):
|
||||
"""Different author, different style, different tests..."""
|
||||
|
||||
def test_lowpass(self):
|
||||
width = 0.04
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False)
|
||||
taps = firwin(ntaps, **kwargs)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
|
||||
|
||||
# Check the gain at a few samples where we know it should be approximately 0 or 1.
|
||||
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
|
||||
|
||||
taps_str = firwin(ntaps, pass_zero='lowpass', **kwargs)
|
||||
assert_allclose(taps, taps_str)
|
||||
|
||||
def test_highpass(self):
|
||||
width = 0.04
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
|
||||
# Ensure that ntaps is odd.
|
||||
ntaps |= 1
|
||||
|
||||
kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False)
|
||||
taps = firwin(ntaps, pass_zero=False, **kwargs)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
|
||||
|
||||
# Check the gain at a few samples where we know it should be approximately 0 or 1.
|
||||
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
|
||||
|
||||
taps_str = firwin(ntaps, pass_zero='highpass', **kwargs)
|
||||
assert_allclose(taps, taps_str)
|
||||
|
||||
def test_bandpass(self):
|
||||
width = 0.04
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
kwargs = dict(cutoff=[0.3, 0.7], window=('kaiser', beta), scale=False)
|
||||
taps = firwin(ntaps, pass_zero=False, **kwargs)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
|
||||
|
||||
# Check the gain at a few samples where we know it should be approximately 0 or 1.
|
||||
freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5,
|
||||
0.7-width/2, 0.7+width/2, 0.8, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
|
||||
|
||||
taps_str = firwin(ntaps, pass_zero='bandpass', **kwargs)
|
||||
assert_allclose(taps, taps_str)
|
||||
|
||||
def test_bandstop_multi(self):
|
||||
width = 0.04
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
kwargs = dict(cutoff=[0.2, 0.5, 0.8], window=('kaiser', beta),
|
||||
scale=False)
|
||||
taps = firwin(ntaps, **kwargs)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
|
||||
|
||||
# Check the gain at a few samples where we know it should be approximately 0 or 1.
|
||||
freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35,
|
||||
0.5-width/2, 0.5+width/2, 0.65,
|
||||
0.8-width/2, 0.8+width/2, 0.9, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
|
||||
decimal=5)
|
||||
|
||||
taps_str = firwin(ntaps, pass_zero='bandstop', **kwargs)
|
||||
assert_allclose(taps, taps_str)
|
||||
|
||||
def test_fs_nyq(self):
|
||||
"""Test the fs and nyq keywords."""
|
||||
nyquist = 1000
|
||||
width = 40.0
|
||||
relative_width = width/nyquist
|
||||
ntaps, beta = kaiserord(120, relative_width)
|
||||
taps = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
|
||||
pass_zero=False, scale=False, fs=2*nyquist)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1])
|
||||
|
||||
# Check the gain at a few samples where we know it should be approximately 0 or 1.
|
||||
freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500,
|
||||
700-width/2, 700+width/2, 800, 1000])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5)
|
||||
|
||||
taps2 = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta),
|
||||
pass_zero=False, scale=False, nyq=nyquist)
|
||||
assert_allclose(taps2, taps)
|
||||
|
||||
def test_bad_cutoff(self):
|
||||
"""Test that invalid cutoff argument raises ValueError."""
|
||||
# cutoff values must be greater than 0 and less than 1.
|
||||
assert_raises(ValueError, firwin, 99, -0.5)
|
||||
assert_raises(ValueError, firwin, 99, 1.5)
|
||||
# Don't allow 0 or 1 in cutoff.
|
||||
assert_raises(ValueError, firwin, 99, [0, 0.5])
|
||||
assert_raises(ValueError, firwin, 99, [0.5, 1])
|
||||
# cutoff values must be strictly increasing.
|
||||
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2])
|
||||
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5])
|
||||
# Must have at least one cutoff value.
|
||||
assert_raises(ValueError, firwin, 99, [])
|
||||
# 2D array not allowed.
|
||||
assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]])
|
||||
# cutoff values must be less than nyq.
|
||||
assert_raises(ValueError, firwin, 99, 50.0, nyq=40)
|
||||
assert_raises(ValueError, firwin, 99, [10, 20, 30], nyq=25)
|
||||
assert_raises(ValueError, firwin, 99, 50.0, fs=80)
|
||||
assert_raises(ValueError, firwin, 99, [10, 20, 30], fs=50)
|
||||
|
||||
def test_even_highpass_raises_value_error(self):
|
||||
"""Test that attempt to create a highpass filter with an even number
|
||||
of taps raises a ValueError exception."""
|
||||
assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False)
|
||||
assert_raises(ValueError, firwin, 40, [.25, 0.5])
|
||||
|
||||
def test_bad_pass_zero(self):
|
||||
"""Test degenerate pass_zero cases."""
|
||||
with assert_raises(ValueError, match='pass_zero must be'):
|
||||
firwin(41, 0.5, pass_zero='foo')
|
||||
with assert_raises(TypeError, match='cannot be interpreted'):
|
||||
firwin(41, 0.5, pass_zero=1.)
|
||||
for pass_zero in ('lowpass', 'highpass'):
|
||||
with assert_raises(ValueError, match='cutoff must have one'):
|
||||
firwin(41, [0.5, 0.6], pass_zero=pass_zero)
|
||||
for pass_zero in ('bandpass', 'bandstop'):
|
||||
with assert_raises(ValueError, match='must have at least two'):
|
||||
firwin(41, [0.5], pass_zero=pass_zero)
|
||||
|
||||
|
||||
class TestFirwin2(object):
|
||||
|
||||
def test_invalid_args(self):
|
||||
# `freq` and `gain` have different lengths.
|
||||
with assert_raises(ValueError, match='must be of same length'):
|
||||
firwin2(50, [0, 0.5, 1], [0.0, 1.0])
|
||||
# `nfreqs` is less than `ntaps`.
|
||||
with assert_raises(ValueError, match='ntaps must be less than nfreqs'):
|
||||
firwin2(50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33)
|
||||
# Decreasing value in `freq`
|
||||
with assert_raises(ValueError, match='must be nondecreasing'):
|
||||
firwin2(50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0])
|
||||
# Value in `freq` repeated more than once.
|
||||
with assert_raises(ValueError, match='must not occur more than twice'):
|
||||
firwin2(50, [0, .1, .1, .1, 1.0], [0.0, 0.5, 0.75, 1.0, 1.0])
|
||||
# `freq` does not start at 0.0.
|
||||
with assert_raises(ValueError, match='start with 0'):
|
||||
firwin2(50, [0.5, 1.0], [0.0, 1.0])
|
||||
# `freq` does not end at fs/2.
|
||||
with assert_raises(ValueError, match='end with fs/2'):
|
||||
firwin2(50, [0.0, 0.5], [0.0, 1.0])
|
||||
# Value 0 is repeated in `freq`
|
||||
with assert_raises(ValueError, match='0 must not be repeated'):
|
||||
firwin2(50, [0.0, 0.0, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])
|
||||
# Value fs/2 is repeated in `freq`
|
||||
with assert_raises(ValueError, match='fs/2 must not be repeated'):
|
||||
firwin2(50, [0.0, 0.5, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0])
|
||||
# Value in `freq` that is too close to a repeated number
|
||||
with assert_raises(ValueError, match='cannot contain numbers '
|
||||
'that are too close'):
|
||||
firwin2(50, [0.0, 0.5 - np.finfo(float).eps * 0.5, 0.5, 0.5, 1.0],
|
||||
[1.0, 1.0, 1.0, 0.0, 0.0])
|
||||
|
||||
# Type II filter, but the gain at nyquist frequency is not zero.
|
||||
with assert_raises(ValueError, match='Type II filter'):
|
||||
firwin2(16, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0])
|
||||
|
||||
# Type III filter, but the gains at nyquist and zero rate are not zero.
|
||||
with assert_raises(ValueError, match='Type III filter'):
|
||||
firwin2(17, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0], antisymmetric=True)
|
||||
with assert_raises(ValueError, match='Type III filter'):
|
||||
firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True)
|
||||
with assert_raises(ValueError, match='Type III filter'):
|
||||
firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 1.0], antisymmetric=True)
|
||||
|
||||
# Type IV filter, but the gain at zero rate is not zero.
|
||||
with assert_raises(ValueError, match='Type IV filter'):
|
||||
firwin2(16, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True)
|
||||
|
||||
def test01(self):
|
||||
width = 0.04
|
||||
beta = 12.0
|
||||
ntaps = 400
|
||||
# Filter is 1 from w=0 to w=0.5, then decreases linearly from 1 to 0 as w
|
||||
# increases from w=0.5 to w=1 (w=1 is the Nyquist frequency).
|
||||
freq = [0.0, 0.5, 1.0]
|
||||
gain = [1.0, 1.0, 0.0]
|
||||
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
|
||||
freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2,
|
||||
0.75, 1.0-width/2])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[1.0, 1.0, 1.0, 1.0-width, 0.5, width], decimal=5)
|
||||
|
||||
def test02(self):
|
||||
width = 0.04
|
||||
beta = 12.0
|
||||
# ntaps must be odd for positive gain at Nyquist.
|
||||
ntaps = 401
|
||||
# An ideal highpass filter.
|
||||
freq = [0.0, 0.5, 0.5, 1.0]
|
||||
gain = [0.0, 0.0, 1.0, 1.0]
|
||||
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
|
||||
freq_samples = np.array([0.0, 0.25, 0.5-width, 0.5+width, 0.75, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
|
||||
|
||||
def test03(self):
|
||||
width = 0.02
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
# ntaps must be odd for positive gain at Nyquist.
|
||||
ntaps = int(ntaps) | 1
|
||||
freq = [0.0, 0.4, 0.4, 0.5, 0.5, 1.0]
|
||||
gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0]
|
||||
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
|
||||
freq_samples = np.array([0.0, 0.4-width, 0.4+width, 0.45,
|
||||
0.5-width, 0.5+width, 0.75, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
assert_array_almost_equal(np.abs(response),
|
||||
[1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5)
|
||||
|
||||
def test04(self):
|
||||
"""Test firwin2 when window=None."""
|
||||
ntaps = 5
|
||||
# Ideal lowpass: gain is 1 on [0,0.5], and 0 on [0.5, 1.0]
|
||||
freq = [0.0, 0.5, 0.5, 1.0]
|
||||
gain = [1.0, 1.0, 0.0, 0.0]
|
||||
taps = firwin2(ntaps, freq, gain, window=None, nfreqs=8193)
|
||||
alpha = 0.5 * (ntaps - 1)
|
||||
m = np.arange(0, ntaps) - alpha
|
||||
h = 0.5 * sinc(0.5 * m)
|
||||
assert_array_almost_equal(h, taps)
|
||||
|
||||
def test05(self):
|
||||
"""Test firwin2 for calculating Type IV filters"""
|
||||
ntaps = 1500
|
||||
|
||||
freq = [0.0, 1.0]
|
||||
gain = [0.0, 1.0]
|
||||
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
|
||||
assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2:][::-1])
|
||||
|
||||
freqs, response = freqz(taps, worN=2048)
|
||||
assert_array_almost_equal(abs(response), freqs / np.pi, decimal=4)
|
||||
|
||||
def test06(self):
|
||||
"""Test firwin2 for calculating Type III filters"""
|
||||
ntaps = 1501
|
||||
|
||||
freq = [0.0, 0.5, 0.55, 1.0]
|
||||
gain = [0.0, 0.5, 0.0, 0.0]
|
||||
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
|
||||
assert_equal(taps[ntaps // 2], 0.0)
|
||||
assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2 + 1:][::-1])
|
||||
|
||||
freqs, response1 = freqz(taps, worN=2048)
|
||||
response2 = np.interp(freqs / np.pi, freq, gain)
|
||||
assert_array_almost_equal(abs(response1), response2, decimal=3)
|
||||
|
||||
def test_fs_nyq(self):
|
||||
taps1 = firwin2(80, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
|
||||
taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], fs=120.0)
|
||||
assert_array_almost_equal(taps1, taps2)
|
||||
taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], nyq=60.0)
|
||||
assert_array_almost_equal(taps1, taps2)
|
||||
|
||||
def test_tuple(self):
|
||||
taps1 = firwin2(150, (0.0, 0.5, 0.5, 1.0), (1.0, 1.0, 0.0, 0.0))
|
||||
taps2 = firwin2(150, [0.0, 0.5, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])
|
||||
assert_array_almost_equal(taps1, taps2)
|
||||
|
||||
def test_input_modification(self):
|
||||
freq1 = np.array([0.0, 0.5, 0.5, 1.0])
|
||||
freq2 = np.array(freq1)
|
||||
firwin2(80, freq1, [1.0, 1.0, 0.0, 0.0])
|
||||
assert_equal(freq1, freq2)
|
||||
|
||||
|
||||
class TestRemez(object):
|
||||
|
||||
def test_bad_args(self):
|
||||
assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka')
|
||||
|
||||
def test_hilbert(self):
|
||||
N = 11 # number of taps in the filter
|
||||
a = 0.1 # width of the transition band
|
||||
|
||||
# design a unity-gain Hilbert bandpass filter from a to 0.5-a
|
||||
h = remez(11, [a, 0.5-a], [1], type='hilbert')
|
||||
|
||||
# make sure the filter has correct # of taps
|
||||
assert_(len(h) == N, "Number of Taps")
|
||||
|
||||
# make sure it is type III (anti-symmetric tap coefficients)
|
||||
assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1])
|
||||
|
||||
# Since the requested response is symmetric, all even coefficients
|
||||
# should be zero (or in this case really small)
|
||||
assert_((abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero")
|
||||
|
||||
# now check the frequency response
|
||||
w, H = freqz(h, 1)
|
||||
f = w/2/np.pi
|
||||
Hmag = abs(H)
|
||||
|
||||
# should have a zero at 0 and pi (in this case close to zero)
|
||||
assert_((Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi")
|
||||
|
||||
# check that the pass band is close to unity
|
||||
idx = np.logical_and(f > a, f < 0.5-a)
|
||||
assert_((abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity")
|
||||
|
||||
def test_compare(self):
|
||||
# test comparison to MATLAB
|
||||
k = [0.024590270518440, -0.041314581814658, -0.075943803756711,
|
||||
-0.003530911231040, 0.193140296954975, 0.373400753484939,
|
||||
0.373400753484939, 0.193140296954975, -0.003530911231040,
|
||||
-0.075943803756711, -0.041314581814658, 0.024590270518440]
|
||||
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], Hz=2.)
|
||||
assert_allclose(h, k)
|
||||
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
|
||||
assert_allclose(h, k)
|
||||
|
||||
h = [-0.038976016082299, 0.018704846485491, -0.014644062687875,
|
||||
0.002879152556419, 0.016849978528150, -0.043276706138248,
|
||||
0.073641298245579, -0.103908158578635, 0.129770906801075,
|
||||
-0.147163447297124, 0.153302248456347, -0.147163447297124,
|
||||
0.129770906801075, -0.103908158578635, 0.073641298245579,
|
||||
-0.043276706138248, 0.016849978528150, 0.002879152556419,
|
||||
-0.014644062687875, 0.018704846485491, -0.038976016082299]
|
||||
assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], Hz=2.), h)
|
||||
assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.), h)
|
||||
|
||||
|
||||
class TestFirls(object):
|
||||
|
||||
def test_bad_args(self):
|
||||
# even numtaps
|
||||
assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0])
|
||||
# odd bands
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0])
|
||||
# len(bands) != len(desired)
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0])
|
||||
# non-monotonic bands
|
||||
assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0])
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], [0] * 4)
|
||||
assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], [0] * 4)
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], [0] * 4)
|
||||
# negative desired
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2], [-1, 1])
|
||||
# len(weight) != len(pairs)
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [1, 2])
|
||||
# negative weight
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [-1])
|
||||
|
||||
def test_firls(self):
|
||||
N = 11 # number of taps in the filter
|
||||
a = 0.1 # width of the transition band
|
||||
|
||||
# design a halfband symmetric low-pass filter
|
||||
h = firls(11, [0, a, 0.5-a, 0.5], [1, 1, 0, 0], fs=1.0)
|
||||
|
||||
# make sure the filter has correct # of taps
|
||||
assert_equal(len(h), N)
|
||||
|
||||
# make sure it is symmetric
|
||||
midx = (N-1) // 2
|
||||
assert_array_almost_equal(h[:midx], h[:-midx-1:-1])
|
||||
|
||||
# make sure the center tap is 0.5
|
||||
assert_almost_equal(h[midx], 0.5)
|
||||
|
||||
# For halfband symmetric, odd coefficients (except the center)
|
||||
# should be zero (really small)
|
||||
hodd = np.hstack((h[1:midx:2], h[-midx+1::2]))
|
||||
assert_array_almost_equal(hodd, 0)
|
||||
|
||||
# now check the frequency response
|
||||
w, H = freqz(h, 1)
|
||||
f = w/2/np.pi
|
||||
Hmag = np.abs(H)
|
||||
|
||||
# check that the pass band is close to unity
|
||||
idx = np.logical_and(f > 0, f < a)
|
||||
assert_array_almost_equal(Hmag[idx], 1, decimal=3)
|
||||
|
||||
# check that the stop band is close to zero
|
||||
idx = np.logical_and(f > 0.5-a, f < 0.5)
|
||||
assert_array_almost_equal(Hmag[idx], 0, decimal=3)
|
||||
|
||||
def test_compare(self):
|
||||
# compare to OCTAVE output
|
||||
taps = firls(9, [0, 0.5, 0.55, 1], [1, 1, 0, 0], [1, 2])
|
||||
# >> taps = firls(8, [0 0.5 0.55 1], [1 1 0 0], [1, 2]);
|
||||
known_taps = [-6.26930101730182e-04, -1.03354450635036e-01,
|
||||
-9.81576747564301e-03, 3.17271686090449e-01,
|
||||
5.11409425599933e-01, 3.17271686090449e-01,
|
||||
-9.81576747564301e-03, -1.03354450635036e-01,
|
||||
-6.26930101730182e-04]
|
||||
assert_allclose(taps, known_taps)
|
||||
|
||||
# compare to MATLAB output
|
||||
taps = firls(11, [0, 0.5, 0.5, 1], [1, 1, 0, 0], [1, 2])
|
||||
# >> taps = firls(10, [0 0.5 0.5 1], [1 1 0 0], [1, 2]);
|
||||
known_taps = [
|
||||
0.058545300496815, -0.014233383714318, -0.104688258464392,
|
||||
0.012403323025279, 0.317930861136062, 0.488047220029700,
|
||||
0.317930861136062, 0.012403323025279, -0.104688258464392,
|
||||
-0.014233383714318, 0.058545300496815]
|
||||
assert_allclose(taps, known_taps)
|
||||
|
||||
# With linear changes:
|
||||
taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], fs=20)
|
||||
# >> taps = firls(6, [0, 0.1, 0.2, 0.3, 0.4, 0.5], [1, 0, 0, 1, 1, 0])
|
||||
known_taps = [
|
||||
1.156090832768218, -4.1385894727395849, 7.5288619164321826,
|
||||
-8.5530572592947856, 7.5288619164321826, -4.1385894727395849,
|
||||
1.156090832768218]
|
||||
assert_allclose(taps, known_taps)
|
||||
|
||||
taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], nyq=10)
|
||||
assert_allclose(taps, known_taps)
|
||||
|
||||
with pytest.raises(ValueError, match='between 0 and 1'):
|
||||
firls(7, [0, 1], [0, 1], nyq=0.5)
|
||||
|
||||
def test_rank_deficient(self):
|
||||
# solve() runs but warns (only sometimes, so here we don't use match)
|
||||
x = firls(21, [0, 0.1, 0.9, 1], [1, 1, 0, 0])
|
||||
w, h = freqz(x, fs=2.)
|
||||
assert_allclose(np.abs(h[:2]), 1., atol=1e-5)
|
||||
assert_allclose(np.abs(h[-2:]), 0., atol=1e-6)
|
||||
# switch to pinvh (tolerances could be higher with longer
|
||||
# filters, but using shorter ones is faster computationally and
|
||||
# the idea is the same)
|
||||
x = firls(101, [0, 0.01, 0.99, 1], [1, 1, 0, 0])
|
||||
w, h = freqz(x, fs=2.)
|
||||
mask = w < 0.01
|
||||
assert mask.sum() > 3
|
||||
assert_allclose(np.abs(h[mask]), 1., atol=1e-4)
|
||||
mask = w > 0.99
|
||||
assert mask.sum() > 3
|
||||
assert_allclose(np.abs(h[mask]), 0., atol=1e-4)
|
||||
|
||||
|
||||
class TestMinimumPhase(object):
|
||||
|
||||
def test_bad_args(self):
|
||||
# not enough taps
|
||||
assert_raises(ValueError, minimum_phase, [1.])
|
||||
assert_raises(ValueError, minimum_phase, [1., 1.])
|
||||
assert_raises(ValueError, minimum_phase, np.full(10, 1j))
|
||||
assert_raises(ValueError, minimum_phase, 'foo')
|
||||
assert_raises(ValueError, minimum_phase, np.ones(10), n_fft=8)
|
||||
assert_raises(ValueError, minimum_phase, np.ones(10), method='foo')
|
||||
assert_warns(RuntimeWarning, minimum_phase, np.arange(3))
|
||||
|
||||
def test_homomorphic(self):
|
||||
# check that it can recover frequency responses of arbitrary
|
||||
# linear-phase filters
|
||||
|
||||
# for some cases we can get the actual filter back
|
||||
h = [1, -1]
|
||||
h_new = minimum_phase(np.convolve(h, h[::-1]))
|
||||
assert_allclose(h_new, h, rtol=0.05)
|
||||
|
||||
# but in general we only guarantee we get the magnitude back
|
||||
rng = np.random.RandomState(0)
|
||||
for n in (2, 3, 10, 11, 15, 16, 17, 20, 21, 100, 101):
|
||||
h = rng.randn(n)
|
||||
h_new = minimum_phase(np.convolve(h, h[::-1]))
|
||||
assert_allclose(np.abs(fft(h_new)),
|
||||
np.abs(fft(h)), rtol=1e-4)
|
||||
|
||||
def test_hilbert(self):
|
||||
# compare to MATLAB output of reference implementation
|
||||
|
||||
# f=[0 0.3 0.5 1];
|
||||
# a=[1 1 0 0];
|
||||
# h=remez(11,f,a);
|
||||
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
|
||||
k = [0.349585548646686, 0.373552164395447, 0.326082685363438,
|
||||
0.077152207480935, -0.129943946349364, -0.059355880509749]
|
||||
m = minimum_phase(h, 'hilbert')
|
||||
assert_allclose(m, k, rtol=5e-3)
|
||||
|
||||
# f=[0 0.8 0.9 1];
|
||||
# a=[0 0 1 1];
|
||||
# h=remez(20,f,a);
|
||||
h = remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.)
|
||||
k = [0.232486803906329, -0.133551833687071, 0.151871456867244,
|
||||
-0.157957283165866, 0.151739294892963, -0.129293146705090,
|
||||
0.100787844523204, -0.065832656741252, 0.035361328741024,
|
||||
-0.014977068692269, -0.158416139047557]
|
||||
m = minimum_phase(h, 'hilbert', n_fft=2**19)
|
||||
assert_allclose(m, k, rtol=2e-3)
1269
venv/Lib/site-packages/scipy/signal/tests/test_ltisys.py
Normal file
File diff suppressed because it is too large
65
venv/Lib/site-packages/scipy/signal/tests/test_max_len_seq.py
Normal file
@@ -0,0 +1,65 @@
import numpy as np
|
||||
from numpy.testing import assert_allclose, assert_array_equal
|
||||
from pytest import raises as assert_raises
|
||||
|
||||
from numpy.fft import fft, ifft
|
||||
|
||||
from scipy.signal import max_len_seq
|
||||
|
||||
|
||||
class TestMLS(object):
|
||||
|
||||
def test_mls_inputs(self):
|
||||
# can't all be zero state
|
||||
assert_raises(ValueError, max_len_seq,
|
||||
10, state=np.zeros(10))
|
||||
# wrong size state
|
||||
assert_raises(ValueError, max_len_seq, 10,
|
||||
state=np.ones(3))
|
||||
# wrong length
|
||||
assert_raises(ValueError, max_len_seq, 10, length=-1)
|
||||
assert_array_equal(max_len_seq(10, length=0)[0], [])
|
||||
# unknown taps
|
||||
assert_raises(ValueError, max_len_seq, 64)
|
||||
# bad taps
|
||||
assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1])
|
||||
|
||||
def test_mls_output(self):
|
||||
# define some alternate working taps
|
||||
alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4],
|
||||
8: [7, 5, 3]}
|
||||
# assume the other bit levels work, too slow to test higher orders...
|
||||
for nbits in range(2, 8):
|
||||
for state in [None, np.round(np.random.rand(nbits))]:
|
||||
for taps in [None, alt_taps[nbits]]:
|
||||
if state is not None and np.all(state == 0):
|
||||
state[0] = 1 # they can't all be zero
|
||||
orig_m = max_len_seq(nbits, state=state,
|
||||
taps=taps)[0]
|
||||
m = 2. * orig_m - 1. # convert to +/- 1 representation
|
||||
# First, make sure every term is +1 or -1
|
||||
err_msg = "mls had non binary terms"
|
||||
assert_array_equal(np.abs(m), np.ones_like(m),
|
||||
err_msg=err_msg)
|
||||
# Test via circular cross-correlation, which is just mult.
|
||||
# in the frequency domain with one signal conjugated
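# An ideal +/-1 MLS has circular autocorrelation 2**nbits - 1 at zero lag
# and exactly -1 at every other lag, which is what is checked below.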
|
||||
tester = np.real(ifft(fft(m) * np.conj(fft(m))))
|
||||
out_len = 2**nbits - 1
|
||||
# impulse amplitude == test_len
|
||||
err_msg = "mls impulse has incorrect value"
|
||||
assert_allclose(tester[0], out_len, err_msg=err_msg)
|
||||
# steady-state is -1
|
||||
err_msg = "mls steady-state has incorrect value"
|
||||
assert_allclose(tester[1:], np.full(out_len - 1, -1),
|
||||
err_msg=err_msg)
|
||||
# let's do the split thing using a couple options
|
||||
for n in (1, 2**(nbits - 1)):
|
||||
m1, s1 = max_len_seq(nbits, state=state, taps=taps,
|
||||
length=n)
|
||||
m2, s2 = max_len_seq(nbits, state=s1, taps=taps,
|
||||
length=1)
|
||||
m3, s3 = max_len_seq(nbits, state=s2, taps=taps,
|
||||
length=out_len - n - 1)
|
||||
new_m = np.concatenate((m1, m2, m3))
|
||||
assert_array_equal(orig_m, new_m)
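# Illustrative sketch (not part of the original tests): the defining property
# checked above is that a +/-1 maximum length sequence has a circular
# autocorrelation of 2**nbits - 1 at zero lag and -1 at every other lag.
import numpy as np
from numpy.fft import fft, ifft
from scipy.signal import max_len_seq
seq = 2. * max_len_seq(6)[0] - 1.                   # 63 samples in {-1, +1}
acorr = np.real(ifft(fft(seq) * np.conj(fft(seq))))
assert np.isclose(acorr[0], 63)                     # impulse at zero lag
assert np.allclose(acorr[1:], -1.)                  # flat -1 everywhere else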
847
venv/Lib/site-packages/scipy/signal/tests/test_peak_finding.py
Normal file
@ -0,0 +1,847 @@
import copy
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import (
|
||||
assert_,
|
||||
assert_equal,
|
||||
assert_allclose,
|
||||
assert_array_equal
|
||||
)
|
||||
import pytest
|
||||
from pytest import raises, warns
|
||||
|
||||
from scipy.signal._peak_finding import (
|
||||
argrelmax,
|
||||
argrelmin,
|
||||
peak_prominences,
|
||||
peak_widths,
|
||||
_unpack_condition_args,
|
||||
find_peaks,
|
||||
find_peaks_cwt,
|
||||
_identify_ridge_lines
|
||||
)
|
||||
from scipy.signal._peak_finding_utils import _local_maxima_1d, PeakPropertyWarning
|
||||
|
||||
|
||||
def _gen_gaussians(center_locs, sigmas, total_length):
|
||||
xdata = np.arange(0, total_length).astype(float)
|
||||
out_data = np.zeros(total_length, dtype=float)
|
||||
for ind, sigma in enumerate(sigmas):
|
||||
tmp = (xdata - center_locs[ind]) / sigma
|
||||
out_data += np.exp(-(tmp**2))
|
||||
return out_data
|
||||
|
||||
|
||||
def _gen_gaussians_even(sigmas, total_length):
|
||||
num_peaks = len(sigmas)
|
||||
delta = total_length / (num_peaks + 1)
|
||||
center_locs = np.linspace(delta, total_length - delta, num=num_peaks).astype(int)
|
||||
out_data = _gen_gaussians(center_locs, sigmas, total_length)
|
||||
return out_data, center_locs
|
||||
|
||||
|
||||
def _gen_ridge_line(start_locs, max_locs, length, distances, gaps):
|
||||
"""
|
||||
Generate coordinates for a ridge line.
|
||||
|
||||
Will be a series of coordinates, starting at start_loc (length 2).
|
||||
The maximum distance between any adjacent columns will be
|
||||
`max_distance`, the max distance between adjacent rows
|
||||
will be `max_gap`.
|
||||
|
||||
`max_locs` should be the size of the intended matrix. The
|
||||
ending coordinates are guaranteed to be less than `max_locs`,
|
||||
although they may not approach `max_locs` at all.
|
||||
"""
|
||||
|
||||
def keep_bounds(num, max_val):
|
||||
out = max(num, 0)
|
||||
out = min(out, max_val)
|
||||
return out
|
||||
|
||||
gaps = copy.deepcopy(gaps)
|
||||
distances = copy.deepcopy(distances)
|
||||
|
||||
locs = np.zeros([length, 2], dtype=int)
|
||||
locs[0, :] = start_locs
|
||||
total_length = max_locs[0] - start_locs[0] - sum(gaps)
|
||||
if total_length < length:
|
||||
raise ValueError('Cannot generate ridge line according to constraints')
|
||||
dist_int = length / len(distances) - 1
|
||||
gap_int = length / len(gaps) - 1
|
||||
for ind in range(1, length):
|
||||
nextcol = locs[ind - 1, 1]
|
||||
nextrow = locs[ind - 1, 0] + 1
|
||||
if (ind % dist_int == 0) and (len(distances) > 0):
|
||||
nextcol += ((-1)**ind)*distances.pop()
|
||||
if (ind % gap_int == 0) and (len(gaps) > 0):
|
||||
nextrow += gaps.pop()
|
||||
nextrow = keep_bounds(nextrow, max_locs[0])
|
||||
nextcol = keep_bounds(nextcol, max_locs[1])
|
||||
locs[ind, :] = [nextrow, nextcol]
|
||||
|
||||
return [locs[:, 0], locs[:, 1]]
|
||||
|
||||
|
||||
class TestLocalMaxima1d(object):
|
||||
|
||||
def test_empty(self):
|
||||
"""Test with empty signal."""
|
||||
x = np.array([], dtype=np.float64)
|
||||
for array in _local_maxima_1d(x):
|
||||
assert_equal(array, np.array([]))
|
||||
assert_(array.base is None)
|
||||
|
||||
def test_linear(self):
|
||||
"""Test with linear signal."""
|
||||
x = np.linspace(0, 100)
|
||||
for array in _local_maxima_1d(x):
|
||||
assert_equal(array, np.array([]))
|
||||
assert_(array.base is None)
|
||||
|
||||
def test_simple(self):
|
||||
"""Test with simple signal."""
|
||||
x = np.linspace(-10, 10, 50)
|
||||
x[2::3] += 1
|
||||
expected = np.arange(2, 50, 3)
|
||||
for array in _local_maxima_1d(x):
|
||||
# For plateaus of size 1, the edges are identical with the
|
||||
# midpoints
|
||||
assert_equal(array, expected)
|
||||
assert_(array.base is None)
|
||||
|
||||
def test_flat_maxima(self):
|
||||
"""Test if flat maxima are detected correctly."""
|
||||
x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 2.99, 4, 4, 4, 4, -10,
|
||||
-5, -5, -5, -5, -5, -10])
|
||||
midpoints, left_edges, right_edges = _local_maxima_1d(x)
|
||||
assert_equal(midpoints, np.array([2, 4, 8, 12, 18]))
|
||||
assert_equal(left_edges, np.array([2, 4, 7, 11, 16]))
|
||||
assert_equal(right_edges, np.array([2, 5, 9, 14, 20]))
|
||||
|
||||
@pytest.mark.parametrize('x', [
|
||||
np.array([1., 0, 2]),
|
||||
np.array([3., 3, 0, 4, 4]),
|
||||
np.array([5., 5, 5, 0, 6, 6, 6]),
|
||||
])
|
||||
def test_signal_edges(self, x):
|
||||
"""Test if behavior on signal edges is correct."""
|
||||
for array in _local_maxima_1d(x):
|
||||
assert_equal(array, np.array([]))
|
||||
assert_(array.base is None)
|
||||
|
||||
def test_exceptions(self):
|
||||
"""Test input validation and raised exceptions."""
|
||||
with raises(ValueError, match="wrong number of dimensions"):
|
||||
_local_maxima_1d(np.ones((1, 1)))
|
||||
with raises(ValueError, match="expected 'float64_t'"):
|
||||
_local_maxima_1d(np.ones(1, dtype=int))
|
||||
with raises(TypeError, match="list"):
|
||||
_local_maxima_1d([1., 2.])
|
||||
with raises(TypeError, match="'x' must not be None"):
|
||||
_local_maxima_1d(None)
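# Illustrative sketch (not part of the original tests): the public find_peaks
# exposes the same plateau handling exercised above; for a flat maximum it
# reports the middle sample and, via plateau_size, the plateau's edges.
import numpy as np
from scipy.signal import find_peaks
x = np.array([0, 3, 3, 3, 0])
peaks, props = find_peaks(x, plateau_size=1)
# peaks -> [2]; props['left_edges'] -> [1]; props['right_edges'] -> [3]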
|
||||
|
||||
|
||||
class TestRidgeLines(object):
|
||||
|
||||
def test_empty(self):
|
||||
test_matr = np.zeros([20, 100])
|
||||
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
|
||||
assert_(len(lines) == 0)
|
||||
|
||||
def test_minimal(self):
|
||||
test_matr = np.zeros([20, 100])
|
||||
test_matr[0, 10] = 1
|
||||
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
|
||||
assert_(len(lines) == 1)
|
||||
|
||||
test_matr = np.zeros([20, 100])
|
||||
test_matr[0:2, 10] = 1
|
||||
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
|
||||
assert_(len(lines) == 1)
|
||||
|
||||
def test_single_pass(self):
|
||||
distances = [0, 1, 2, 5]
|
||||
gaps = [0, 1, 2, 0, 1]
|
||||
test_matr = np.zeros([20, 50]) + 1e-12
|
||||
length = 12
|
||||
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
|
||||
test_matr[line[0], line[1]] = 1
|
||||
max_distances = np.full(20, max(distances))
|
||||
identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1)
|
||||
assert_array_equal(identified_lines, [line])
|
||||
|
||||
def test_single_bigdist(self):
|
||||
distances = [0, 1, 2, 5]
|
||||
gaps = [0, 1, 2, 4]
|
||||
test_matr = np.zeros([20, 50])
|
||||
length = 12
|
||||
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
|
||||
test_matr[line[0], line[1]] = 1
|
||||
max_dist = 3
|
||||
max_distances = np.full(20, max_dist)
|
||||
#This should get 2 lines, since the distance is too large
|
||||
identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1)
|
||||
assert_(len(identified_lines) == 2)
|
||||
|
||||
for iline in identified_lines:
|
||||
adists = np.diff(iline[1])
|
||||
np.testing.assert_array_less(np.abs(adists), max_dist)
|
||||
|
||||
agaps = np.diff(iline[0])
|
||||
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
|
||||
|
||||
def test_single_biggap(self):
|
||||
distances = [0, 1, 2, 5]
|
||||
max_gap = 3
|
||||
gaps = [0, 4, 2, 1]
|
||||
test_matr = np.zeros([20, 50])
|
||||
length = 12
|
||||
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
|
||||
test_matr[line[0], line[1]] = 1
|
||||
max_dist = 6
|
||||
max_distances = np.full(20, max_dist)
|
||||
#This should get 2 lines, since the gap is too large
|
||||
identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
|
||||
assert_(len(identified_lines) == 2)
|
||||
|
||||
for iline in identified_lines:
|
||||
adists = np.diff(iline[1])
|
||||
np.testing.assert_array_less(np.abs(adists), max_dist)
|
||||
|
||||
agaps = np.diff(iline[0])
|
||||
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
|
||||
|
||||
def test_single_biggaps(self):
|
||||
distances = [0]
|
||||
max_gap = 1
|
||||
gaps = [3, 6]
|
||||
test_matr = np.zeros([50, 50])
|
||||
length = 30
|
||||
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
|
||||
test_matr[line[0], line[1]] = 1
|
||||
max_dist = 1
|
||||
max_distances = np.full(50, max_dist)
|
||||
#This should get 3 lines, since the gaps are too large
|
||||
identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
|
||||
assert_(len(identified_lines) == 3)
|
||||
|
||||
for iline in identified_lines:
|
||||
adists = np.diff(iline[1])
|
||||
np.testing.assert_array_less(np.abs(adists), max_dist)
|
||||
|
||||
agaps = np.diff(iline[0])
|
||||
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
|
||||
|
||||
|
||||
class TestArgrel(object):
|
||||
|
||||
def test_empty(self):
|
||||
# Regression test for gh-2832.
|
||||
# When there are no relative extrema, make sure that
|
||||
# the number of empty arrays returned matches the
|
||||
# dimension of the input.
|
||||
|
||||
empty_array = np.array([], dtype=int)
|
||||
|
||||
z1 = np.zeros(5)
|
||||
|
||||
i = argrelmin(z1)
|
||||
assert_equal(len(i), 1)
|
||||
assert_array_equal(i[0], empty_array)
|
||||
|
||||
z2 = np.zeros((3,5))
|
||||
|
||||
row, col = argrelmin(z2, axis=0)
|
||||
assert_array_equal(row, empty_array)
|
||||
assert_array_equal(col, empty_array)
|
||||
|
||||
row, col = argrelmin(z2, axis=1)
|
||||
assert_array_equal(row, empty_array)
|
||||
assert_array_equal(col, empty_array)
|
||||
|
||||
def test_basic(self):
|
||||
# Note: the docstrings for the argrel{min,max,extrema} functions
|
||||
# do not give a guarantee of the order of the indices, so we'll
|
||||
# sort them before testing.
|
||||
|
||||
x = np.array([[1, 2, 2, 3, 2],
|
||||
[2, 1, 2, 2, 3],
|
||||
[3, 2, 1, 2, 2],
|
||||
[2, 3, 2, 1, 2],
|
||||
[1, 2, 3, 2, 1]])
|
||||
|
||||
row, col = argrelmax(x, axis=0)
|
||||
order = np.argsort(row)
|
||||
assert_equal(row[order], [1, 2, 3])
|
||||
assert_equal(col[order], [4, 0, 1])
|
||||
|
||||
row, col = argrelmax(x, axis=1)
|
||||
order = np.argsort(row)
|
||||
assert_equal(row[order], [0, 3, 4])
|
||||
assert_equal(col[order], [3, 1, 2])
|
||||
|
||||
row, col = argrelmin(x, axis=0)
|
||||
order = np.argsort(row)
|
||||
assert_equal(row[order], [1, 2, 3])
|
||||
assert_equal(col[order], [1, 2, 3])
|
||||
|
||||
row, col = argrelmin(x, axis=1)
|
||||
order = np.argsort(row)
|
||||
assert_equal(row[order], [1, 2, 3])
|
||||
assert_equal(col[order], [1, 2, 3])
|
||||
|
||||
def test_highorder(self):
|
||||
order = 2
|
||||
sigmas = [1.0, 2.0, 10.0, 5.0, 15.0]
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, 500)
|
||||
test_data[act_locs + order] = test_data[act_locs]*0.99999
|
||||
test_data[act_locs - order] = test_data[act_locs]*0.99999
|
||||
rel_max_locs = argrelmax(test_data, order=order, mode='clip')[0]
|
||||
|
||||
assert_(len(rel_max_locs) == len(act_locs))
|
||||
assert_((rel_max_locs == act_locs).all())
|
||||
|
||||
def test_2d_gaussians(self):
|
||||
sigmas = [1.0, 2.0, 10.0]
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, 100)
|
||||
rot_factor = 20
|
||||
rot_range = np.arange(0, len(test_data)) - rot_factor
|
||||
test_data_2 = np.vstack([test_data, test_data[rot_range]])
|
||||
rel_max_rows, rel_max_cols = argrelmax(test_data_2, axis=1, order=1)
|
||||
|
||||
for rw in range(0, test_data_2.shape[0]):
|
||||
inds = (rel_max_rows == rw)
|
||||
|
||||
assert_(len(rel_max_cols[inds]) == len(act_locs))
|
||||
assert_((act_locs == (rel_max_cols[inds] - rot_factor*rw)).all())
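# Illustrative sketch (not part of the original tests): argrelmax returns the
# indices of samples strictly greater than their `order` neighbours on both
# sides (order=1 by default), as a tuple with one array per axis.
import numpy as np
from scipy.signal import argrelmax
x = np.array([1, 3, 2, 5, 2, 0, 4, 1])
print(argrelmax(x))   # (array([1, 3, 6]),)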
|
||||
|
||||
|
||||
class TestPeakProminences(object):
|
||||
|
||||
def test_empty(self):
|
||||
"""
|
||||
Test if an empty array is returned if no peaks are provided.
|
||||
"""
|
||||
out = peak_prominences([1, 2, 3], [])
|
||||
for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
|
||||
assert_(arr.size == 0)
|
||||
assert_(arr.dtype == dtype)
|
||||
|
||||
out = peak_prominences([], [])
|
||||
for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
|
||||
assert_(arr.size == 0)
|
||||
assert_(arr.dtype == dtype)
|
||||
|
||||
def test_basic(self):
|
||||
"""
|
||||
Test if height of prominences is correctly calculated in signal with
|
||||
rising baseline (peak widths are 1 sample).
|
||||
"""
|
||||
# Prepare basic signal
|
||||
x = np.array([-1, 1.2, 1.2, 1, 3.2, 1.3, 2.88, 2.1])
|
||||
peaks = np.array([1, 2, 4, 6])
|
||||
lbases = np.array([0, 0, 0, 5])
|
||||
rbases = np.array([3, 3, 5, 7])
|
||||
proms = x[peaks] - np.max([x[lbases], x[rbases]], axis=0)
|
||||
# Test if calculation matches handcrafted result
|
||||
out = peak_prominences(x, peaks)
|
||||
assert_equal(out[0], proms)
|
||||
assert_equal(out[1], lbases)
|
||||
assert_equal(out[2], rbases)
|
||||
|
||||
def test_edge_cases(self):
|
||||
"""
|
||||
Test edge cases.
|
||||
"""
|
||||
# Peaks have same height, prominence and bases
|
||||
x = [0, 2, 1, 2, 1, 2, 0]
|
||||
peaks = [1, 3, 5]
|
||||
proms, lbases, rbases = peak_prominences(x, peaks)
|
||||
assert_equal(proms, [2, 2, 2])
|
||||
assert_equal(lbases, [0, 0, 0])
|
||||
assert_equal(rbases, [6, 6, 6])
|
||||
|
||||
# Peaks have same height & prominence but different bases
|
||||
x = [0, 1, 0, 1, 0, 1, 0]
|
||||
peaks = np.array([1, 3, 5])
|
||||
proms, lbases, rbases = peak_prominences(x, peaks)
|
||||
assert_equal(proms, [1, 1, 1])
|
||||
assert_equal(lbases, peaks - 1)
|
||||
assert_equal(rbases, peaks + 1)
|
||||
|
||||
def test_non_contiguous(self):
|
||||
"""
|
||||
Test with non-C-contiguous input arrays.
|
||||
"""
|
||||
x = np.repeat([-9, 9, 9, 0, 3, 1], 2)
|
||||
peaks = np.repeat([1, 2, 4], 2)
|
||||
proms, lbases, rbases = peak_prominences(x[::2], peaks[::2])
|
||||
assert_equal(proms, [9, 9, 2])
|
||||
assert_equal(lbases, [0, 0, 3])
|
||||
assert_equal(rbases, [3, 3, 5])
|
||||
|
||||
def test_wlen(self):
|
||||
"""
|
||||
Test if wlen actually shrinks the evaluation range correctly.
|
||||
"""
|
||||
x = [0, 1, 2, 3, 1, 0, -1]
|
||||
peak = [3]
|
||||
# Test rounding behavior of wlen
|
||||
assert_equal(peak_prominences(x, peak), [3., 0, 6])
|
||||
for wlen, i in [(8, 0), (7, 0), (6, 0), (5, 1), (3.2, 1), (3, 2), (1.1, 2)]:
|
||||
assert_equal(peak_prominences(x, peak, wlen), [3. - i, 0 + i, 6 - i])
|
||||
|
||||
def test_exceptions(self):
|
||||
"""
|
||||
Verify that exceptions and warnings are raised.
|
||||
"""
|
||||
# x with dimension > 1
|
||||
with raises(ValueError, match='1-D array'):
|
||||
peak_prominences([[0, 1, 1, 0]], [1, 2])
|
||||
# peaks with dimension > 1
|
||||
with raises(ValueError, match='1-D array'):
|
||||
peak_prominences([0, 1, 1, 0], [[1, 2]])
|
||||
# x with dimension < 1
|
||||
with raises(ValueError, match='1-D array'):
|
||||
peak_prominences(3, [0,])
|
||||
|
||||
# empty x with peaks supplied
|
||||
with raises(ValueError, match='not a valid index'):
|
||||
peak_prominences([], [0])
|
||||
# invalid indices with non-empty x
|
||||
for p in [-100, -1, 3, 1000]:
|
||||
with raises(ValueError, match='not a valid index'):
|
||||
peak_prominences([1, 0, 2], [p])
|
||||
|
||||
# peaks is not cast-able to np.intp
|
||||
with raises(TypeError, match='cannot safely cast'):
|
||||
peak_prominences([0, 1, 1, 0], [1.1, 2.3])
|
||||
|
||||
# wlen < 3
|
||||
with raises(ValueError, match='wlen'):
|
||||
peak_prominences(np.arange(10), [3, 5], wlen=1)
|
||||
|
||||
def test_warnings(self):
|
||||
"""
|
||||
Verify that appropriate warnings are raised.
|
||||
"""
|
||||
msg = "some peaks have a prominence of 0"
|
||||
for p in [0, 1, 2]:
|
||||
with warns(PeakPropertyWarning, match=msg):
|
||||
peak_prominences([1, 0, 2], [p,])
|
||||
with warns(PeakPropertyWarning, match=msg):
|
||||
peak_prominences([0, 1, 1, 1, 0], [2], wlen=2)
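# Illustrative sketch (not part of the original tests): wlen restricts the
# window around each peak used to search for its bases, which lowers the
# reported prominence when the true bases fall outside the window.
import numpy as np
from scipy.signal import peak_prominences
x = np.array([0, 1, 2, 3, 1, 0, -1])
print(peak_prominences(x, [3]))          # full signal   -> (3.0, base 0, base 6)
print(peak_prominences(x, [3], wlen=3))  # narrow window -> (1.0, base 2, base 4)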
|
||||
|
||||
|
||||
class TestPeakWidths(object):
|
||||
|
||||
def test_empty(self):
|
||||
"""
|
||||
Test if an empty array is returned if no peaks are provided.
|
||||
"""
|
||||
widths = peak_widths([], [])[0]
|
||||
assert_(isinstance(widths, np.ndarray))
|
||||
assert_equal(widths.size, 0)
|
||||
widths = peak_widths([1, 2, 3], [])[0]
|
||||
assert_(isinstance(widths, np.ndarray))
|
||||
assert_equal(widths.size, 0)
|
||||
out = peak_widths([], [])
|
||||
for arr in out:
|
||||
assert_(isinstance(arr, np.ndarray))
|
||||
assert_equal(arr.size, 0)
|
||||
|
||||
@pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
|
||||
def test_basic(self):
|
||||
"""
|
||||
Test a simple use case with easy to verify results at different relative
|
||||
heights.
|
||||
"""
|
||||
x = np.array([1, 0, 1, 2, 1, 0, -1])
|
||||
prominence = 2
|
||||
for rel_height, width_true, lip_true, rip_true in [
|
||||
(0., 0., 3., 3.), # raises warning
|
||||
(0.25, 1., 2.5, 3.5),
|
||||
(0.5, 2., 2., 4.),
|
||||
(0.75, 3., 1.5, 4.5),
|
||||
(1., 4., 1., 5.),
|
||||
(2., 5., 1., 6.),
|
||||
(3., 5., 1., 6.)
|
||||
]:
|
||||
width_calc, height, lip_calc, rip_calc = peak_widths(
|
||||
x, [3], rel_height)
|
||||
assert_allclose(width_calc, width_true)
|
||||
assert_allclose(height, 2 - rel_height * prominence)
|
||||
assert_allclose(lip_calc, lip_true)
|
||||
assert_allclose(rip_calc, rip_true)
|
||||
|
||||
def test_non_contiguous(self):
|
||||
"""
|
||||
Test with non-C-contiguous input arrays.
|
||||
"""
|
||||
x = np.repeat([0, 100, 50], 4)
|
||||
peaks = np.repeat([1], 3)
|
||||
result = peak_widths(x[::4], peaks[::3])
|
||||
assert_equal(result, [0.75, 75, 0.75, 1.5])
|
||||
|
||||
def test_exceptions(self):
|
||||
"""
|
||||
Verify that argument validation works as intended.
|
||||
"""
|
||||
with raises(ValueError, match='1-D array'):
|
||||
# x with dimension > 1
|
||||
peak_widths(np.zeros((3, 4)), np.ones(3))
|
||||
with raises(ValueError, match='1-D array'):
|
||||
# x with dimension < 1
|
||||
peak_widths(3, [0])
|
||||
with raises(ValueError, match='1-D array'):
|
||||
# peaks with dimension > 1
|
||||
peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp))
|
||||
with raises(ValueError, match='1-D array'):
|
||||
# peaks with dimension < 1
|
||||
peak_widths(np.arange(10), 3)
|
||||
with raises(ValueError, match='not a valid index'):
|
||||
# peak pos exceeds x.size
|
||||
peak_widths(np.arange(10), [8, 11])
|
||||
with raises(ValueError, match='not a valid index'):
|
||||
# empty x with peaks supplied
|
||||
peak_widths([], [1, 2])
|
||||
with raises(TypeError, match='cannot safely cast'):
|
||||
# peak cannot be safely casted to intp
|
||||
peak_widths(np.arange(10), [1.1, 2.3])
|
||||
with raises(ValueError, match='rel_height'):
|
||||
# rel_height is < 0
|
||||
peak_widths([0, 1, 0, 1, 0], [1, 3], rel_height=-1)
|
||||
with raises(TypeError, match='None'):
|
||||
# prominence data contains None
|
||||
peak_widths([1, 2, 1], [1], prominence_data=(None, None, None))
|
||||
|
||||
def test_warnings(self):
|
||||
"""
|
||||
Verify that appropriate warnings are raised.
|
||||
"""
|
||||
msg = "some peaks have a width of 0"
|
||||
with warns(PeakPropertyWarning, match=msg):
|
||||
# Case: rel_height is 0
|
||||
peak_widths([0, 1, 0], [1], rel_height=0)
|
||||
with warns(PeakPropertyWarning, match=msg):
|
||||
# Case: prominence is 0 and bases are identical
|
||||
peak_widths(
|
||||
[0, 1, 1, 1, 0], [2],
|
||||
prominence_data=(np.array([0.], np.float64),
|
||||
np.array([2], np.intp),
|
||||
np.array([2], np.intp))
|
||||
)
|
||||
|
||||
def test_mismatching_prominence_data(self):
|
||||
"""Test with mismatching peak and / or prominence data."""
|
||||
x = [0, 1, 0]
|
||||
peak = [1]
|
||||
for i, (prominences, left_bases, right_bases) in enumerate([
|
||||
((1.,), (-1,), (2,)), # left base not in x
|
||||
((1.,), (0,), (3,)), # right base not in x
|
||||
((1.,), (2,), (0,)), # swapped bases same as peak
|
||||
((1., 1.), (0, 0), (2, 2)), # array shapes don't match peaks
|
||||
((1., 1.), (0,), (2,)), # arrays with different shapes
|
||||
((1.,), (0, 0), (2,)), # arrays with different shapes
|
||||
((1.,), (0,), (2, 2)) # arrays with different shapes
|
||||
]):
|
||||
# Make sure the input matches the output format of signal.peak_prominences
|
||||
prominence_data = (np.array(prominences, dtype=np.float64),
|
||||
np.array(left_bases, dtype=np.intp),
|
||||
np.array(right_bases, dtype=np.intp))
|
||||
# Test for correct exception
|
||||
if i < 3:
|
||||
match = "prominence data is invalid for peak"
|
||||
else:
|
||||
match = "arrays in `prominence_data` must have the same shape"
|
||||
with raises(ValueError, match=match):
|
||||
peak_widths(x, peak, prominence_data=prominence_data)
|
||||
|
||||
@pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
|
||||
def test_intersection_rules(self):
|
||||
"""Test if x == eval_height counts as an intersection."""
|
||||
# Flat peak with two possible intersection points if evaluated at 1
|
||||
x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0]
|
||||
# relative height is 0 -> width is 0 as well, raises warning
|
||||
assert_allclose(peak_widths(x, peaks=[5], rel_height=0),
|
||||
[(0.,), (3.,), (5.,), (5.,)])
|
||||
# width_height == x counts as intersection -> nearest 1 is chosen
|
||||
assert_allclose(peak_widths(x, peaks=[5], rel_height=2/3),
|
||||
[(4.,), (1.,), (3.,), (7.,)])
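# Illustrative sketch (not part of the original tests): peak_widths measures
# the width of a peak at a fraction (rel_height) of its prominence below the
# peak height, returning interpolated crossing positions.
import numpy as np
from scipy.signal import peak_widths
x = np.array([1, 0, 1, 2, 1, 0, -1])
widths, heights, left_ips, right_ips = peak_widths(x, [3], rel_height=0.5)
# width 2.0 evaluated at height 1.0, with crossings at positions 2.0 and 4.0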
|
||||
|
||||
|
||||
def test_unpack_condition_args():
|
||||
"""
|
||||
Verify parsing of condition arguments for `scipy.signal.find_peaks` function.
|
||||
"""
|
||||
x = np.arange(10)
|
||||
amin_true = x
|
||||
amax_true = amin_true + 10
|
||||
peaks = amin_true[1::2]
|
||||
|
||||
# Test unpacking with None or interval
|
||||
assert_((None, None) == _unpack_condition_args((None, None), x, peaks))
|
||||
assert_((1, None) == _unpack_condition_args(1, x, peaks))
|
||||
assert_((1, None) == _unpack_condition_args((1, None), x, peaks))
|
||||
assert_((None, 2) == _unpack_condition_args((None, 2), x, peaks))
|
||||
assert_((3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks))
|
||||
|
||||
# Test if borders are correctly reduced with `peaks`
|
||||
amin_calc, amax_calc = _unpack_condition_args((amin_true, amax_true), x, peaks)
|
||||
assert_equal(amin_calc, amin_true[peaks])
|
||||
assert_equal(amax_calc, amax_true[peaks])
|
||||
|
||||
# Test raises if array borders don't match x
|
||||
with raises(ValueError, match="array size of lower"):
|
||||
_unpack_condition_args(amin_true, np.arange(11), peaks)
|
||||
with raises(ValueError, match="array size of upper"):
|
||||
_unpack_condition_args((None, amin_true), np.arange(11), peaks)
|
||||
|
||||
|
||||
class TestFindPeaks(object):
|
||||
|
||||
# Keys of optionally returned properties
|
||||
property_keys = {'peak_heights', 'left_thresholds', 'right_thresholds',
|
||||
'prominences', 'left_bases', 'right_bases', 'widths',
|
||||
'width_heights', 'left_ips', 'right_ips'}
|
||||
|
||||
def test_constant(self):
|
||||
"""
|
||||
Test behavior for signal without local maxima.
|
||||
"""
|
||||
open_interval = (None, None)
|
||||
peaks, props = find_peaks(np.ones(10),
|
||||
height=open_interval, threshold=open_interval,
|
||||
prominence=open_interval, width=open_interval)
|
||||
assert_(peaks.size == 0)
|
||||
for key in self.property_keys:
|
||||
assert_(props[key].size == 0)
|
||||
|
||||
def test_plateau_size(self):
|
||||
"""
|
||||
Test plateau size condition for peaks.
|
||||
"""
|
||||
# Prepare signal with peaks with peak_height == plateau_size
|
||||
plateau_sizes = np.array([1, 2, 3, 4, 8, 20, 111])
|
||||
x = np.zeros(plateau_sizes.size * 2 + 1)
|
||||
x[1::2] = plateau_sizes
|
||||
repeats = np.ones(x.size, dtype=int)
|
||||
repeats[1::2] = x[1::2]
|
||||
x = np.repeat(x, repeats)
|
||||
|
||||
# Test full output
|
||||
peaks, props = find_peaks(x, plateau_size=(None, None))
|
||||
assert_equal(peaks, [1, 3, 7, 11, 18, 33, 100])
|
||||
assert_equal(props["plateau_sizes"], plateau_sizes)
|
||||
assert_equal(props["left_edges"], peaks - (plateau_sizes - 1) // 2)
|
||||
assert_equal(props["right_edges"], peaks + plateau_sizes // 2)
|
||||
|
||||
# Test conditions
|
||||
assert_equal(find_peaks(x, plateau_size=4)[0], [11, 18, 33, 100])
|
||||
assert_equal(find_peaks(x, plateau_size=(None, 3.5))[0], [1, 3, 7])
|
||||
assert_equal(find_peaks(x, plateau_size=(5, 50))[0], [18, 33])
|
||||
|
||||
def test_height_condition(self):
|
||||
"""
|
||||
Test height condition for peaks.
|
||||
"""
|
||||
x = (0., 1/3, 0., 2.5, 0, 4., 0)
|
||||
peaks, props = find_peaks(x, height=(None, None))
|
||||
assert_equal(peaks, np.array([1, 3, 5]))
|
||||
assert_equal(props['peak_heights'], np.array([1/3, 2.5, 4.]))
|
||||
assert_equal(find_peaks(x, height=0.5)[0], np.array([3, 5]))
|
||||
assert_equal(find_peaks(x, height=(None, 3))[0], np.array([1, 3]))
|
||||
assert_equal(find_peaks(x, height=(2, 3))[0], np.array([3]))
|
||||
|
||||
def test_threshold_condition(self):
|
||||
"""
|
||||
Test threshold condition for peaks.
|
||||
"""
|
||||
x = (0, 2, 1, 4, -1)
|
||||
peaks, props = find_peaks(x, threshold=(None, None))
|
||||
assert_equal(peaks, np.array([1, 3]))
|
||||
assert_equal(props['left_thresholds'], np.array([2, 3]))
|
||||
assert_equal(props['right_thresholds'], np.array([1, 5]))
|
||||
assert_equal(find_peaks(x, threshold=2)[0], np.array([3]))
|
||||
assert_equal(find_peaks(x, threshold=3.5)[0], np.array([]))
|
||||
assert_equal(find_peaks(x, threshold=(None, 5))[0], np.array([1, 3]))
|
||||
assert_equal(find_peaks(x, threshold=(None, 4))[0], np.array([1]))
|
||||
assert_equal(find_peaks(x, threshold=(2, 4))[0], np.array([]))
|
||||
|
||||
def test_distance_condition(self):
|
||||
"""
|
||||
Test distance condition for peaks.
|
||||
"""
|
||||
# Peaks of different height with constant distance 3
|
||||
peaks_all = np.arange(1, 21, 3)
|
||||
x = np.zeros(21)
|
||||
x[peaks_all] += np.linspace(1, 2, peaks_all.size)
|
||||
|
||||
# Test if peaks with "minimal" distance are still selected (distance = 3)
|
||||
assert_equal(find_peaks(x, distance=3)[0], peaks_all)
|
||||
|
||||
# Select every second peak (distance > 3)
|
||||
peaks_subset = find_peaks(x, distance=3.0001)[0]
|
||||
# Test if peaks_subset is subset of peaks_all
|
||||
assert_(
|
||||
np.setdiff1d(peaks_subset, peaks_all, assume_unique=True).size == 0
|
||||
)
|
||||
# Test if every second peak was removed
|
||||
assert_equal(np.diff(peaks_subset), 6)
|
||||
|
||||
# Test priority of peak removal
|
||||
x = [-2, 1, -1, 0, -3]
|
||||
peaks_subset = find_peaks(x, distance=10)[0] # use distance > x size
|
||||
assert_(peaks_subset.size == 1 and peaks_subset[0] == 1)
|
||||
|
||||
def test_prominence_condition(self):
|
||||
"""
|
||||
Test prominence condition for peaks.
|
||||
"""
|
||||
x = np.linspace(0, 10, 100)
|
||||
peaks_true = np.arange(1, 99, 2)
|
||||
offset = np.linspace(1, 10, peaks_true.size)
|
||||
x[peaks_true] += offset
|
||||
prominences = x[peaks_true] - x[peaks_true + 1]
|
||||
interval = (3, 9)
|
||||
keep = np.nonzero(
|
||||
(interval[0] <= prominences) & (prominences <= interval[1]))
|
||||
|
||||
peaks_calc, properties = find_peaks(x, prominence=interval)
|
||||
assert_equal(peaks_calc, peaks_true[keep])
|
||||
assert_equal(properties['prominences'], prominences[keep])
|
||||
assert_equal(properties['left_bases'], 0)
|
||||
assert_equal(properties['right_bases'], peaks_true[keep] + 1)
|
||||
|
||||
def test_width_condition(self):
|
||||
"""
|
||||
Test width condition for peaks.
|
||||
"""
|
||||
x = np.array([1, 0, 1, 2, 1, 0, -1, 4, 0])
|
||||
peaks, props = find_peaks(x, width=(None, 2), rel_height=0.75)
|
||||
assert_equal(peaks.size, 1)
|
||||
assert_equal(peaks, 7)
|
||||
assert_allclose(props['widths'], 1.35)
|
||||
assert_allclose(props['width_heights'], 1.)
|
||||
assert_allclose(props['left_ips'], 6.4)
|
||||
assert_allclose(props['right_ips'], 7.75)
|
||||
|
||||
def test_properties(self):
|
||||
"""
|
||||
Test returned properties.
|
||||
"""
|
||||
open_interval = (None, None)
|
||||
x = [0, 1, 0, 2, 1.5, 0, 3, 0, 5, 9]
|
||||
peaks, props = find_peaks(x,
|
||||
height=open_interval, threshold=open_interval,
|
||||
prominence=open_interval, width=open_interval)
|
||||
assert_(len(props) == len(self.property_keys))
|
||||
for key in self.property_keys:
|
||||
assert_(peaks.size == props[key].size)
|
||||
|
||||
def test_raises(self):
|
||||
"""
|
||||
Test exceptions raised by function.
|
||||
"""
|
||||
with raises(ValueError, match="1-D array"):
|
||||
find_peaks(np.array(1))
|
||||
with raises(ValueError, match="1-D array"):
|
||||
find_peaks(np.ones((2, 2)))
|
||||
with raises(ValueError, match="distance"):
|
||||
find_peaks(np.arange(10), distance=-1)
|
||||
|
||||
@pytest.mark.filterwarnings("ignore:some peaks have a prominence of 0",
|
||||
"ignore:some peaks have a width of 0")
|
||||
def test_wlen_smaller_plateau(self):
|
||||
"""
|
||||
Test behavior of prominence and width calculation if the given window
|
||||
length is smaller than a peak's plateau size.
|
||||
|
||||
Regression test for gh-9110.
|
||||
"""
|
||||
peaks, props = find_peaks([0, 1, 1, 1, 0], prominence=(None, None),
|
||||
width=(None, None), wlen=2)
|
||||
assert_equal(peaks, 2)
|
||||
assert_equal(props["prominences"], 0)
|
||||
assert_equal(props["widths"], 0)
|
||||
assert_equal(props["width_heights"], 1)
|
||||
for key in ("left_bases", "right_bases", "left_ips", "right_ips"):
|
||||
assert_equal(props[key], peaks)
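# Illustrative sketch (not part of the original tests): find_peaks combines
# the conditions exercised above; height filters by value, while distance
# enforces a minimum spacing and keeps the taller peak of two that are too close.
import numpy as np
from scipy.signal import find_peaks
x = np.array([0, 1, 0, 2, 0, 3, 0])
peaks, props = find_peaks(x, height=1.5)   # -> peaks [3, 5], heights [2., 3.]
peaks, _ = find_peaks(x, distance=3)       # -> peaks [1, 5]; peak 3 is dropped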
|
||||
|
||||
|
||||
class TestFindPeaksCwt(object):
|
||||
|
||||
def test_find_peaks_exact(self):
|
||||
"""
|
||||
Generate a series of gaussians and attempt to find the peak locations.
|
||||
"""
|
||||
sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
|
||||
num_points = 500
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
|
||||
widths = np.arange(0.1, max(sigmas))
|
||||
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0,
|
||||
min_length=None)
|
||||
np.testing.assert_array_equal(found_locs, act_locs,
|
||||
"Found maximum locations did not equal those expected")
|
||||
|
||||
def test_find_peaks_withnoise(self):
|
||||
"""
|
||||
Verify that peak locations are (approximately) found
|
||||
for a series of gaussians with added noise.
|
||||
"""
|
||||
sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
|
||||
num_points = 500
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
|
||||
widths = np.arange(0.1, max(sigmas))
|
||||
noise_amp = 0.07
|
||||
np.random.seed(18181911)
|
||||
test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
|
||||
found_locs = find_peaks_cwt(test_data, widths, min_length=15,
|
||||
gap_thresh=1, min_snr=noise_amp / 5)
|
||||
|
||||
np.testing.assert_equal(len(found_locs), len(act_locs), 'Different number ' +
|
||||
'of peaks found than expected')
|
||||
diffs = np.abs(found_locs - act_locs)
|
||||
max_diffs = np.array(sigmas) / 5
|
||||
np.testing.assert_array_less(diffs, max_diffs, 'Maximum location differed ' +
|
||||
'by more than %s' % (max_diffs))
|
||||
|
||||
def test_find_peaks_nopeak(self):
|
||||
"""
|
||||
Verify that no peak is found in
|
||||
data that's just noise.
|
||||
"""
|
||||
noise_amp = 1.0
|
||||
num_points = 100
|
||||
np.random.seed(181819141)
|
||||
test_data = (np.random.rand(num_points) - 0.5)*(2*noise_amp)
|
||||
widths = np.arange(10, 50)
|
||||
found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30)
|
||||
np.testing.assert_equal(len(found_locs), 0)
|
||||
|
||||
def test_find_peaks_window_size(self):
|
||||
"""
|
||||
Verify that window_size is passed correctly to private function and
|
||||
affects the result.
|
||||
"""
|
||||
sigmas = [2.0, 2.0]
|
||||
num_points = 1000
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
|
||||
widths = np.arange(0.1, max(sigmas), 0.2)
|
||||
noise_amp = 0.05
|
||||
np.random.seed(18181911)
|
||||
test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
|
||||
|
||||
# Possibly contrived negative region to throw off peak finding
|
||||
# when window_size is too large
|
||||
test_data[250:320] -= 1
|
||||
|
||||
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3,
|
||||
min_length=None, window_size=None)
|
||||
with pytest.raises(AssertionError):
|
||||
assert found_locs.size == act_locs.size
|
||||
|
||||
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3,
|
||||
min_length=None, window_size=20)
|
||||
assert found_locs.size == act_locs.size
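# Illustrative sketch (not part of the original tests): find_peaks_cwt
# correlates the data with wavelets over a range of widths and keeps ridge
# lines that persist across scales, which makes it robust to noise.
import numpy as np
from scipy.signal import find_peaks_cwt
xs = np.arange(500)
data = np.exp(-0.5 * ((xs - 150) / 10) ** 2) + np.exp(-0.5 * ((xs - 350) / 20) ** 2)
found = find_peaks_cwt(data, widths=np.arange(5, 40))
# found should lie close to the true peak locations [150, 350]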
301
venv/Lib/site-packages/scipy/signal/tests/test_savitzky_golay.py
Normal file
@ -0,0 +1,301 @@
import numpy as np
|
||||
from numpy.testing import (assert_allclose, assert_equal,
|
||||
assert_almost_equal, assert_array_equal,
|
||||
assert_array_almost_equal)
|
||||
|
||||
from scipy.ndimage import convolve1d
|
||||
|
||||
from scipy.signal import savgol_coeffs, savgol_filter
|
||||
from scipy.signal._savitzky_golay import _polyder
|
||||
|
||||
|
||||
def check_polyder(p, m, expected):
|
||||
dp = _polyder(p, m)
|
||||
assert_array_equal(dp, expected)
|
||||
|
||||
|
||||
def test_polyder():
|
||||
cases = [
|
||||
([5], 0, [5]),
|
||||
([5], 1, [0]),
|
||||
([3, 2, 1], 0, [3, 2, 1]),
|
||||
([3, 2, 1], 1, [6, 2]),
|
||||
([3, 2, 1], 2, [6]),
|
||||
([3, 2, 1], 3, [0]),
|
||||
([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),
|
||||
([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),
|
||||
([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),
|
||||
([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),
|
||||
]
|
||||
for p, m, expected in cases:
|
||||
check_polyder(np.array(p).T, m, np.array(expected).T)
|
||||
|
||||
|
||||
#--------------------------------------------------------------------
|
||||
# savgol_coeffs tests
|
||||
#--------------------------------------------------------------------
|
||||
|
||||
def alt_sg_coeffs(window_length, polyorder, pos):
|
||||
"""This is an alternative implementation of the SG coefficients.
|
||||
|
||||
It uses numpy.polyfit and numpy.polyval. The results should be
|
||||
equivalent to those of savgol_coeffs(), but this implementation
|
||||
is slower.
|
||||
|
||||
window_length should be odd.
|
||||
|
||||
"""
|
||||
if pos is None:
|
||||
pos = window_length // 2
|
||||
t = np.arange(window_length)
|
||||
unit = (t == pos).astype(int)
|
||||
h = np.polyval(np.polyfit(t, unit, polyorder), t)
|
||||
return h
|
||||
|
||||
|
||||
def test_sg_coeffs_trivial():
|
||||
# Test a trivial case of savgol_coeffs: polyorder = window_length - 1
|
||||
h = savgol_coeffs(1, 0)
|
||||
assert_allclose(h, [1])
|
||||
|
||||
h = savgol_coeffs(3, 2)
|
||||
assert_allclose(h, [0, 1, 0], atol=1e-10)
|
||||
|
||||
h = savgol_coeffs(5, 4)
|
||||
assert_allclose(h, [0, 0, 1, 0, 0], atol=1e-10)
|
||||
|
||||
h = savgol_coeffs(5, 4, pos=1)
|
||||
assert_allclose(h, [0, 0, 0, 1, 0], atol=1e-10)
|
||||
|
||||
h = savgol_coeffs(5, 4, pos=1, use='dot')
|
||||
assert_allclose(h, [0, 1, 0, 0, 0], atol=1e-10)
|
||||
|
||||
|
||||
def compare_coeffs_to_alt(window_length, order):
|
||||
# For the given window_length and order, compare the results
|
||||
# of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1.
|
||||
# Also include pos=None.
|
||||
for pos in [None] + list(range(window_length)):
|
||||
h1 = savgol_coeffs(window_length, order, pos=pos, use='dot')
|
||||
h2 = alt_sg_coeffs(window_length, order, pos=pos)
|
||||
assert_allclose(h1, h2, atol=1e-10,
|
||||
err_msg=("window_length = %d, order = %d, pos = %s" %
|
||||
(window_length, order, pos)))
|
||||
|
||||
|
||||
def test_sg_coeffs_compare():
|
||||
# Compare savgol_coeffs() to alt_sg_coeffs().
|
||||
for window_length in range(1, 8, 2):
|
||||
for order in range(window_length):
|
||||
compare_coeffs_to_alt(window_length, order)
|
||||
|
||||
|
||||
def test_sg_coeffs_exact():
|
||||
polyorder = 4
|
||||
window_length = 9
|
||||
halflen = window_length // 2
|
||||
|
||||
x = np.linspace(0, 21, 43)
|
||||
delta = x[1] - x[0]
|
||||
|
||||
# The data is a cubic polynomial. We'll use an order 4
|
||||
# SG filter, so the filtered values should equal the input data
|
||||
# (except within half window_length of the edges).
|
||||
y = 0.5 * x ** 3 - x
|
||||
h = savgol_coeffs(window_length, polyorder)
|
||||
y0 = convolve1d(y, h)
|
||||
assert_allclose(y0[halflen:-halflen], y[halflen:-halflen])
|
||||
|
||||
# Check the same input, but use deriv=1. dy is the exact result.
|
||||
dy = 1.5 * x ** 2 - 1
|
||||
h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta)
|
||||
y1 = convolve1d(y, h)
|
||||
assert_allclose(y1[halflen:-halflen], dy[halflen:-halflen])
|
||||
|
||||
# Check the same input, but use deriv=2. d2y is the exact result.
|
||||
d2y = 3.0 * x
|
||||
h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta)
|
||||
y2 = convolve1d(y, h)
|
||||
assert_allclose(y2[halflen:-halflen], d2y[halflen:-halflen])
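# Illustrative sketch (not part of the original tests): the exactness property
# used above also holds for savgol_filter itself; a filter of polyorder p
# reproduces any polynomial of degree <= p, and mode='interp' keeps the edges exact.
import numpy as np
from scipy.signal import savgol_filter
t = np.linspace(0, 1, 51)
y = 2 * t ** 3 - t                      # cubic test data
ys = savgol_filter(y, window_length=9, polyorder=4, mode='interp')
assert np.allclose(ys, y, atol=1e-8)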
|
||||
|
||||
|
||||
def test_sg_coeffs_deriv():
|
||||
# The data in `x` is a sampled parabola, so using savgol_coeffs with an
|
||||
# order 2 or higher polynomial should give exact results.
|
||||
i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])
|
||||
x = i ** 2 / 4
|
||||
dx = i / 2
|
||||
d2x = np.full_like(i, 0.5)
|
||||
for pos in range(x.size):
|
||||
coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot')
|
||||
assert_allclose(coeffs0.dot(x), x[pos], atol=1e-10)
|
||||
coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1)
|
||||
assert_allclose(coeffs1.dot(x), dx[pos], atol=1e-10)
|
||||
coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2)
|
||||
assert_allclose(coeffs2.dot(x), d2x[pos], atol=1e-10)
|
||||
|
||||
|
||||
def test_sg_coeffs_deriv_gt_polyorder():
|
||||
"""
|
||||
If deriv > polyorder, the coefficients should be all 0.
|
||||
This is a regression test for a bug where, e.g.,
|
||||
savgol_coeffs(5, polyorder=1, deriv=2)
|
||||
raised an error.
|
||||
"""
|
||||
coeffs = savgol_coeffs(5, polyorder=1, deriv=2)
|
||||
assert_array_equal(coeffs, np.zeros(5))
|
||||
coeffs = savgol_coeffs(7, polyorder=4, deriv=6)
|
||||
assert_array_equal(coeffs, np.zeros(7))
|
||||
|
||||
|
||||
def test_sg_coeffs_large():
|
||||
# Test that for large values of window_length and polyorder the array of
|
||||
# coefficients returned is symmetric. The aim is to ensure that
|
||||
# no potential numeric overflow occurs.
|
||||
coeffs0 = savgol_coeffs(31, 9)
|
||||
assert_array_almost_equal(coeffs0, coeffs0[::-1])
|
||||
coeffs1 = savgol_coeffs(31, 9, deriv=1)
|
||||
assert_array_almost_equal(coeffs1, -coeffs1[::-1])
|
||||
|
||||
|
||||
#--------------------------------------------------------------------
|
||||
# savgol_filter tests
|
||||
#--------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_sg_filter_trivial():
|
||||
""" Test some trivial edge cases for savgol_filter()."""
|
||||
x = np.array([1.0])
|
||||
y = savgol_filter(x, 1, 0)
|
||||
assert_equal(y, [1.0])
|
||||
|
||||
# Input is a single value. With a window length of 3 and polyorder 1,
|
||||
# the value in y is from the straight-line fit of (-1,0), (0,3) and
|
||||
# (1, 0) at 0. This is just the average of the three values, hence 1.0.
|
||||
x = np.array([3.0])
|
||||
y = savgol_filter(x, 3, 1, mode='constant')
|
||||
assert_almost_equal(y, [1.0], decimal=15)
|
||||
|
||||
x = np.array([3.0])
|
||||
y = savgol_filter(x, 3, 1, mode='nearest')
|
||||
assert_almost_equal(y, [3.0], decimal=15)
|
||||
|
||||
x = np.array([1.0] * 3)
|
||||
y = savgol_filter(x, 3, 1, mode='wrap')
|
||||
assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15)
|
||||
|
||||
|
||||
def test_sg_filter_basic():
|
||||
# Some basic test cases for savgol_filter().
|
||||
x = np.array([1.0, 2.0, 1.0])
|
||||
y = savgol_filter(x, 3, 1, mode='constant')
|
||||
assert_allclose(y, [1.0, 4.0 / 3, 1.0])
|
||||
|
||||
y = savgol_filter(x, 3, 1, mode='mirror')
|
||||
assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3])
|
||||
|
||||
y = savgol_filter(x, 3, 1, mode='wrap')
|
||||
assert_allclose(y, [4.0 / 3, 4.0 / 3, 4.0 / 3])
|
||||
|
||||
|
||||
def test_sg_filter_2d():
|
||||
x = np.array([[1.0, 2.0, 1.0],
|
||||
[2.0, 4.0, 2.0]])
|
||||
expected = np.array([[1.0, 4.0 / 3, 1.0],
|
||||
[2.0, 8.0 / 3, 2.0]])
|
||||
y = savgol_filter(x, 3, 1, mode='constant')
|
||||
assert_allclose(y, expected)
|
||||
|
||||
y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)
|
||||
assert_allclose(y, expected.T)
|
||||
|
||||
|
||||
def test_sg_filter_interp_edges():
|
||||
# Another test with low degree polynomial data, for which we can easily
|
||||
# give the exact results. In this test, we use mode='interp', so
|
||||
# savgol_filter should match the exact solution for the entire data set,
|
||||
# including the edges.
|
||||
t = np.linspace(-5, 5, 21)
|
||||
delta = t[1] - t[0]
|
||||
# Polynomial test data.
|
||||
x = np.array([t,
|
||||
3 * t ** 2,
|
||||
t ** 3 - t])
|
||||
dx = np.array([np.ones_like(t),
|
||||
6 * t,
|
||||
3 * t ** 2 - 1.0])
|
||||
d2x = np.array([np.zeros_like(t),
|
||||
np.full_like(t, 6),
|
||||
6 * t])
|
||||
|
||||
window_length = 7
|
||||
|
||||
y = savgol_filter(x, window_length, 3, axis=-1, mode='interp')
|
||||
assert_allclose(y, x, atol=1e-12)
|
||||
|
||||
y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
|
||||
deriv=1, delta=delta)
|
||||
assert_allclose(y1, dx, atol=1e-12)
|
||||
|
||||
y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
|
||||
deriv=2, delta=delta)
|
||||
assert_allclose(y2, d2x, atol=1e-12)
|
||||
|
||||
# Transpose everything, and test again with axis=0.
|
||||
|
||||
x = x.T
|
||||
dx = dx.T
|
||||
d2x = d2x.T
|
||||
|
||||
y = savgol_filter(x, window_length, 3, axis=0, mode='interp')
|
||||
assert_allclose(y, x, atol=1e-12)
|
||||
|
||||
y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
|
||||
deriv=1, delta=delta)
|
||||
assert_allclose(y1, dx, atol=1e-12)
|
||||
|
||||
y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
|
||||
deriv=2, delta=delta)
|
||||
assert_allclose(y2, d2x, atol=1e-12)
|
||||
|
||||
|
||||
def test_sg_filter_interp_edges_3d():
|
||||
# Test mode='interp' with a 3-D array.
|
||||
t = np.linspace(-5, 5, 21)
|
||||
delta = t[1] - t[0]
|
||||
x1 = np.array([t, -t])
|
||||
x2 = np.array([t ** 2, 3 * t ** 2 + 5])
|
||||
x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t])
|
||||
dx1 = np.array([np.ones_like(t), -np.ones_like(t)])
|
||||
dx2 = np.array([2 * t, 6 * t])
|
||||
dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5])
|
||||
|
||||
# z has shape (3, 2, 21)
|
||||
z = np.array([x1, x2, x3])
|
||||
dz = np.array([dx1, dx2, dx3])
|
||||
|
||||
y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta)
|
||||
assert_allclose(y, z, atol=1e-10)
|
||||
|
||||
dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta)
|
||||
assert_allclose(dy, dz, atol=1e-10)
|
||||
|
||||
# z has shape (3, 21, 2)
|
||||
z = np.array([x1.T, x2.T, x3.T])
|
||||
dz = np.array([dx1.T, dx2.T, dx3.T])
|
||||
|
||||
y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta)
|
||||
assert_allclose(y, z, atol=1e-10)
|
||||
|
||||
dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta)
|
||||
assert_allclose(dy, dz, atol=1e-10)
|
||||
|
||||
# z has shape (21, 3, 2)
|
||||
z = z.swapaxes(0, 1).copy()
|
||||
dz = dz.swapaxes(0, 1).copy()
|
||||
|
||||
y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta)
|
||||
assert_allclose(y, z, atol=1e-10)
|
||||
|
||||
dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta)
|
||||
assert_allclose(dy, dz, atol=1e-10)
3380
venv/Lib/site-packages/scipy/signal/tests/test_signaltools.py
Normal file
File diff suppressed because it is too large
1461
venv/Lib/site-packages/scipy/signal/tests/test_spectral.py
Normal file
File diff suppressed because it is too large
273
venv/Lib/site-packages/scipy/signal/tests/test_upfirdn.py
Normal file
@ -0,0 +1,273 @@
# Code adapted from "upfirdn" python library with permission:
|
||||
#
|
||||
# Copyright (c) 2009, Motorola, Inc
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright notice,
|
||||
# this list of conditions and the following disclaimer.
|
||||
#
|
||||
# * Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
#
|
||||
# * Neither the name of Motorola nor the names of its contributors may be
|
||||
# used to endorse or promote products derived from this software without
|
||||
# specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
||||
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
import numpy as np
|
||||
from itertools import product
|
||||
|
||||
from numpy.testing import assert_equal, assert_allclose
|
||||
from pytest import raises as assert_raises
|
||||
import pytest
|
||||
|
||||
from scipy.signal import upfirdn, firwin
|
||||
from scipy.signal._upfirdn import _output_len, _upfirdn_modes
|
||||
from scipy.signal._upfirdn_apply import _pad_test
|
||||
|
||||
|
||||
def upfirdn_naive(x, h, up=1, down=1):
|
||||
"""Naive upfirdn processing in Python.
|
||||
|
||||
Note: arg order (x, h) differs to facilitate apply_along_axis use.
|
||||
"""
|
||||
h = np.asarray(h)
|
||||
out = np.zeros(len(x) * up, x.dtype)
|
||||
out[::up] = x
|
||||
out = np.convolve(h, out)[::down][:_output_len(len(h), len(x), up, down)]
|
||||
return out
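# Illustrative sketch (not part of the original tests): upfirdn zero-stuffs x
# by the factor `up`, convolves with h, and keeps every `down`-th sample,
# which is exactly what upfirdn_naive above does.
import numpy as np
from scipy.signal import upfirdn
x = np.array([1., 2., 3.])
h = np.array([1., 1.])                      # length-2 boxcar
print(upfirdn(h, x, up=2, down=1))          # [1. 1. 2. 2. 3. 3.]
print(upfirdn_naive(x, h, up=2, down=1))    # same result from the reference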
|
||||
|
||||
|
||||
class UpFIRDnCase(object):
|
||||
"""Test _UpFIRDn object"""
|
||||
def __init__(self, up, down, h, x_dtype):
|
||||
self.up = up
|
||||
self.down = down
|
||||
self.h = np.atleast_1d(h)
|
||||
self.x_dtype = x_dtype
|
||||
self.rng = np.random.RandomState(17)
|
||||
|
||||
def __call__(self):
|
||||
# tiny signal
|
||||
self.scrub(np.ones(1, self.x_dtype))
|
||||
# ones
|
||||
self.scrub(np.ones(10, self.x_dtype)) # ones
|
||||
# randn
|
||||
x = self.rng.randn(10).astype(self.x_dtype)
|
||||
if self.x_dtype in (np.complex64, np.complex128):
|
||||
x += 1j * self.rng.randn(10)
|
||||
self.scrub(x)
|
||||
# ramp
|
||||
self.scrub(np.arange(10).astype(self.x_dtype))
|
||||
# 3D, random
|
||||
size = (2, 3, 5)
|
||||
x = self.rng.randn(*size).astype(self.x_dtype)
|
||||
if self.x_dtype in (np.complex64, np.complex128):
|
||||
x += 1j * self.rng.randn(*size)
|
||||
for axis in range(len(size)):
|
||||
self.scrub(x, axis=axis)
|
||||
x = x[:, ::2, 1::3].T
|
||||
for axis in range(len(size)):
|
||||
self.scrub(x, axis=axis)
|
||||
|
||||
def scrub(self, x, axis=-1):
|
||||
yr = np.apply_along_axis(upfirdn_naive, axis, x,
|
||||
self.h, self.up, self.down)
|
||||
want_len = _output_len(len(self.h), x.shape[axis], self.up, self.down)
|
||||
assert yr.shape[axis] == want_len
|
||||
y = upfirdn(self.h, x, self.up, self.down, axis=axis)
|
||||
assert y.shape[axis] == want_len
|
||||
assert y.shape == yr.shape
|
||||
dtypes = (self.h.dtype, x.dtype)
|
||||
if all(d == np.complex64 for d in dtypes):
|
||||
assert_equal(y.dtype, np.complex64)
|
||||
elif np.complex64 in dtypes and np.float32 in dtypes:
|
||||
assert_equal(y.dtype, np.complex64)
|
||||
elif all(d == np.float32 for d in dtypes):
|
||||
assert_equal(y.dtype, np.float32)
|
||||
elif np.complex128 in dtypes or np.complex64 in dtypes:
|
||||
assert_equal(y.dtype, np.complex128)
|
||||
else:
|
||||
assert_equal(y.dtype, np.float64)
|
||||
assert_allclose(yr, y)
|
||||
|
||||
|
||||
_UPFIRDN_TYPES = (int, np.float32, np.complex64, float, complex)
|
||||
|
||||
|
||||
class TestUpfirdn(object):
|
||||
|
||||
def test_valid_input(self):
|
||||
assert_raises(ValueError, upfirdn, [1], [1], 1, 0) # up or down < 1
|
||||
assert_raises(ValueError, upfirdn, [], [1], 1, 1) # h.ndim != 1
|
||||
assert_raises(ValueError, upfirdn, [[1]], [1], 1, 1)
|
||||
|
||||
@pytest.mark.parametrize('len_h', [1, 2, 3, 4, 5])
|
||||
@pytest.mark.parametrize('len_x', [1, 2, 3, 4, 5])
|
||||
def test_singleton(self, len_h, len_x):
|
||||
# gh-9844: lengths producing expected outputs
|
||||
h = np.zeros(len_h)
|
||||
h[len_h // 2] = 1. # make h a delta
|
||||
x = np.ones(len_x)
|
||||
y = upfirdn(h, x, 1, 1)
|
||||
want = np.pad(x, (len_h // 2, (len_h - 1) // 2), 'constant')
|
||||
assert_allclose(y, want)
|
||||
|
||||
def test_shift_x(self):
|
||||
# gh-9844: shifted x can change values?
|
||||
y = upfirdn([1, 1], [1.], 1, 1)
|
||||
assert_allclose(y, [1, 1]) # was [0, 1] in the issue
|
||||
y = upfirdn([1, 1], [0., 1.], 1, 1)
|
||||
assert_allclose(y, [0, 1, 1])
|
||||
|
||||
# A bunch of lengths/factors chosen because they exposed differences
|
||||
# between the "old way" and new way of computing length, and then
|
||||
# got `expected` from MATLAB
|
||||
@pytest.mark.parametrize('len_h, len_x, up, down, expected', [
|
||||
(2, 2, 5, 2, [1, 0, 0, 0]),
|
||||
(2, 3, 6, 3, [1, 0, 1, 0, 1]),
|
||||
(2, 4, 4, 3, [1, 0, 0, 0, 1]),
|
||||
(3, 2, 6, 2, [1, 0, 0, 1, 0]),
|
||||
(4, 11, 3, 5, [1, 0, 0, 1, 0, 0, 1]),
|
||||
])
|
||||
def test_length_factors(self, len_h, len_x, up, down, expected):
|
||||
# gh-9844: weird factors
|
||||
h = np.zeros(len_h)
|
||||
h[0] = 1.
|
||||
x = np.ones(len_x)
|
||||
y = upfirdn(h, x, up, down)
|
||||
assert_allclose(y, expected)
|
||||
|
||||
@pytest.mark.parametrize('down, want_len', [ # lengths from MATLAB
|
||||
(2, 5015),
|
||||
(11, 912),
|
||||
(79, 127),
|
||||
])
|
||||
def test_vs_convolve(self, down, want_len):
|
||||
# Check that up=1.0 gives same answer as convolve + slicing
|
||||
random_state = np.random.RandomState(17)
|
||||
try_types = (int, np.float32, np.complex64, float, complex)
|
||||
size = 10000
|
||||
|
||||
for dtype in try_types:
|
||||
x = random_state.randn(size).astype(dtype)
|
||||
if dtype in (np.complex64, np.complex128):
|
||||
x += 1j * random_state.randn(size)
|
||||
|
||||
h = firwin(31, 1. / down, window='hamming')
|
||||
yl = upfirdn_naive(x, h, 1, down)
|
||||
y = upfirdn(h, x, up=1, down=down)
|
||||
assert y.shape == (want_len,)
|
||||
assert yl.shape[0] == y.shape[0]
|
||||
assert_allclose(yl, y, atol=1e-7, rtol=1e-7)
|
||||
|
||||
@pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES)
|
||||
@pytest.mark.parametrize('h', (1., 1j))
|
||||
@pytest.mark.parametrize('up, down', [(1, 1), (2, 2), (3, 2), (2, 3)])
|
||||
def test_vs_naive_delta(self, x_dtype, h, up, down):
|
||||
UpFIRDnCase(up, down, h, x_dtype)()
|
||||
|
||||
@pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES)
|
||||
@pytest.mark.parametrize('h_dtype', _UPFIRDN_TYPES)
|
||||
@pytest.mark.parametrize('p_max, q_max',
|
||||
list(product((10, 100), (10, 100))))
|
||||
def test_vs_naive(self, x_dtype, h_dtype, p_max, q_max):
|
||||
tests = self._random_factors(p_max, q_max, h_dtype, x_dtype)
|
||||
for test in tests:
|
||||
test()
|
||||
|
||||
def _random_factors(self, p_max, q_max, h_dtype, x_dtype):
|
||||
n_rep = 3
|
||||
longest_h = 25
|
||||
random_state = np.random.RandomState(17)
|
||||
tests = []
|
||||
|
||||
for _ in range(n_rep):
|
||||
# Randomize the up/down factors somewhat
|
||||
p_add = q_max if p_max > q_max else 1
|
||||
q_add = p_max if q_max > p_max else 1
|
||||
p = random_state.randint(p_max) + p_add
|
||||
q = random_state.randint(q_max) + q_add
|
||||
|
||||
# Generate random FIR coefficients
|
||||
len_h = random_state.randint(longest_h) + 1
|
||||
h = np.atleast_1d(random_state.randint(len_h))
|
||||
h = h.astype(h_dtype)
|
||||
if h_dtype == complex:
|
||||
h += 1j * random_state.randint(len_h)
|
||||
|
||||
tests.append(UpFIRDnCase(p, q, h, x_dtype))
|
||||
|
||||
return tests
|
||||
|
||||
@pytest.mark.parametrize('mode', _upfirdn_modes)
|
||||
def test_extensions(self, mode):
|
||||
"""Test vs. manually computed results for modes not in numpy's pad."""
|
||||
x = np.array([1, 2, 3, 1], dtype=float)
|
||||
npre, npost = 6, 6
|
||||
y = _pad_test(x, npre=npre, npost=npost, mode=mode)
|
||||
if mode == 'antisymmetric':
|
||||
y_expected = np.asarray(
|
||||
[3, 1, -1, -3, -2, -1, 1, 2, 3, 1, -1, -3, -2, -1, 1, 2])
|
||||
elif mode == 'antireflect':
|
||||
y_expected = np.asarray(
|
||||
[1, 2, 3, 1, -1, 0, 1, 2, 3, 1, -1, 0, 1, 2, 3, 1])
|
||||
elif mode == 'smooth':
|
||||
y_expected = np.asarray(
|
||||
[-5, -4, -3, -2, -1, 0, 1, 2, 3, 1, -1, -3, -5, -7, -9, -11])
|
||||
elif mode == "line":
|
||||
lin_slope = (x[-1] - x[0]) / (len(x) - 1)
|
||||
left = x[0] + np.arange(-npre, 0, 1) * lin_slope
|
||||
right = x[-1] + np.arange(1, npost + 1) * lin_slope
|
||||
y_expected = np.concatenate((left, x, right))
|
||||
else:
|
||||
y_expected = np.pad(x, (npre, npost), mode=mode)
|
||||
assert_allclose(y, y_expected)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'size, h_len, mode, dtype',
|
||||
product(
|
||||
[8],
|
||||
[4, 5, 26], # include cases with h_len > 2*size
|
||||
_upfirdn_modes,
|
||||
[np.float32, np.float64, np.complex64, np.complex128],
|
||||
)
|
||||
)
|
||||
def test_modes(self, size, h_len, mode, dtype):
|
||||
random_state = np.random.RandomState(5)
|
||||
x = random_state.randn(size).astype(dtype)
|
||||
if dtype in (np.complex64, np.complex128):
|
||||
x += 1j * random_state.randn(size)
|
||||
h = np.arange(1, 1 + h_len, dtype=x.real.dtype)
|
||||
|
||||
y = upfirdn(h, x, up=1, down=1, mode=mode)
|
||||
# expected result: pad the input, filter with zero padding, then crop
|
||||
npad = h_len - 1
|
||||
if mode in ['antisymmetric', 'antireflect', 'smooth', 'line']:
|
||||
# use _pad_test test function for modes not supported by np.pad.
|
||||
xpad = _pad_test(x, npre=npad, npost=npad, mode=mode)
|
||||
else:
|
||||
xpad = np.pad(x, npad, mode=mode)
|
||||
ypad = upfirdn(h, xpad, up=1, down=1, mode='constant')
|
||||
y_expected = ypad[npad:-npad]
|
||||
|
||||
atol = rtol = np.finfo(dtype).eps * 1e2
|
||||
assert_allclose(y, y_expected, atol=atol, rtol=rtol)
351
venv/Lib/site-packages/scipy/signal/tests/test_waveforms.py
Normal file
@ -0,0 +1,351 @@
import numpy as np
|
||||
from numpy.testing import (assert_almost_equal, assert_equal,
|
||||
assert_, assert_allclose, assert_array_equal)
|
||||
from pytest import raises as assert_raises
|
||||
|
||||
import scipy.signal.waveforms as waveforms
|
||||
|
||||
|
||||
# These chirp_* functions are the instantaneous frequencies of the signals
|
||||
# returned by chirp().
|
||||
|
||||
def chirp_linear(t, f0, f1, t1):
|
||||
f = f0 + (f1 - f0) * t / t1
|
||||
return f
|
||||
|
||||
|
||||
def chirp_quadratic(t, f0, f1, t1, vertex_zero=True):
|
||||
if vertex_zero:
|
||||
f = f0 + (f1 - f0) * t**2 / t1**2
|
||||
else:
|
||||
f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2
|
||||
return f
|
||||
|
||||
|
||||
def chirp_geometric(t, f0, f1, t1):
|
||||
f = f0 * (f1/f0)**(t/t1)
|
||||
return f
|
||||
|
||||
|
||||
def chirp_hyperbolic(t, f0, f1, t1):
|
||||
f = f0*f1*t1 / ((f0 - f1)*t + f1*t1)
|
||||
return f
|
||||
|
||||
|
||||
def compute_frequency(t, theta):
|
||||
"""
|
||||
Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t).
|
||||
"""
|
||||
# Assume theta and t are 1-D NumPy arrays.
|
||||
# Assume that t is uniformly spaced.
|
||||
dt = t[1] - t[0]
|
||||
f = np.diff(theta)/(2*np.pi) / dt
|
||||
tf = 0.5*(t[1:] + t[:-1])
|
||||
return tf, f
|
||||
|
||||
|
||||
class TestChirp(object):
|
||||
|
||||
def test_linear_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear')
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_linear_freq_01(self):
|
||||
method = 'linear'
|
||||
f0 = 1.0
|
||||
f1 = 2.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 100)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_linear_freq_02(self):
|
||||
method = 'linear'
|
||||
f0 = 200.0
|
||||
f1 = 100.0
|
||||
t1 = 10.0
|
||||
t = np.linspace(0, t1, 100)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_quadratic_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic')
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_quadratic_at_zero2(self):
|
||||
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic',
|
||||
vertex_zero=False)
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_quadratic_freq_01(self):
|
||||
method = 'quadratic'
|
||||
f0 = 1.0
|
||||
f1 = 2.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 2000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_quadratic_freq_02(self):
|
||||
method = 'quadratic'
|
||||
f0 = 20.0
|
||||
f1 = 10.0
|
||||
t1 = 10.0
|
||||
t = np.linspace(0, t1, 2000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_logarithmic_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic')
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_logarithmic_freq_01(self):
|
||||
method = 'logarithmic'
|
||||
f0 = 1.0
|
||||
f1 = 2.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 10000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_logarithmic_freq_02(self):
|
||||
method = 'logarithmic'
|
||||
f0 = 200.0
|
||||
f1 = 100.0
|
||||
t1 = 10.0
|
||||
t = np.linspace(0, t1, 10000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_logarithmic_freq_03(self):
|
||||
method = 'logarithmic'
|
||||
f0 = 100.0
|
||||
f1 = 100.0
|
||||
t1 = 10.0
|
||||
t = np.linspace(0, t1, 10000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_hyperbolic_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic')
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_hyperbolic_freq_01(self):
|
||||
method = 'hyperbolic'
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 10000)
|
||||
# f0 f1
|
||||
cases = [[10.0, 1.0],
|
||||
[1.0, 10.0],
|
||||
[-10.0, -1.0],
|
||||
[-1.0, -10.0]]
|
||||
for f0, f1 in cases:
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = chirp_hyperbolic(tf, f0, f1, t1)
|
||||
assert_allclose(f, expected)
|
||||
|
||||
def test_hyperbolic_zero_freq(self):
|
||||
# f0=0 or f1=0 must raise a ValueError.
|
||||
method = 'hyperbolic'
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 5)
|
||||
assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method)
|
||||
assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method)
|
||||
|
||||
def test_unknown_method(self):
|
||||
method = "foo"
|
||||
f0 = 10.0
|
||||
f1 = 20.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 10)
|
||||
assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method)
|
||||
|
||||
def test_integer_t1(self):
|
||||
f0 = 10.0
|
||||
f1 = 20.0
|
||||
t = np.linspace(-1, 1, 11)
|
||||
t1 = 3.0
|
||||
float_result = waveforms.chirp(t, f0, t1, f1)
|
||||
t1 = 3
|
||||
int_result = waveforms.chirp(t, f0, t1, f1)
|
||||
err_msg = "Integer input 't1=3' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_f0(self):
|
||||
f1 = 20.0
|
||||
t1 = 3.0
|
||||
t = np.linspace(-1, 1, 11)
|
||||
f0 = 10.0
|
||||
float_result = waveforms.chirp(t, f0, t1, f1)
|
||||
f0 = 10
|
||||
int_result = waveforms.chirp(t, f0, t1, f1)
|
||||
err_msg = "Integer input 'f0=10' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_f1(self):
|
||||
f0 = 10.0
|
||||
t1 = 3.0
|
||||
t = np.linspace(-1, 1, 11)
|
||||
f1 = 20.0
|
||||
float_result = waveforms.chirp(t, f0, t1, f1)
|
||||
f1 = 20
|
||||
int_result = waveforms.chirp(t, f0, t1, f1)
|
||||
err_msg = "Integer input 'f1=20' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_all(self):
|
||||
f0 = 10
|
||||
t1 = 3
|
||||
f1 = 20
|
||||
t = np.linspace(-1, 1, 11)
|
||||
float_result = waveforms.chirp(t, float(f0), float(t1), float(f1))
|
||||
int_result = waveforms.chirp(t, f0, t1, f1)
|
||||
err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
|
||||
class TestSweepPoly(object):
|
||||
|
||||
def test_sweep_poly_quad1(self):
|
||||
p = np.poly1d([1.0, 0.0, 1.0])
|
||||
t = np.linspace(0, 3.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_sweep_poly_const(self):
|
||||
p = np.poly1d(2.0)
|
||||
t = np.linspace(0, 3.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_sweep_poly_linear(self):
|
||||
p = np.poly1d([-1.0, 10.0])
|
||||
t = np.linspace(0, 3.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_sweep_poly_quad2(self):
|
||||
p = np.poly1d([1.0, 0.0, -2.0])
|
||||
t = np.linspace(0, 3.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_sweep_poly_cubic(self):
|
||||
p = np.poly1d([2.0, 1.0, 0.0, -2.0])
|
||||
t = np.linspace(0, 2.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_sweep_poly_cubic2(self):
|
||||
"""Use an array of coefficients instead of a poly1d."""
|
||||
p = np.array([2.0, 1.0, 0.0, -2.0])
|
||||
t = np.linspace(0, 2.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = np.poly1d(p)(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
def test_sweep_poly_cubic3(self):
|
||||
"""Use a list of coefficients instead of a poly1d."""
|
||||
p = [2.0, 1.0, 0.0, -2.0]
|
||||
t = np.linspace(0, 2.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = np.poly1d(p)(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert_(abserr < 1e-6)
|
||||
|
||||
|
||||
class TestGaussPulse(object):
|
||||
|
||||
def test_integer_fc(self):
|
||||
float_result = waveforms.gausspulse('cutoff', fc=1000.0)
|
||||
int_result = waveforms.gausspulse('cutoff', fc=1000)
|
||||
err_msg = "Integer input 'fc=1000' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_bw(self):
|
||||
float_result = waveforms.gausspulse('cutoff', bw=1.0)
|
||||
int_result = waveforms.gausspulse('cutoff', bw=1)
|
||||
err_msg = "Integer input 'bw=1' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_bwr(self):
|
||||
float_result = waveforms.gausspulse('cutoff', bwr=-6.0)
|
||||
int_result = waveforms.gausspulse('cutoff', bwr=-6)
|
||||
err_msg = "Integer input 'bwr=-6' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_tpr(self):
|
||||
float_result = waveforms.gausspulse('cutoff', tpr=-60.0)
|
||||
int_result = waveforms.gausspulse('cutoff', tpr=-60)
|
||||
err_msg = "Integer input 'tpr=-60' gives wrong result"
|
||||
assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
|
||||
class TestUnitImpulse(object):
|
||||
|
||||
def test_no_index(self):
|
||||
assert_array_equal(waveforms.unit_impulse(7), [1, 0, 0, 0, 0, 0, 0])
|
||||
assert_array_equal(waveforms.unit_impulse((3, 3)),
|
||||
[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
|
||||
|
||||
def test_index(self):
|
||||
assert_array_equal(waveforms.unit_impulse(10, 3),
|
||||
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
|
||||
assert_array_equal(waveforms.unit_impulse((3, 3), (1, 1)),
|
||||
[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
|
||||
|
||||
# Broadcasting
|
||||
imp = waveforms.unit_impulse((4, 4), 2)
|
||||
assert_array_equal(imp, np.array([[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 1, 0],
|
||||
[0, 0, 0, 0]]))
|
||||
|
||||
def test_mid(self):
|
||||
assert_array_equal(waveforms.unit_impulse((3, 3), 'mid'),
|
||||
[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
|
||||
assert_array_equal(waveforms.unit_impulse(9, 'mid'),
|
||||
[0, 0, 0, 0, 1, 0, 0, 0, 0])
|
||||
|
||||
def test_dtype(self):
|
||||
imp = waveforms.unit_impulse(7)
|
||||
assert_(np.issubdtype(imp.dtype, np.floating))
|
||||
|
||||
imp = waveforms.unit_impulse(5, 3, dtype=int)
|
||||
assert_(np.issubdtype(imp.dtype, np.integer))
|
||||
|
||||
imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex)
|
||||
assert_(np.issubdtype(imp.dtype, np.complexfloating))
|
152
venv/Lib/site-packages/scipy/signal/tests/test_wavelets.py
Normal file
|
@@ -0,0 +1,152 @@
|
|||
import numpy as np
|
||||
from numpy.testing import assert_equal, \
|
||||
assert_array_equal, assert_array_almost_equal, assert_array_less, assert_
|
||||
|
||||
from scipy.signal import wavelets
|
||||
|
||||
|
||||
class TestWavelets(object):
|
||||
def test_qmf(self):
|
||||
assert_array_equal(wavelets.qmf([1, 1]), [1, -1])
|
||||
|
||||
def test_daub(self):
|
||||
for i in range(1, 15):
|
||||
assert_equal(len(wavelets.daub(i)), i * 2)
|
||||
|
||||
def test_cascade(self):
|
||||
for J in range(1, 7):
|
||||
for i in range(1, 5):
|
||||
lpcoef = wavelets.daub(i)
|
||||
k = len(lpcoef)
|
||||
x, phi, psi = wavelets.cascade(lpcoef, J)
|
||||
assert_(len(x) == len(phi) == len(psi))
|
||||
assert_equal(len(x), (k - 1) * 2 ** J)
|
||||
|
||||
def test_morlet(self):
|
||||
x = wavelets.morlet(50, 4.1, complete=True)
|
||||
y = wavelets.morlet(50, 4.1, complete=False)
|
||||
# Test if complete and incomplete wavelet have same lengths:
|
||||
assert_equal(len(x), len(y))
|
||||
# Test if complete wavelet is less than incomplete wavelet:
|
||||
assert_array_less(x, y)
|
||||
|
||||
x = wavelets.morlet(10, 50, complete=False)
|
||||
y = wavelets.morlet(10, 50, complete=True)
|
||||
# For large widths complete and incomplete wavelets should be
|
||||
# identical within numerical precision:
|
||||
assert_equal(x, y)
|
||||
|
||||
# miscellaneous tests:
|
||||
x = np.array([1.73752399e-09 + 9.84327394e-25j,
|
||||
6.49471756e-01 + 0.00000000e+00j,
|
||||
1.73752399e-09 - 9.84327394e-25j])
|
||||
y = wavelets.morlet(3, w=2, complete=True)
|
||||
assert_array_almost_equal(x, y)
|
||||
|
||||
x = np.array([2.00947715e-09 + 9.84327394e-25j,
|
||||
7.51125544e-01 + 0.00000000e+00j,
|
||||
2.00947715e-09 - 9.84327394e-25j])
|
||||
y = wavelets.morlet(3, w=2, complete=False)
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
x = wavelets.morlet(10000, s=4, complete=True)
|
||||
y = wavelets.morlet(20000, s=8, complete=True)[5000:15000]
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
x = wavelets.morlet(10000, s=4, complete=False)
|
||||
assert_array_almost_equal(y, x, decimal=2)
|
||||
y = wavelets.morlet(20000, s=8, complete=False)[5000:15000]
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
x = wavelets.morlet(10000, w=3, s=5, complete=True)
|
||||
y = wavelets.morlet(20000, w=3, s=10, complete=True)[5000:15000]
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
x = wavelets.morlet(10000, w=3, s=5, complete=False)
|
||||
assert_array_almost_equal(y, x, decimal=2)
|
||||
y = wavelets.morlet(20000, w=3, s=10, complete=False)[5000:15000]
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
x = wavelets.morlet(10000, w=7, s=10, complete=True)
|
||||
y = wavelets.morlet(20000, w=7, s=20, complete=True)[5000:15000]
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
x = wavelets.morlet(10000, w=7, s=10, complete=False)
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
y = wavelets.morlet(20000, w=7, s=20, complete=False)[5000:15000]
|
||||
assert_array_almost_equal(x, y, decimal=2)
|
||||
|
||||
def test_morlet2(self):
|
||||
w = wavelets.morlet2(1.0, 0.5)
|
||||
expected = (np.pi**(-0.25) * np.sqrt(1/0.5)).astype(complex)
|
||||
assert_array_equal(w, expected)
|
||||
|
||||
lengths = [5, 11, 15, 51, 101]
|
||||
for length in lengths:
|
||||
w = wavelets.morlet2(length, 1.0)
|
||||
assert_(len(w) == length)
|
||||
max_loc = np.argmax(w)
|
||||
assert_(max_loc == (length // 2))
|
||||
|
||||
points = 100
|
||||
w = abs(wavelets.morlet2(points, 2.0))
|
||||
half_vec = np.arange(0, points // 2)
|
||||
assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])
|
||||
|
||||
x = np.array([5.03701224e-09 + 2.46742437e-24j,
|
||||
1.88279253e+00 + 0.00000000e+00j,
|
||||
5.03701224e-09 - 2.46742437e-24j])
|
||||
y = wavelets.morlet2(3, s=1/(2*np.pi), w=2)
|
||||
assert_array_almost_equal(x, y)
|
||||
|
||||
def test_ricker(self):
|
||||
w = wavelets.ricker(1.0, 1)
|
||||
expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25))
|
||||
assert_array_equal(w, expected)
|
||||
|
||||
lengths = [5, 11, 15, 51, 101]
|
||||
for length in lengths:
|
||||
w = wavelets.ricker(length, 1.0)
|
||||
assert_(len(w) == length)
|
||||
max_loc = np.argmax(w)
|
||||
assert_(max_loc == (length // 2))
|
||||
|
||||
points = 100
|
||||
w = wavelets.ricker(points, 2.0)
|
||||
half_vec = np.arange(0, points // 2)
|
||||
#Wavelet should be symmetric
|
||||
assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])
|
||||
|
||||
#Check zeros
|
||||
aas = [5, 10, 15, 20, 30]
|
||||
points = 99
|
||||
for a in aas:
|
||||
w = wavelets.ricker(points, a)
|
||||
vec = np.arange(0, points) - (points - 1.0) / 2
|
||||
exp_zero1 = np.argmin(np.abs(vec - a))
|
||||
exp_zero2 = np.argmin(np.abs(vec + a))
|
||||
assert_array_almost_equal(w[exp_zero1], 0)
|
||||
assert_array_almost_equal(w[exp_zero2], 0)
|
||||
|
||||
def test_cwt(self):
|
||||
widths = [1.0]
|
||||
delta_wavelet = lambda s, t: np.array([1])
|
||||
len_data = 100
|
||||
test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0)
|
||||
|
||||
#Test delta function input gives same data as output
|
||||
cwt_dat = wavelets.cwt(test_data, delta_wavelet, widths)
|
||||
assert_(cwt_dat.shape == (len(widths), len_data))
|
||||
assert_array_almost_equal(test_data, cwt_dat.flatten())
|
||||
|
||||
#Check proper shape on output
|
||||
widths = [1, 3, 4, 5, 10]
|
||||
cwt_dat = wavelets.cwt(test_data, wavelets.ricker, widths)
|
||||
assert_(cwt_dat.shape == (len(widths), len_data))
|
||||
|
||||
widths = [len_data * 10]
|
||||
#Note: this wavelet isn't defined quite right, but is fine for this test
|
||||
flat_wavelet = lambda l, w: np.full(w, 1 / w)
|
||||
cwt_dat = wavelets.cwt(test_data, flat_wavelet, widths)
|
||||
assert_array_almost_equal(cwt_dat, np.mean(test_data))
|
||||
|
638
venv/Lib/site-packages/scipy/signal/tests/test_windows.py
Normal file
File diff suppressed because one or more lines are too long
664
venv/Lib/site-packages/scipy/signal/waveforms.py
Normal file
|
@@ -0,0 +1,664 @@
|
|||
# Author: Travis Oliphant
|
||||
# 2003
|
||||
#
|
||||
# Feb. 2010: Updated by Warren Weckesser:
|
||||
# Rewrote much of chirp()
|
||||
# Added sweep_poly()
|
||||
import numpy as np
|
||||
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
|
||||
exp, cos, sin, polyval, polyint
|
||||
|
||||
|
||||
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly',
|
||||
'unit_impulse']
|
||||
|
||||
|
||||
def sawtooth(t, width=1):
|
||||
"""
|
||||
Return a periodic sawtooth or triangle waveform.
|
||||
|
||||
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
|
||||
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
|
||||
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
|
||||
|
||||
Note that this is not band-limited. It produces an infinite number
|
||||
of harmonics, which are aliased back and forth across the frequency
|
||||
spectrum.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
t : array_like
|
||||
Time.
|
||||
width : array_like, optional
|
||||
Width of the rising ramp as a proportion of the total cycle.
|
||||
Default is 1, producing a rising ramp, while 0 produces a falling
|
||||
ramp. `width` = 0.5 produces a triangle wave.
|
||||
If an array, causes wave shape to change over time, and must be the
|
||||
same length as t.
|
||||
|
||||
Returns
|
||||
-------
|
||||
y : ndarray
|
||||
Output array containing the sawtooth waveform.
|
||||
|
||||
Examples
|
||||
--------
|
||||
A 5 Hz waveform sampled at 500 Hz for 1 second:
|
||||
|
||||
>>> from scipy import signal
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> t = np.linspace(0, 1, 500)
|
||||
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
|
||||
|
||||
"""
|
||||
t, w = asarray(t), asarray(width)
|
||||
w = asarray(w + (t - t))
|
||||
t = asarray(t + (w - w))
|
||||
if t.dtype.char in 'fFdD':
|
||||
ytype = t.dtype.char
|
||||
else:
|
||||
ytype = 'd'
|
||||
y = zeros(t.shape, ytype)
|
||||
|
||||
# width must be between 0 and 1 inclusive
|
||||
mask1 = (w > 1) | (w < 0)
|
||||
place(y, mask1, nan)
|
||||
|
||||
# take t modulo 2*pi
|
||||
tmod = mod(t, 2 * pi)
|
||||
|
||||
# on the interval 0 to width*2*pi function is
|
||||
# tmod / (pi*w) - 1
|
||||
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
|
||||
tsub = extract(mask2, tmod)
|
||||
wsub = extract(mask2, w)
|
||||
place(y, mask2, tsub / (pi * wsub) - 1)
|
||||
|
||||
# on the interval width*2*pi to 2*pi function is
|
||||
# (pi*(w+1)-tmod) / (pi*(1-w))
|
||||
|
||||
mask3 = (1 - mask1) & (1 - mask2)
|
||||
tsub = extract(mask3, tmod)
|
||||
wsub = extract(mask3, w)
|
||||
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
|
||||
return y
|
||||
|
||||
|
||||
def square(t, duty=0.5):
|
||||
"""
|
||||
Return a periodic square-wave waveform.
|
||||
|
||||
The square wave has a period ``2*pi``, has value +1 from 0 to
|
||||
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
|
||||
the interval [0,1].
|
||||
|
||||
Note that this is not band-limited. It produces an infinite number
|
||||
of harmonics, which are aliased back and forth across the frequency
|
||||
spectrum.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
t : array_like
|
||||
The input time array.
|
||||
duty : array_like, optional
|
||||
Duty cycle. Default is 0.5 (50% duty cycle).
|
||||
If an array, causes wave shape to change over time, and must be the
|
||||
same length as t.
|
||||
|
||||
Returns
|
||||
-------
|
||||
y : ndarray
|
||||
Output array containing the square waveform.
|
||||
|
||||
Examples
|
||||
--------
|
||||
A 5 Hz waveform sampled at 500 Hz for 1 second:
|
||||
|
||||
>>> from scipy import signal
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> t = np.linspace(0, 1, 500, endpoint=False)
|
||||
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
|
||||
>>> plt.ylim(-2, 2)
|
||||
|
||||
A pulse-width modulated sine wave:
|
||||
|
||||
>>> plt.figure()
|
||||
>>> sig = np.sin(2 * np.pi * t)
|
||||
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
|
||||
>>> plt.subplot(2, 1, 1)
|
||||
>>> plt.plot(t, sig)
|
||||
>>> plt.subplot(2, 1, 2)
|
||||
>>> plt.plot(t, pwm)
|
||||
>>> plt.ylim(-1.5, 1.5)
|
||||
|
||||
"""
|
||||
t, w = asarray(t), asarray(duty)
|
||||
w = asarray(w + (t - t))
|
||||
t = asarray(t + (w - w))
|
||||
if t.dtype.char in 'fFdD':
|
||||
ytype = t.dtype.char
|
||||
else:
|
||||
ytype = 'd'
|
||||
|
||||
y = zeros(t.shape, ytype)
|
||||
|
||||
# duty must be between 0 and 1 inclusive
|
||||
mask1 = (w > 1) | (w < 0)
|
||||
place(y, mask1, nan)
|
||||
|
||||
# on the interval 0 to duty*2*pi function is 1
|
||||
tmod = mod(t, 2 * pi)
|
||||
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
|
||||
place(y, mask2, 1)
|
||||
|
||||
# on the interval duty*2*pi to 2*pi function is -1
|
||||
|
||||
mask3 = (1 - mask1) & (1 - mask2)
|
||||
place(y, mask3, -1)
|
||||
return y
|
||||
|
||||
|
||||
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
|
||||
retenv=False):
|
||||
"""
|
||||
Return a Gaussian modulated sinusoid:
|
||||
|
||||
``exp(-a t^2) exp(1j*2*pi*fc*t).``
|
||||
|
||||
If `retquad` is True, then return the real and imaginary parts
|
||||
(in-phase and quadrature).
|
||||
If `retenv` is True, then return the envelope (unmodulated signal).
|
||||
Otherwise, return the real part of the modulated sinusoid.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
t : ndarray or the string 'cutoff'
|
||||
Input array.
|
||||
fc : float, optional
|
||||
Center frequency (e.g. Hz). Default is 1000.
|
||||
bw : float, optional
|
||||
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
|
||||
Default is 0.5.
|
||||
bwr : float, optional
|
||||
Reference level at which fractional bandwidth is calculated (dB).
|
||||
Default is -6.
|
||||
tpr : float, optional
|
||||
If `t` is 'cutoff', then the function returns the cutoff
|
||||
time for when the pulse amplitude falls below `tpr` (in dB).
|
||||
Default is -60.
|
||||
retquad : bool, optional
|
||||
If True, return the quadrature (imaginary) as well as the real part
|
||||
of the signal. Default is False.
|
||||
retenv : bool, optional
|
||||
If True, return the envelope of the signal. Default is False.
|
||||
|
||||
Returns
|
||||
-------
|
||||
yI : ndarray
|
||||
Real part of signal. Always returned.
|
||||
yQ : ndarray
|
||||
Imaginary part of signal. Only returned if `retquad` is True.
|
||||
yenv : ndarray
|
||||
Envelope of signal. Only returned if `retenv` is True.
|
||||
|
||||
See Also
|
||||
--------
|
||||
scipy.signal.morlet
|
||||
|
||||
Examples
|
||||
--------
|
||||
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
|
||||
sampled at 100 Hz for 2 seconds:
|
||||
|
||||
>>> from scipy import signal
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
|
||||
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
|
||||
>>> plt.plot(t, i, t, q, t, e, '--')
|
||||
|
||||
"""
|
||||
if fc < 0:
|
||||
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
|
||||
if bw <= 0:
|
||||
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
|
||||
if bwr >= 0:
|
||||
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
|
||||
"be < 0 dB" % bwr)
|
||||
|
||||
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
|
||||
|
||||
ref = pow(10.0, bwr / 20.0)
|
||||
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
|
||||
#
|
||||
# pi^2/a * fc^2 * bw^2 / 4 = -log(ref)
|
||||
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
|
||||
|
||||
if isinstance(t, str):
|
||||
if t == 'cutoff': # compute cut_off point
|
||||
# Solve exp(-a tc**2) = tref for tc
|
||||
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
|
||||
if tpr >= 0:
|
||||
raise ValueError("Reference level for time cutoff must "
|
||||
"be < 0 dB")
|
||||
tref = pow(10.0, tpr / 20.0)
|
||||
return sqrt(-log(tref) / a)
|
||||
else:
|
||||
raise ValueError("If `t` is a string, it must be 'cutoff'")
|
||||
|
||||
yenv = exp(-a * t * t)
|
||||
yI = yenv * cos(2 * pi * fc * t)
|
||||
yQ = yenv * sin(2 * pi * fc * t)
|
||||
if not retquad and not retenv:
|
||||
return yI
|
||||
if not retquad and retenv:
|
||||
return yI, yenv
|
||||
if retquad and not retenv:
|
||||
return yI, yQ
|
||||
if retquad and retenv:
|
||||
return yI, yQ, yenv
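
# A minimal usage sketch (illustrative only, not part of the upstream module):
# the 'cutoff' mode returns the time at which the pulse amplitude has fallen
# to `tpr` dB, which is a convenient way to size a symmetric time grid.
def _gausspulse_usage_sketch(fc=5.0, bw=0.5, tpr=-40):
    tc = gausspulse('cutoff', fc=fc, bw=bw, tpr=tpr)  # cutoff time (seconds)
    t = np.linspace(-tc, tc, 201)
    return gausspulse(t, fc=fc, bw=bw)  # real (in-phase) part of the pulse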
|
||||
|
||||
|
||||
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
|
||||
"""Frequency-swept cosine generator.
|
||||
|
||||
In the following, 'Hz' should be interpreted as 'cycles per unit';
|
||||
there is no requirement here that the unit is one second. The
|
||||
important distinction is that the units of rotation are cycles, not
|
||||
radians. Likewise, `t` could be a measurement of space instead of time.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
t : array_like
|
||||
Times at which to evaluate the waveform.
|
||||
f0 : float
|
||||
Frequency (e.g. Hz) at time t=0.
|
||||
t1 : float
|
||||
Time at which `f1` is specified.
|
||||
f1 : float
|
||||
Frequency (e.g. Hz) of the waveform at time `t1`.
|
||||
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
|
||||
Kind of frequency sweep. If not given, `linear` is assumed. See
|
||||
Notes below for more details.
|
||||
phi : float, optional
|
||||
Phase offset, in degrees. Default is 0.
|
||||
vertex_zero : bool, optional
|
||||
This parameter is only used when `method` is 'quadratic'.
|
||||
It determines whether the vertex of the parabola that is the graph
|
||||
of the frequency is at t=0 or t=t1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
y : ndarray
|
||||
A numpy array containing the signal evaluated at `t` with the
|
||||
requested time-varying frequency. More precisely, the function
|
||||
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
|
||||
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
|
||||
|
||||
See Also
|
||||
--------
|
||||
sweep_poly
|
||||
|
||||
Notes
|
||||
-----
|
||||
There are four options for the `method`. The following formulas give
|
||||
the instantaneous frequency (in Hz) of the signal generated by
|
||||
`chirp()`. For convenience, the shorter names shown below may also be
|
||||
used.
|
||||
|
||||
linear, lin, li:
|
||||
|
||||
``f(t) = f0 + (f1 - f0) * t / t1``
|
||||
|
||||
quadratic, quad, q:
|
||||
|
||||
The graph of the frequency f(t) is a parabola through (0, f0) and
|
||||
(t1, f1). By default, the vertex of the parabola is at (0, f0).
|
||||
If `vertex_zero` is False, then the vertex is at (t1, f1). The
|
||||
formula is:
|
||||
|
||||
if vertex_zero is True:
|
||||
|
||||
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
|
||||
|
||||
else:
|
||||
|
||||
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
|
||||
|
||||
To use a more general quadratic function, or an arbitrary
|
||||
polynomial, use the function `scipy.signal.sweep_poly`.
|
||||
|
||||
logarithmic, log, lo:
|
||||
|
||||
``f(t) = f0 * (f1/f0)**(t/t1)``
|
||||
|
||||
f0 and f1 must be nonzero and have the same sign.
|
||||
|
||||
This signal is also known as a geometric or exponential chirp.
|
||||
|
||||
hyperbolic, hyp:
|
||||
|
||||
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
|
||||
|
||||
f0 and f1 must be nonzero.
|
||||
|
||||
Examples
|
||||
--------
|
||||
The following will be used in the examples:
|
||||
|
||||
>>> from scipy.signal import chirp, spectrogram
|
||||
>>> import matplotlib.pyplot as plt
|
||||
|
||||
For the first example, we'll plot the waveform for a linear chirp
|
||||
from 6 Hz to 1 Hz over 10 seconds:
|
||||
|
||||
>>> t = np.linspace(0, 10, 1500)
|
||||
>>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')
|
||||
>>> plt.plot(t, w)
|
||||
>>> plt.title("Linear Chirp, f(0)=6, f(10)=1")
|
||||
>>> plt.xlabel('t (sec)')
|
||||
>>> plt.show()
|
||||
|
||||
For the remaining examples, we'll use higher frequency ranges,
|
||||
and demonstrate the result using `scipy.signal.spectrogram`.
|
||||
We'll use a 4 second interval sampled at 7200 Hz.
|
||||
|
||||
>>> fs = 7200
|
||||
>>> T = 4
|
||||
>>> t = np.arange(0, int(T*fs)) / fs
|
||||
|
||||
We'll use this function to plot the spectrogram in each example.
|
||||
|
||||
>>> def plot_spectrogram(title, w, fs):
|
||||
... ff, tt, Sxx = spectrogram(w, fs=fs, nperseg=256, nfft=576)
|
||||
... plt.pcolormesh(tt, ff[:145], Sxx[:145], cmap='gray_r', shading='gouraud')
|
||||
... plt.title(title)
|
||||
... plt.xlabel('t (sec)')
|
||||
... plt.ylabel('Frequency (Hz)')
|
||||
... plt.grid()
|
||||
...
|
||||
|
||||
Quadratic chirp from 1500 Hz to 250 Hz
|
||||
(vertex of the parabolic curve of the frequency is at t=0):
|
||||
|
||||
>>> w = chirp(t, f0=1500, f1=250, t1=T, method='quadratic')
|
||||
>>> plot_spectrogram(f'Quadratic Chirp, f(0)=1500, f({T})=250', w, fs)
|
||||
>>> plt.show()
|
||||
|
||||
Quadratic chirp from 1500 Hz to 250 Hz
|
||||
(vertex of the parabolic curve of the frequency is at t=T):
|
||||
|
||||
>>> w = chirp(t, f0=1500, f1=250, t1=T, method='quadratic',
|
||||
... vertex_zero=False)
|
||||
>>> plot_spectrogram(f'Quadratic Chirp, f(0)=1500, f({T})=250\\n' +
|
||||
... '(vertex_zero=False)', w, fs)
|
||||
>>> plt.show()
|
||||
|
||||
Logarithmic chirp from 1500 Hz to 250 Hz:
|
||||
|
||||
>>> w = chirp(t, f0=1500, f1=250, t1=T, method='logarithmic')
|
||||
>>> plot_spectrogram(f'Logarithmic Chirp, f(0)=1500, f({T})=250', w, fs)
|
||||
>>> plt.show()
|
||||
|
||||
Hyperbolic chirp from 1500 Hz to 250 Hz:
|
||||
|
||||
>>> w = chirp(t, f0=1500, f1=250, t1=T, method='hyperbolic')
|
||||
>>> plot_spectrogram(f'Hyperbolic Chirp, f(0)=1500, f({T})=250', w, fs)
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
# 'phase' is computed in _chirp_phase, to make testing easier.
|
||||
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
|
||||
# Convert phi to radians.
|
||||
phi *= pi / 180
|
||||
return cos(phase + phi)
|
||||
|
||||
|
||||
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
|
||||
"""
|
||||
Calculate the phase used by `chirp` to generate its output.
|
||||
|
||||
See `chirp` for a description of the arguments.
|
||||
|
||||
"""
|
||||
t = asarray(t)
|
||||
f0 = float(f0)
|
||||
t1 = float(t1)
|
||||
f1 = float(f1)
|
||||
if method in ['linear', 'lin', 'li']:
|
||||
beta = (f1 - f0) / t1
|
||||
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
|
||||
|
||||
elif method in ['quadratic', 'quad', 'q']:
|
||||
beta = (f1 - f0) / (t1 ** 2)
|
||||
if vertex_zero:
|
||||
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
|
||||
else:
|
||||
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
|
||||
|
||||
elif method in ['logarithmic', 'log', 'lo']:
|
||||
if f0 * f1 <= 0.0:
|
||||
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
|
||||
"nonzero and have the same sign.")
|
||||
if f0 == f1:
|
||||
phase = 2 * pi * f0 * t
|
||||
else:
|
||||
beta = t1 / log(f1 / f0)
|
||||
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
|
||||
|
||||
elif method in ['hyperbolic', 'hyp']:
|
||||
if f0 == 0 or f1 == 0:
|
||||
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
|
||||
"nonzero.")
|
||||
if f0 == f1:
|
||||
# Degenerate case: constant frequency.
|
||||
phase = 2 * pi * f0 * t
|
||||
else:
|
||||
# Singular point: the instantaneous frequency blows up
|
||||
# when t == sing.
|
||||
sing = -f1 * t1 / (f0 - f1)
|
||||
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
|
||||
|
||||
else:
|
||||
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
|
||||
" or 'hyperbolic', but a value of %r was given."
|
||||
% method)
|
||||
|
||||
return phase
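
# Illustrative check (not part of the upstream module): the instantaneous
# frequency is the derivative of the phase divided by 2*pi, so a finite
# difference of `_chirp_phase` should recover the hyperbolic frequency law
# f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1) quoted in the `chirp` docstring.
def _chirp_phase_frequency_sketch(f0=10.0, f1=1.0, t1=1.0, n=10000):
    t = np.linspace(0, t1, n)
    phase = _chirp_phase(t, f0, t1, f1, method='hyperbolic')
    dt = t[1] - t[0]
    f_est = np.diff(phase) / (2 * pi) / dt   # finite-difference estimate
    tm = 0.5 * (t[1:] + t[:-1])              # midpoints of the grid
    f_ref = f0 * f1 * t1 / ((f0 - f1) * tm + f1 * t1)
    return np.max(np.abs(f_est - f_ref))     # small for a fine grid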
|
||||
|
||||
|
||||
def sweep_poly(t, poly, phi=0):
|
||||
"""
|
||||
Frequency-swept cosine generator, with a time-dependent frequency.
|
||||
|
||||
This function generates a sinusoidal function whose instantaneous
|
||||
frequency varies with time. The frequency at time `t` is given by
|
||||
the polynomial `poly`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
t : ndarray
|
||||
Times at which to evaluate the waveform.
|
||||
poly : 1-D array_like or instance of numpy.poly1d
|
||||
The desired frequency expressed as a polynomial. If `poly` is
|
||||
a list or ndarray of length n, then the elements of `poly` are
|
||||
the coefficients of the polynomial, and the instantaneous
|
||||
frequency is
|
||||
|
||||
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
|
||||
|
||||
If `poly` is an instance of numpy.poly1d, then the
|
||||
instantaneous frequency is
|
||||
|
||||
``f(t) = poly(t)``
|
||||
|
||||
phi : float, optional
|
||||
Phase offset, in degrees. Default: 0.
|
||||
|
||||
Returns
|
||||
-------
|
||||
sweep_poly : ndarray
|
||||
A numpy array containing the signal evaluated at `t` with the
|
||||
requested time-varying frequency. More precisely, the function
|
||||
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
|
||||
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
|
||||
|
||||
See Also
|
||||
--------
|
||||
chirp
|
||||
|
||||
Notes
|
||||
-----
|
||||
.. versionadded:: 0.8.0
|
||||
|
||||
If `poly` is a list or ndarray of length `n`, then the elements of
|
||||
`poly` are the coefficients of the polynomial, and the instantaneous
|
||||
frequency is:
|
||||
|
||||
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
|
||||
|
||||
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
|
||||
frequency is:
|
||||
|
||||
``f(t) = poly(t)``
|
||||
|
||||
Finally, the output `s` is:
|
||||
|
||||
``cos(phase + (pi/180)*phi)``
|
||||
|
||||
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
|
||||
``f(t)`` as defined above.
|
||||
|
||||
Examples
|
||||
--------
|
||||
Compute the waveform with instantaneous frequency::
|
||||
|
||||
f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2
|
||||
|
||||
over the interval 0 <= t <= 10.
|
||||
|
||||
>>> from scipy.signal import sweep_poly
|
||||
>>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
|
||||
>>> t = np.linspace(0, 10, 5001)
|
||||
>>> w = sweep_poly(t, p)
|
||||
|
||||
Plot it:
|
||||
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> plt.subplot(2, 1, 1)
|
||||
>>> plt.plot(t, w)
|
||||
>>> plt.title("Sweep Poly\\nwith frequency " +
|
||||
... "$f(t) = 0.025t^3 - 0.36t^2 + 1.25t + 2$")
|
||||
>>> plt.subplot(2, 1, 2)
|
||||
>>> plt.plot(t, p(t), 'r', label='f(t)')
|
||||
>>> plt.legend()
|
||||
>>> plt.xlabel('t')
|
||||
>>> plt.tight_layout()
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
|
||||
phase = _sweep_poly_phase(t, poly)
|
||||
# Convert to radians.
|
||||
phi *= pi / 180
|
||||
return cos(phase + phi)
|
||||
|
||||
|
||||
def _sweep_poly_phase(t, poly):
|
||||
"""
|
||||
Calculate the phase used by sweep_poly to generate its output.
|
||||
|
||||
See `sweep_poly` for a description of the arguments.
|
||||
|
||||
"""
|
||||
# polyint handles lists, ndarrays and instances of poly1d automatically.
|
||||
intpoly = polyint(poly)
|
||||
phase = 2 * pi * polyval(intpoly, t)
|
||||
return phase
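
# Illustrative cross-check (not part of the upstream module): for the
# degree-1 polynomial f(t) = (f1 - f0)/t1 * t + f0, `sweep_poly` and a
# linear `chirp` integrate the same frequency law, so their outputs agree.
def _sweep_poly_vs_chirp_sketch(f0=1.0, f1=2.0, t1=1.0, n=1000):
    t = np.linspace(0, t1, n)
    w_poly = sweep_poly(t, [(f1 - f0) / t1, f0])
    w_lin = chirp(t, f0, t1, f1, method='linear')
    return np.max(np.abs(w_poly - w_lin))  # should be at round-off level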
|
||||
|
||||
|
||||
def unit_impulse(shape, idx=None, dtype=float):
|
||||
"""
|
||||
Unit impulse signal (discrete delta function) or unit basis vector.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
shape : int or tuple of int
|
||||
Number of samples in the output (1-D), or a tuple that represents the
|
||||
shape of the output (N-D).
|
||||
idx : None or int or tuple of int or 'mid', optional
|
||||
Index at which the value is 1. If None, defaults to the 0th element.
|
||||
If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in
|
||||
all dimensions. If an int, the impulse will be at `idx` in all
|
||||
dimensions.
|
||||
dtype : data-type, optional
|
||||
The desired data-type for the array, e.g., ``numpy.int8``. Default is
|
||||
``numpy.float64``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
y : ndarray
|
||||
Output array containing an impulse signal.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The 1D case is also known as the Kronecker delta.
|
||||
|
||||
.. versionadded:: 0.19.0
|
||||
|
||||
Examples
|
||||
--------
|
||||
An impulse at the 0th element (:math:`\\delta[n]`):
|
||||
|
||||
>>> from scipy import signal
|
||||
>>> signal.unit_impulse(8)
|
||||
array([ 1., 0., 0., 0., 0., 0., 0., 0.])
|
||||
|
||||
Impulse offset by 2 samples (:math:`\\delta[n-2]`):
|
||||
|
||||
>>> signal.unit_impulse(7, 2)
|
||||
array([ 0., 0., 1., 0., 0., 0., 0.])
|
||||
|
||||
2-dimensional impulse, centered:
|
||||
|
||||
>>> signal.unit_impulse((3, 3), 'mid')
|
||||
array([[ 0., 0., 0.],
|
||||
[ 0., 1., 0.],
|
||||
[ 0., 0., 0.]])
|
||||
|
||||
Impulse at (2, 2), using broadcasting:
|
||||
|
||||
>>> signal.unit_impulse((4, 4), 2)
|
||||
array([[ 0., 0., 0., 0.],
|
||||
[ 0., 0., 0., 0.],
|
||||
[ 0., 0., 1., 0.],
|
||||
[ 0., 0., 0., 0.]])
|
||||
|
||||
Plot the impulse response of a 4th-order Butterworth lowpass filter:
|
||||
|
||||
>>> imp = signal.unit_impulse(100, 'mid')
|
||||
>>> b, a = signal.butter(4, 0.2)
|
||||
>>> response = signal.lfilter(b, a, imp)
|
||||
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> plt.plot(np.arange(-50, 50), imp)
|
||||
>>> plt.plot(np.arange(-50, 50), response)
|
||||
>>> plt.margins(0.1, 0.1)
|
||||
>>> plt.xlabel('Time [samples]')
|
||||
>>> plt.ylabel('Amplitude')
|
||||
>>> plt.grid(True)
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
out = zeros(shape, dtype)
|
||||
|
||||
shape = np.atleast_1d(shape)
|
||||
|
||||
if idx is None:
|
||||
idx = (0,) * len(shape)
|
||||
elif idx == 'mid':
|
||||
idx = tuple(shape // 2)
|
||||
elif not hasattr(idx, "__iter__"):
|
||||
idx = (idx,) * len(shape)
|
||||
|
||||
out[idx] = 1
|
||||
return out
|
481
venv/Lib/site-packages/scipy/signal/wavelets.py
Normal file
|
@@ -0,0 +1,481 @@
|
|||
import numpy as np
|
||||
from scipy.linalg import eig
|
||||
from scipy.special import comb
|
||||
from scipy.signal import convolve
|
||||
|
||||
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt']
|
||||
|
||||
|
||||
def daub(p):
|
||||
"""
|
||||
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
|
||||
|
||||
p>=1 gives the order of the zero at f=1/2.
|
||||
There are 2p filter coefficients.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
p : int
|
||||
Order of the zero at f=1/2, can have values from 1 to 34.
|
||||
|
||||
Returns
|
||||
-------
|
||||
daub : ndarray
|
||||
The low-pass filter coefficients as a 1-D array of length ``2*p``.
|
||||
|
||||
"""
|
||||
sqrt = np.sqrt
|
||||
if p < 1:
|
||||
raise ValueError("p must be at least 1.")
|
||||
if p == 1:
|
||||
c = 1 / sqrt(2)
|
||||
return np.array([c, c])
|
||||
elif p == 2:
|
||||
f = sqrt(2) / 8
|
||||
c = sqrt(3)
|
||||
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
|
||||
elif p == 3:
|
||||
tmp = 12 * sqrt(10)
|
||||
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
|
||||
z1c = np.conj(z1)
|
||||
f = sqrt(2) / 8
|
||||
d0 = np.real((1 - z1) * (1 - z1c))
|
||||
a0 = np.real(z1 * z1c)
|
||||
a1 = 2 * np.real(z1)
|
||||
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
|
||||
a0 - 3 * a1 + 3, 3 - a1, 1])
|
||||
elif p < 35:
|
||||
# construct polynomial and factor it
|
||||
if p < 35:
|
||||
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
|
||||
yj = np.roots(P)
|
||||
else: # try different polynomial --- needs work
|
||||
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
|
||||
for k in range(p)][::-1]
|
||||
yj = np.roots(P) / 4
|
||||
# for each root, compute two z roots, select the one with |z|>1
|
||||
# Build up final polynomial
|
||||
c = np.poly1d([1, 1])**p
|
||||
q = np.poly1d([1])
|
||||
for k in range(p - 1):
|
||||
yval = yj[k]
|
||||
part = 2 * sqrt(yval * (yval - 1))
|
||||
const = 1 - 2 * yval
|
||||
z1 = const + part
|
||||
if (abs(z1)) < 1:
|
||||
z1 = const - part
|
||||
q = q * [1, -z1]
|
||||
|
||||
q = c * np.real(q)
|
||||
# Normalize result
|
||||
q = q / np.sum(q) * sqrt(2)
|
||||
return q.c[::-1]
|
||||
else:
|
||||
raise ValueError("Polynomial factorization does not work "
|
||||
"well for p too large.")
|
||||
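
# Illustrative sanity check (not part of the upstream module): a Daubechies
# low-pass filter of order p has 2*p taps and, with the normalization used
# above, its coefficients sum to sqrt(2).
def _daub_sanity_sketch(p=4):
    hk = daub(p)
    assert len(hk) == 2 * p
    assert np.allclose(np.sum(hk), np.sqrt(2))
    return hk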
|
||||
|
||||
def qmf(hk):
|
||||
"""
|
||||
Return high-pass qmf filter from low-pass
|
||||
|
||||
Parameters
|
||||
----------
|
||||
hk : array_like
|
||||
Coefficients of the low-pass filter.
|
||||
|
||||
"""
|
||||
N = len(hk) - 1
|
||||
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
|
||||
return hk[::-1] * np.array(asgn)
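
# Illustrative example (not part of the upstream module): the high-pass
# filter is the reversed low-pass filter with alternating signs, so the
# Haar pair [1, 1] maps to [1, -1].
def _qmf_sketch():
    gk = qmf([1, 1])
    assert np.array_equal(gk, [1, -1])
    return gk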
|
||||
|
||||
|
||||
def cascade(hk, J=7):
|
||||
"""
|
||||
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
hk : array_like
|
||||
Coefficients of low-pass filter.
|
||||
J : int, optional
|
||||
Values will be computed at grid points ``K/2**J``. Default is 7.
|
||||
|
||||
Returns
|
||||
-------
|
||||
x : ndarray
|
||||
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
|
||||
``len(hk) = len(gk) = N+1``.
|
||||
phi : ndarray
|
||||
The scaling function ``phi(x)`` at `x`:
|
||||
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
|
||||
psi : ndarray
|
||||
The wavelet function ``psi(x)`` at `x`:
|
||||
``psi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
|
||||
Here `gk` is the high-pass filter derived from `hk` by `qmf`.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The algorithm uses the vector cascade algorithm described by Strang and
|
||||
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
|
||||
and slices for quick reuse. Then inserts vectors into final vector at the
|
||||
end.
|
||||
|
||||
"""
|
||||
N = len(hk) - 1
|
||||
|
||||
if (J > 30 - np.log2(N + 1)):
|
||||
raise ValueError("Too many levels.")
|
||||
if (J < 1):
|
||||
raise ValueError("Too few levels.")
|
||||
|
||||
# construct matrices needed
|
||||
nn, kk = np.ogrid[:N, :N]
|
||||
s2 = np.sqrt(2)
|
||||
# append a zero so that take works
|
||||
thk = np.r_[hk, 0]
|
||||
gk = qmf(hk)
|
||||
tgk = np.r_[gk, 0]
|
||||
|
||||
indx1 = np.clip(2 * nn - kk, -1, N + 1)
|
||||
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
|
||||
m = np.zeros((2, 2, N, N), 'd')
|
||||
m[0, 0] = np.take(thk, indx1, 0)
|
||||
m[0, 1] = np.take(thk, indx2, 0)
|
||||
m[1, 0] = np.take(tgk, indx1, 0)
|
||||
m[1, 1] = np.take(tgk, indx2, 0)
|
||||
m *= s2
|
||||
|
||||
# construct the grid of points
|
||||
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
|
||||
phi = 0 * x
|
||||
|
||||
psi = 0 * x
|
||||
|
||||
# find phi0, and phi1
|
||||
lam, v = eig(m[0, 0])
|
||||
ind = np.argmin(np.absolute(lam - 1))
|
||||
# a dictionary with a binary representation of the
|
||||
# evaluation points x < 1 -- i.e. position is 0.xxxx
|
||||
v = np.real(v[:, ind])
|
||||
# need scaling function to integrate to 1 so find
|
||||
# eigenvector normalized to sum(v,axis=0)=1
|
||||
sm = np.sum(v)
|
||||
if sm < 0: # need scaling function to integrate to 1
|
||||
v = -v
|
||||
sm = -sm
|
||||
bitdic = {'0': v / sm}
|
||||
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
|
||||
step = 1 << J
|
||||
phi[::step] = bitdic['0']
|
||||
phi[(1 << (J - 1))::step] = bitdic['1']
|
||||
psi[::step] = np.dot(m[1, 0], bitdic['0'])
|
||||
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
|
||||
# descend down the levels inserting more and more values
|
||||
# into bitdic -- store the values in the correct location once we
|
||||
# have computed them -- stored in the dictionary
|
||||
# for quicker use later.
|
||||
prevkeys = ['1']
|
||||
for level in range(2, J + 1):
|
||||
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
|
||||
fac = 1 << (J - level)
|
||||
for key in newkeys:
|
||||
# convert key to number
|
||||
num = 0
|
||||
for pos in range(level):
|
||||
if key[pos] == '1':
|
||||
num += (1 << (level - 1 - pos))
|
||||
pastphi = bitdic[key[1:]]
|
||||
ii = int(key[0])
|
||||
temp = np.dot(m[0, ii], pastphi)
|
||||
bitdic[key] = temp
|
||||
phi[num * fac::step] = temp
|
||||
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
|
||||
prevkeys = newkeys
|
||||
|
||||
return x, phi, psi
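
# Illustrative example (not part of the upstream module): for a filter with
# N + 1 taps, `cascade` evaluates phi and psi on N * 2**J dyadic points.
def _cascade_sketch(p=2, J=7):
    hk = daub(p)
    x, phi, psi = cascade(hk, J)
    assert len(x) == len(phi) == len(psi) == (len(hk) - 1) * 2 ** J
    return x, phi, psi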
|
||||
|
||||
|
||||
def morlet(M, w=5.0, s=1.0, complete=True):
|
||||
"""
|
||||
Complex Morlet wavelet.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
M : int
|
||||
Length of the wavelet.
|
||||
w : float, optional
|
||||
Omega0. Default is 5
|
||||
s : float, optional
|
||||
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
|
||||
complete : bool, optional
|
||||
Whether to use the complete or the standard version.
|
||||
|
||||
Returns
|
||||
-------
|
||||
morlet : (M,) ndarray
|
||||
|
||||
See Also
|
||||
--------
|
||||
morlet2 : Implementation of Morlet wavelet, compatible with `cwt`.
|
||||
scipy.signal.gausspulse
|
||||
|
||||
Notes
|
||||
-----
|
||||
The standard version::
|
||||
|
||||
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
|
||||
|
||||
This commonly used wavelet is often referred to simply as the
|
||||
Morlet wavelet. Note that this simplified version can cause
|
||||
admissibility problems at low values of `w`.
|
||||
|
||||
The complete version::
|
||||
|
||||
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
|
||||
|
||||
This version has a correction
|
||||
term to improve admissibility. For `w` greater than 5, the
|
||||
correction term is negligible.
|
||||
|
||||
Note that the energy of the returned wavelet is not normalised
|
||||
according to `s`.
|
||||
|
||||
The fundamental frequency of this wavelet in Hz is given
|
||||
by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
|
||||
|
||||
Note: This function was created before `cwt` and is not compatible
|
||||
with it.
|
||||
|
||||
"""
|
||||
x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M)
|
||||
output = np.exp(1j * w * x)
|
||||
|
||||
if complete:
|
||||
output -= np.exp(-0.5 * (w**2))
|
||||
|
||||
output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25)
|
||||
|
||||
return output
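
# Illustrative example (not part of the upstream module): `morlet` returns an
# M-point complex array sampled on [-s*2*pi, s*2*pi]; per the Notes above,
# the complete and standard forms are nearly identical for w >= 5.
def _morlet_sketch(M=100, w=5.0, s=1.0):
    wav = morlet(M, w=w, s=s, complete=True)
    assert len(wav) == M and np.iscomplexobj(wav)
    return wav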
|
||||
|
||||
|
||||
def ricker(points, a):
|
||||
"""
|
||||
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
|
||||
|
||||
It models the function:
|
||||
|
||||
``A * (1 - (x/a)**2) * exp(-0.5*(x/a)**2)``,
|
||||
|
||||
where ``A = 2/(sqrt(3*a)*(pi**0.25))``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
points : int
|
||||
Number of points in `vector`.
|
||||
Will be centered around 0.
|
||||
a : scalar
|
||||
Width parameter of the wavelet.
|
||||
|
||||
Returns
|
||||
-------
|
||||
vector : (N,) ndarray
|
||||
Array of length `points` in shape of ricker curve.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy import signal
|
||||
>>> import matplotlib.pyplot as plt
|
||||
|
||||
>>> points = 100
|
||||
>>> a = 4.0
|
||||
>>> vec2 = signal.ricker(points, a)
|
||||
>>> print(len(vec2))
|
||||
100
|
||||
>>> plt.plot(vec2)
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
|
||||
wsq = a**2
|
||||
vec = np.arange(0, points) - (points - 1.0) / 2
|
||||
xsq = vec**2
|
||||
mod = (1 - xsq / wsq)
|
||||
gauss = np.exp(-xsq / (2 * wsq))
|
||||
total = A * mod * gauss
|
||||
return total
|
||||
|
||||
|
||||
def morlet2(M, s, w=5):
|
||||
"""
|
||||
Complex Morlet wavelet, designed to work with `cwt`.
|
||||
|
||||
Returns the complete version of morlet wavelet, normalised
|
||||
according to `s`::
|
||||
|
||||
exp(1j*w*x/s) * exp(-0.5*(x/s)**2) * pi**(-0.25) * sqrt(1/s)
|
||||
|
||||
Parameters
|
||||
----------
|
||||
M : int
|
||||
Length of the wavelet.
|
||||
s : float
|
||||
Width parameter of the wavelet.
|
||||
w : float, optional
|
||||
Omega0. Default is 5
|
||||
|
||||
Returns
|
||||
-------
|
||||
morlet : (M,) ndarray
|
||||
|
||||
See Also
|
||||
--------
|
||||
morlet : Implementation of Morlet wavelet, incompatible with `cwt`
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
.. versionadded:: 1.4.0
|
||||
|
||||
This function was designed to work with `cwt`. Because `morlet2`
|
||||
returns an array of complex numbers, the `dtype` argument of `cwt`
|
||||
should be set to `complex128` for best results.
|
||||
|
||||
Note the difference in implementation with `morlet`.
|
||||
The fundamental frequency of this wavelet in Hz is given by::
|
||||
|
||||
f = w*fs / (2*s*np.pi)
|
||||
|
||||
where ``fs`` is the sampling rate and `s` is the wavelet width parameter.
|
||||
Similarly we can get the wavelet width parameter at ``f``::
|
||||
|
||||
s = w*fs / (2*f*np.pi)
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy import signal
|
||||
>>> import matplotlib.pyplot as plt
|
||||
|
||||
>>> M = 100
|
||||
>>> s = 4.0
|
||||
>>> w = 2.0
|
||||
>>> wavelet = signal.morlet2(M, s, w)
|
||||
>>> plt.plot(abs(wavelet))
|
||||
>>> plt.show()
|
||||
|
||||
This example shows basic use of `morlet2` with `cwt` in time-frequency
|
||||
analysis:
|
||||
|
||||
>>> from scipy import signal
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> t, dt = np.linspace(0, 1, 200, retstep=True)
|
||||
>>> fs = 1/dt
|
||||
>>> w = 6.
|
||||
>>> sig = np.cos(2*np.pi*(50 + 10*t)*t) + np.sin(40*np.pi*t)
|
||||
>>> freq = np.linspace(1, fs/2, 100)
|
||||
>>> widths = w*fs / (2*freq*np.pi)
|
||||
>>> cwtm = signal.cwt(sig, signal.morlet2, widths, w=w)
|
||||
>>> plt.pcolormesh(t, freq, np.abs(cwtm), cmap='viridis', shading='gouraud')
|
||||
>>> plt.show()
|
||||
|
||||
"""
|
||||
x = np.arange(0, M) - (M - 1.0) / 2
|
||||
x = x / s
|
||||
wavelet = np.exp(1j * w * x) * np.exp(-0.5 * x**2) * np.pi**(-0.25)
|
||||
output = np.sqrt(1/s) * wavelet
|
||||
return output
|
||||
|
||||
|
||||
def cwt(data, wavelet, widths, dtype=None, **kwargs):
|
||||
"""
|
||||
Continuous wavelet transform.
|
||||
|
||||
Performs a continuous wavelet transform on `data`,
|
||||
using the `wavelet` function. A CWT performs a convolution
|
||||
with `data` using the `wavelet` function, which is characterized
|
||||
by a width parameter and length parameter. The `wavelet` function
|
||||
is allowed to be complex.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
data : (N,) ndarray
|
||||
data on which to perform the transform.
|
||||
wavelet : function
|
||||
Wavelet function, which should take 2 arguments.
|
||||
The first argument is the number of points that the returned vector
|
||||
will have (len(wavelet(length,width)) == length).
|
||||
The second is a width parameter, defining the size of the wavelet
|
||||
(e.g. standard deviation of a gaussian). See `ricker`, which
|
||||
satisfies these requirements.
|
||||
widths : (M,) sequence
|
||||
Widths to use for transform.
|
||||
dtype : data-type, optional
|
||||
The desired data type of output. Defaults to ``float64`` if the
|
||||
output of `wavelet` is real and ``complex128`` if it is complex.
|
||||
|
||||
.. versionadded:: 1.4.0
|
||||
|
||||
kwargs
|
||||
Keyword arguments passed to wavelet function.
|
||||
|
||||
.. versionadded:: 1.4.0
|
||||
|
||||
Returns
|
||||
-------
|
||||
cwt: (M, N) ndarray
|
||||
Will have shape of (len(widths), len(data)).
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
.. versionadded:: 1.4.0
|
||||
|
||||
For non-symmetric, complex-valued wavelets, the input signal is convolved
|
||||
with the time-reversed complex-conjugate of the wavelet data [1].
|
||||
|
||||
::
|
||||
|
||||
length = min(10 * width[ii], len(data))
|
||||
cwt[ii,:] = signal.convolve(data, np.conj(wavelet(length, width[ii],
|
||||
**kwargs))[::-1], mode='same')
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] S. Mallat, "A Wavelet Tour of Signal Processing (3rd Edition)",
|
||||
Academic Press, 2009.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy import signal
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> t = np.linspace(-1, 1, 200, endpoint=False)
|
||||
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
|
||||
>>> widths = np.arange(1, 31)
|
||||
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
|
||||
>>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
|
||||
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
|
||||
>>> plt.show()
|
||||
"""
|
||||
if wavelet == ricker:
|
||||
window_size = kwargs.pop('window_size', None)
|
||||
# Determine output type
|
||||
if dtype is None:
|
||||
if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG':
|
||||
dtype = np.complex128
|
||||
else:
|
||||
dtype = np.float64
|
||||
|
||||
output = np.zeros((len(widths), len(data)), dtype=dtype)
|
||||
for ind, width in enumerate(widths):
|
||||
N = np.min([10 * width, len(data)])
|
||||
# the conditional block below and the window_size
|
||||
# kwarg pop above may be removed eventually; these
|
||||
# are shims for 32-bit arch + NumPy <= 1.14.5 to
|
||||
# address gh-11095
|
||||
if wavelet == ricker and window_size is None:
|
||||
ceil = np.ceil(N)
|
||||
if ceil != N:
|
||||
N = int(N)
|
||||
wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1])
|
||||
output[ind] = convolve(data, wavelet_data, mode='same')
|
||||
return output
|
47
venv/Lib/site-packages/scipy/signal/windows/__init__.py
Normal file
|
@@ -0,0 +1,47 @@
|
|||
"""
|
||||
Window functions (:mod:`scipy.signal.windows`)
|
||||
==============================================
|
||||
|
||||
The suite of window functions for filtering and spectral estimation.
|
||||
|
||||
.. currentmodule:: scipy.signal.windows
|
||||
|
||||
.. autosummary::
|
||||
:toctree: generated/
|
||||
|
||||
get_window -- Return a window of a given length and type.
|
||||
|
||||
barthann -- Bartlett-Hann window
|
||||
bartlett -- Bartlett window
|
||||
blackman -- Blackman window
|
||||
blackmanharris -- Minimum 4-term Blackman-Harris window
|
||||
bohman -- Bohman window
|
||||
boxcar -- Boxcar window
|
||||
chebwin -- Dolph-Chebyshev window
|
||||
cosine -- Cosine window
|
||||
dpss -- Discrete prolate spheroidal sequences
|
||||
exponential -- Exponential window
|
||||
flattop -- Flat top window
|
||||
gaussian -- Gaussian window
|
||||
general_cosine -- Generalized Cosine window
|
||||
general_gaussian -- Generalized Gaussian window
|
||||
general_hamming -- Generalized Hamming window
|
||||
hamming -- Hamming window
|
||||
hann -- Hann window
|
||||
hanning -- Hann window
|
||||
kaiser -- Kaiser window
|
||||
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
|
||||
parzen -- Parzen window
|
||||
slepian -- Slepian window
|
||||
triang -- Triangular window
|
||||
tukey -- Tukey window
|
||||
|
||||
"""
|
||||
|
||||
from .windows import *
|
||||
|
||||
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
|
||||
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
|
||||
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'general_cosine',
|
||||
'general_hamming', 'chebwin', 'slepian', 'cosine', 'hann',
|
||||
'exponential', 'tukey', 'get_window', 'dpss']
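
# Illustrative example (not part of the upstream module): `get_window` builds
# any of the windows listed above by name, for example a 51-point Hann window.
def _get_window_sketch():
    return get_window('hann', 51)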
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
9
venv/Lib/site-packages/scipy/signal/windows/setup.py
Normal file
|
@@ -0,0 +1,9 @@
|
|||
|
||||
def configuration(parent_package='', top_path=None):
|
||||
from numpy.distutils.misc_util import Configuration
|
||||
|
||||
config = Configuration('windows', parent_package, top_path)
|
||||
|
||||
config.add_data_dir('tests')
|
||||
|
||||
return config
|
2121
venv/Lib/site-packages/scipy/signal/windows/windows.py
Normal file
File diff suppressed because it is too large