Fixed database typo and removed unnecessary class identifier.

Batuhan Berk Başoğlu 2020-10-14 10:10:37 -04:00
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions


@@ -0,0 +1,163 @@
"""
=========================================================
Multidimensional image processing (:mod:`scipy.ndimage`)
=========================================================
.. currentmodule:: scipy.ndimage
This package contains various functions for multidimensional image
processing.
Filters
=======
.. autosummary::
:toctree: generated/
convolve - Multidimensional convolution
convolve1d - 1-D convolution along the given axis
correlate - Multidimensional correlation
correlate1d - 1-D correlation along the given axis
gaussian_filter
gaussian_filter1d
gaussian_gradient_magnitude
gaussian_laplace
generic_filter - Multidimensional filter using a given function
generic_filter1d - 1-D generic filter along the given axis
generic_gradient_magnitude
generic_laplace
laplace - N-D Laplace filter based on approximate second derivatives
maximum_filter
maximum_filter1d
median_filter - Calculates a multidimensional median filter
minimum_filter
minimum_filter1d
percentile_filter - Calculates a multidimensional percentile filter
prewitt
rank_filter - Calculates a multidimensional rank filter
sobel
uniform_filter - Multidimensional uniform filter
uniform_filter1d - 1-D uniform filter along the given axis
Fourier filters
===============
.. autosummary::
:toctree: generated/
fourier_ellipsoid
fourier_gaussian
fourier_shift
fourier_uniform
Interpolation
=============
.. autosummary::
:toctree: generated/
affine_transform - Apply an affine transformation
geometric_transform - Apply an arbitrary geometric transform
map_coordinates - Map input array to new coordinates by interpolation
rotate - Rotate an array
shift - Shift an array
spline_filter
spline_filter1d
zoom - Zoom an array
Measurements
============
.. autosummary::
:toctree: generated/
center_of_mass - The center of mass of the values of an array at labels
extrema - Min's and max's of an array at labels, with their positions
find_objects - Find objects in a labeled array
histogram - Histogram of the values of an array, optionally at labels
label - Label features in an array
labeled_comprehension
maximum
maximum_position
mean - Mean of the values of an array at labels
median
minimum
minimum_position
standard_deviation - Standard deviation of an N-D image array
sum - Sum of the values of the array
variance - Variance of the values of an N-D image array
watershed_ift
Morphology
==========
.. autosummary::
:toctree: generated/
binary_closing
binary_dilation
binary_erosion
binary_fill_holes
binary_hit_or_miss
binary_opening
binary_propagation
black_tophat
distance_transform_bf
distance_transform_cdt
distance_transform_edt
generate_binary_structure
grey_closing
grey_dilation
grey_erosion
grey_opening
iterate_structure
morphological_gradient
morphological_laplace
white_tophat
"""
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .filters import *
from .fourier import *
from .interpolation import *
from .measurements import *
from .morphology import *
__version__ = '2.0'
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
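As a quick orientation for the package initialised above, a minimal usage sketch (assuming only NumPy and SciPy are available; the array values are invented for illustration):

import numpy as np
from scipy import ndimage

# Smooth a small 2-D array with a Gaussian kernel (sigma in pixels).
a = np.zeros((7, 7))
a[3, 3] = 1.0
smoothed = ndimage.gaussian_filter(a, sigma=1.0)

# Label connected components in a thresholded version and count them.
mask = smoothed > 0.05
labels, num_features = ndimage.label(mask)
print(num_features)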


@@ -0,0 +1,128 @@
"""Docstring components common to several ndimage functions."""
from scipy._lib import doccer
__all__ = ['docfiller']
_input_doc = (
"""input : array_like
The input array.""")
_axis_doc = (
"""axis : int, optional
The axis of `input` along which to calculate. Default is -1.""")
_output_doc = (
"""output : array or dtype, optional
The array in which to place the output, or the dtype of the
returned array. By default an array of the same dtype as input
will be created.""")
_size_foot_doc = (
"""size : scalar or tuple, optional
See footprint, below. Ignored if footprint is given.
footprint : array, optional
Either `size` or `footprint` must be defined. `size` gives
the shape that is taken from the input array, at every element
position, to define the input to the filter function.
`footprint` is a boolean array that specifies (implicitly) a
shape, but also which of the elements within this shape will get
passed to the filter function. Thus ``size=(n,m)`` is equivalent
to ``footprint=np.ones((n,m))``. We adjust `size` to the number
of dimensions of the input array, so that, if the input array is
shape (10,10,10), and `size` is 2, then the actual size used is
(2,2,2). When `footprint` is given, `size` is ignored.""")
_mode_doc = (
"""mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the input array is extended
beyond its boundaries. Default is 'reflect'. Behavior for each valid
value is as follows:
'reflect' (`d c b a | a b c d | d c b a`)
The input is extended by reflecting about the edge of the last
pixel.
'constant' (`k k k k | a b c d | k k k k`)
The input is extended by filling all values beyond the edge with
the same constant value, defined by the `cval` parameter.
'nearest' (`a a a a | a b c d | d d d d`)
The input is extended by replicating the last pixel.
'mirror' (`d c b | a b c d | c b a`)
The input is extended by reflecting about the center of the last
pixel.
'wrap' (`a b c d | a b c d | a b c d`)
The input is extended by wrapping around to the opposite edge.""")
_mode_multiple_doc = (
"""mode : str or sequence, optional
The `mode` parameter determines how the input array is extended
when the filter overlaps a border. By passing a sequence of modes
with length equal to the number of dimensions of the input array,
different modes can be specified along each axis. Default value is
'reflect'. The valid values and their behavior is as follows:
'reflect' (`d c b a | a b c d | d c b a`)
The input is extended by reflecting about the edge of the last
pixel.
'constant' (`k k k k | a b c d | k k k k`)
The input is extended by filling all values beyond the edge with
the same constant value, defined by the `cval` parameter.
'nearest' (`a a a a | a b c d | d d d d`)
The input is extended by replicating the last pixel.
'mirror' (`d c b | a b c d | c b a`)
The input is extended by reflecting about the center of the last
pixel.
'wrap' (`a b c d | a b c d | a b c d`)
The input is extended by wrapping around to the opposite edge.""")
_cval_doc = (
"""cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.""")
_origin_doc = (
"""origin : int, optional
Controls the placement of the filter on the input array's pixels.
A value of 0 (the default) centers the filter over the pixel, with
positive values shifting the filter to the left, and negative ones
to the right.""")
_origin_multiple_doc = (
"""origin : int or sequence, optional
Controls the placement of the filter on the input array's pixels.
A value of 0 (the default) centers the filter over the pixel, with
positive values shifting the filter to the left, and negative ones
to the right. By passing a sequence of origins with length equal to
the number of dimensions of the input array, different shifts can
be specified along each axis.""")
_extra_arguments_doc = (
"""extra_arguments : sequence, optional
Sequence of extra positional arguments to pass to passed function.""")
_extra_keywords_doc = (
"""extra_keywords : dict, optional
dict of extra keyword arguments to pass to passed function.""")
_prefilter_doc = (
"""prefilter : bool, optional
Determines if the input array is prefiltered with `spline_filter`
before interpolation. The default is True, which will create a
temporary `float64` array of filtered values if `order > 1`. If
setting this to False, the output will be slightly blurred if
`order > 1`, unless the input is prefiltered, i.e. it is the result
of calling `spline_filter` on the original input.""")
docdict = {
'input': _input_doc,
'axis': _axis_doc,
'output': _output_doc,
'size_foot': _size_foot_doc,
'mode': _mode_doc,
'mode_multiple': _mode_multiple_doc,
'cval': _cval_doc,
'origin': _origin_doc,
'origin_multiple': _origin_multiple_doc,
'extra_arguments': _extra_arguments_doc,
'extra_keywords': _extra_keywords_doc,
'prefilter': _prefilter_doc
}
docfiller = doccer.filldoc(docdict)
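To illustrate how these shared fragments are consumed, here is a hedged sketch of the `doccer.filldoc` mechanism that `docfiller` wraps; the toy dictionary and function below are invented for illustration and are not part of this module:

from scipy._lib import doccer

_toy_docdict = {'input': """input : array_like
    The input array."""}
toy_docfiller = doccer.filldoc(_toy_docdict)

@toy_docfiller
def toy_filter(input):
    """Apply a toy filter.

    Parameters
    ----------
    %(input)s
    """
    return input

print(toy_filter.__doc__)  # the %(input)s placeholder is now expanded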


@@ -0,0 +1,81 @@
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections.abc import Iterable
import numpy
def _extend_mode_to_code(mode):
"""Convert an extension mode to the corresponding integer code.
"""
if mode == 'nearest':
return 0
elif mode == 'wrap':
return 1
elif mode == 'reflect':
return 2
elif mode == 'mirror':
return 3
elif mode == 'constant':
return 4
else:
raise RuntimeError('boundary mode not supported')
def _normalize_sequence(input, rank):
"""If input is a scalar, create a sequence of length equal to the
rank by duplicating the input. If input is a sequence,
check that its length is equal to the given rank.
"""
is_str = isinstance(input, str)
if not is_str and isinstance(input, Iterable):
normalized = list(input)
if len(normalized) != rank:
err = "sequence argument must have length equal to input rank"
raise RuntimeError(err)
else:
normalized = [input] * rank
return normalized
def _get_output(output, input, shape=None):
if shape is None:
shape = input.shape
if output is None:
output = numpy.zeros(shape, dtype=input.dtype.name)
elif isinstance(output, (type, numpy.dtype)):
# Classes (like `np.float32`) and dtypes are interpreted as dtype
output = numpy.zeros(shape, dtype=output)
elif isinstance(output, str):
output = numpy.typeDict[output]
output = numpy.zeros(shape, dtype=output)
elif output.shape != shape:
raise RuntimeError("output shape not correct")
return output
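The `output` handling above is what lets the public functions accept either a dtype or a preallocated array. A small sketch of both call styles through the public API (array values invented):

import numpy as np
from scipy import ndimage

a = np.arange(25, dtype=np.float64).reshape(5, 5)

# Passing a dtype: a fresh output array of that dtype is allocated internally.
res32 = ndimage.uniform_filter(a, size=3, output=np.float32)

# Passing a preallocated array: the result is written into it in place.
buf = np.empty_like(a)
ndimage.uniform_filter(a, size=3, output=buf)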

File diff suppressed because it is too large.


@@ -0,0 +1,305 @@
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
from numpy.core.multiarray import normalize_axis_index
from . import _ni_support
from . import _nd_image
__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
'fourier_shift']
def _get_output_fourier(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128,
numpy.float32]:
output = numpy.zeros(input.shape, dtype=input.dtype)
else:
output = numpy.zeros(input.shape, dtype=numpy.float64)
elif type(output) is type:
if output not in [numpy.complex64, numpy.complex128,
numpy.float32, numpy.float64]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype=output)
elif output.shape != input.shape:
raise RuntimeError("output shape not correct")
return output
def _get_output_fourier_complex(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128]:
output = numpy.zeros(input.shape, dtype=input.dtype)
else:
output = numpy.zeros(input.shape, dtype=numpy.complex128)
elif type(output) is type:
if output not in [numpy.complex64, numpy.complex128]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype=output)
elif output.shape != input.shape:
raise RuntimeError("output shape not correct")
return output
def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
"""
Multidimensional Gaussian fourier filter.
The array is multiplied with the fourier transform of a Gaussian
kernel.
Parameters
----------
input : array_like
The input array.
sigma : float or sequence
The sigma of the Gaussian kernel. If a float, `sigma` is the same for
all axes. If a sequence, `sigma` has to contain one value for each
axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_gaussian : ndarray
The filtered input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_gaussian(input_, sigma=4)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output = _get_output_fourier(output, input)
axis = normalize_axis_index(axis, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
sigmas = numpy.asarray(sigmas, dtype=numpy.float64)
if not sigmas.flags.contiguous:
sigmas = sigmas.copy()
_nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
return output
def fourier_uniform(input, size, n=-1, axis=-1, output=None):
"""
Multidimensional uniform fourier filter.
The array is multiplied with the Fourier transform of a box of given
size.
Parameters
----------
input : array_like
The input array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_uniform : ndarray
The filtered input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_uniform(input_, size=20)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output = _get_output_fourier(output, input)
axis = normalize_axis_index(axis, input.ndim)
sizes = _ni_support._normalize_sequence(size, input.ndim)
sizes = numpy.asarray(sizes, dtype=numpy.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
_nd_image.fourier_filter(input, sizes, n, axis, output, 1)
return output
def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
"""
Multidimensional ellipsoid Fourier filter.
The array is multiplied with the Fourier transform of an ellipsoid of
the given sizes.
Parameters
----------
input : array_like
The input array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_ellipsoid : ndarray
The filtered input.
Notes
-----
This function is implemented for arrays of rank 1, 2, or 3.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_ellipsoid(input_, size=20)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output = _get_output_fourier(output, input)
axis = normalize_axis_index(axis, input.ndim)
sizes = _ni_support._normalize_sequence(size, input.ndim)
sizes = numpy.asarray(sizes, dtype=numpy.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
_nd_image.fourier_filter(input, sizes, n, axis, output, 2)
return output
def fourier_shift(input, shift, n=-1, axis=-1, output=None):
"""
Multidimensional Fourier shift filter.
The array is multiplied with the Fourier transform of a shift operation.
Parameters
----------
input : array_like
The input array.
shift : float or sequence
The shift to apply along the axes.
If a float, `shift` is the same for all axes. If a sequence, `shift`
has to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of shifting the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_shift : ndarray
The shifted input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> import numpy.fft
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_shift(input_, shift=200)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output = _get_output_fourier_complex(output, input)
axis = normalize_axis_index(axis, input.ndim)
shifts = _ni_support._normalize_sequence(shift, input.ndim)
shifts = numpy.asarray(shifts, dtype=numpy.float64)
if not shifts.flags.contiguous:
shifts = shifts.copy()
_nd_image.fourier_shift(input, shifts, n, axis, output)
return output
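As a quick consistency check on the filters above, an integer shift applied with `fourier_shift` in the frequency domain should match a plain circular shift of the original signal, up to floating-point error. A minimal 1-D sketch (values invented):

import numpy as np
from scipy import ndimage

x = np.arange(8, dtype=np.float64)

# Shift by two samples in the Fourier domain (the FFT is periodic, so the
# shift wraps around), then transform back.
shifted_f = np.fft.ifft(ndimage.fourier_shift(np.fft.fft(x), shift=2)).real

# The same circular shift done directly on the samples.
shifted_d = np.roll(x, 2)

print(np.allclose(shifted_f, shifted_d))  # expected: True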


@@ -0,0 +1,787 @@
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools
import warnings
import numpy
from numpy.core.multiarray import normalize_axis_index
from . import _ni_support
from . import _nd_image
from ._ni_docstrings import docdict
from scipy._lib import doccer
# Change the default 'reflect' to 'constant' via modifying a copy of docdict
docdict_copy = docdict.copy()
del docdict
docdict_copy['mode'] = docdict_copy['mode'].replace("Default is 'reflect'",
"Default is 'constant'")
docfiller = doccer.filldoc(docdict_copy)
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
@docfiller
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64,
mode='mirror'):
"""
Calculate a 1-D spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
%(input)s
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is ``numpy.float64``.
%(mode)s
Returns
-------
spline_filter1d : ndarray
The filtered input.
Notes
-----
All functions in `ndimage.interpolation` do spline interpolation of
the input image. If using B-splines of `order > 1`, the input image
values have to be converted to B-spline coefficients first, which is
done by applying this 1-D filter sequentially along all
axes of the input. All functions that require B-spline coefficients
will automatically filter their inputs, a behavior controllable with
the `prefilter` keyword argument. For functions that accept a `mode`
parameter, the result will only be correct if it matches the `mode`
used when filtering.
See Also
--------
spline_filter : Multidimensional spline filter.
Examples
--------
We can filter an image using a 1-D spline along the given axis:
>>> from scipy.ndimage import spline_filter1d
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> orig_img = np.eye(20) # create an image
>>> orig_img[10, :] = 1.0
>>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0)
>>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1)
>>> f, ax = plt.subplots(1, 3, sharex=True)
>>> for ind, data in enumerate([[orig_img, "original image"],
... [sp_filter_axis_0, "spline filter (axis=0)"],
... [sp_filter_axis_1, "spline filter (axis=1)"]]):
... ax[ind].imshow(data[0], cmap='gray_r')
... ax[ind].set_title(data[1])
>>> plt.tight_layout()
>>> plt.show()
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
mode = _ni_support._extend_mode_to_code(mode)
axis = normalize_axis_index(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output, mode)
return output
def spline_filter(input, order=3, output=numpy.float64, mode='mirror'):
"""
Multidimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d : Calculate a 1-D spline filter along the given axis.
Notes
-----
The multidimensional filter is implemented as a sequence of
1-D spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
Examples
--------
We can filter an image using multidimensional splines:
>>> from scipy.ndimage import spline_filter
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> orig_img = np.eye(20) # create an image
>>> orig_img[10, :] = 1.0
>>> sp_filter = spline_filter(orig_img, order=3)
>>> f, ax = plt.subplots(1, 2, sharex=True)
>>> for ind, data in enumerate([[orig_img, "original image"],
... [sp_filter, "spline filter"]]):
... ax[ind].imshow(data[0], cmap='gray_r')
... ax[ind].set_title(data[1])
>>> plt.tight_layout()
>>> plt.show()
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output=output, mode=mode)
input = output
else:
output[...] = input[...]
return output
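As the notes under `spline_filter1d` explain, functions that need B-spline coefficients prefilter their input unless told otherwise. A small sketch showing that prefiltering manually and passing ``prefilter=False`` reproduces the default behaviour (default ``order=3``; values invented):

import numpy as np
from scipy import ndimage

x = np.arange(25, dtype=np.float64).reshape(5, 5)

auto = ndimage.shift(x, (0.5, 0.5))                        # prefilters internally
coeffs = ndimage.spline_filter(x, order=3)                 # explicit prefiltering
manual = ndimage.shift(coeffs, (0.5, 0.5), prefilter=False)

print(np.allclose(auto, manual))  # expected: True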
@docfiller
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
%(input)s
mapping : {callable, scipy.LowLevelCallable}
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints, optional
Shape tuple.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
output : ndarray
The filtered input.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int mapping(npy_intp *output_coordinates, double *input_coordinates,
int output_rank, int input_rank, void *user_data)
int mapping(intptr_t *output_coordinates, double *input_coordinates,
int output_rank, int input_rank, void *user_data)
The calling function iterates over the elements of the output array,
calling the callback function at each element. The coordinates of the
current output element are passed through ``output_coordinates``. The
callback function must return the coordinates at which the input must
be interpolated in ``input_coordinates``. The rank of the input and
output arrays are given by ``input_rank`` and ``output_rank``
respectively. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the Python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
Examples
--------
>>> import numpy as np
>>> from scipy.ndimage import geometric_transform
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
>>> b = [1, 2, 3, 4, 5]
>>> def shift_func(output_coords):
... return (output_coords[0] - 3,)
...
>>> geometric_transform(b, shift_func, mode='constant')
array([0, 0, 0, 1, 2])
>>> geometric_transform(b, shift_func, mode='nearest')
array([1, 1, 1, 1, 2])
>>> geometric_transform(b, shift_func, mode='reflect')
array([3, 2, 1, 1, 2])
>>> geometric_transform(b, shift_func, mode='wrap')
array([2, 3, 4, 1, 2])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input, shape=output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None, output,
order, mode, cval, extra_arguments,
extra_keywords)
return output
@docfiller
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
%(input)s
coordinates : array_like
The coordinates at which `input` is evaluated.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
map_coordinates : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import numpy as np
>>> from scipy import ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
array([ 2., 7.])
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return output
@docfiller
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
Given an output image pixel index vector ``o``, the pixel value
is determined from the input image at position
``np.dot(matrix, o) + offset``.
This does 'pull' (or 'backward') resampling, transforming the output space
to the input to locate data. Affine transformations are often described in
the 'push' (or 'forward') direction, transforming input to output. If you
have a matrix for the 'push' transformation, use its inverse
(:func:`numpy.linalg.inv`) in this function.
Parameters
----------
%(input)s
matrix : ndarray
The inverse coordinate transformation matrix, mapping output
coordinates to input coordinates. If ``ndim`` is the number of
dimensions of ``input``, the given matrix must have one of the
following shapes:
- ``(ndim, ndim)``: the linear transformation matrix for each
output coordinate.
- ``(ndim,)``: assume that the 2-D transformation matrix is
diagonal, with the diagonal specified by the given value. A more
efficient algorithm is then used that exploits the separability
of the problem.
- ``(ndim + 1, ndim + 1)``: assume that the transformation is
specified using homogeneous coordinates [1]_. In this case, any
value passed to ``offset`` is ignored.
- ``(ndim, ndim + 1)``: as above, but the bottom row of a
homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
and may be omitted.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
affine_transform : ndarray
The transformed input.
Notes
-----
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
.. versionchanged:: 0.18.0
Previously, the exact interpretation of the affine transformation
depended on whether the matrix was supplied as a 1-D or a
2-D array. If a 1-D array was supplied
to the matrix parameter, the output pixel value at index ``o``
was determined from the input image at position
``matrix * (o + offset)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input,
shape=output_shape)
matrix = numpy.asarray(matrix, dtype=numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
(matrix.shape[0] in [input.ndim, input.ndim + 1])):
if matrix.shape[0] == input.ndim + 1:
exptd = [0] * input.ndim + [1]
if not numpy.all(matrix[input.ndim] == exptd):
msg = ('Expected homogeneous transformation matrix with '
'shape %s for image shape %s, but bottom row was '
'not equal to %s' % (matrix.shape, input.shape, exptd))
raise ValueError(msg)
# assume input is homogeneous coordinate transformation matrix
offset = matrix[:input.ndim, input.ndim]
matrix = matrix[:input.ndim, :input.ndim]
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype=numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
warnings.warn(
"The behavior of affine_transform with a 1-D "
"array supplied for the matrix parameter has changed in "
"SciPy 0.18.0."
)
_nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return output
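A short sketch of the homogeneous-matrix convention documented above: an ``(ndim + 1, ndim + 1)`` matrix should give the same result as the equivalent ``(ndim, ndim)`` matrix plus an explicit ``offset`` (matrix and offset values below are invented):

import numpy as np
from scipy import ndimage

img = np.arange(25, dtype=np.float64).reshape(5, 5)

linear = np.array([[0., 1.],
                   [1., 0.]])     # swap the two axes
offset = [0.5, -0.5]
out1 = ndimage.affine_transform(img, linear, offset=offset)

# Same transform in homogeneous form: offset in the last column,
# bottom row fixed to [0, 0, 1].
homogeneous = np.array([[0., 1., 0.5],
                        [1., 0., -0.5],
                        [0., 0., 1.]])
out2 = ndimage.affine_transform(img, homogeneous)

print(np.allclose(out1, out2))  # expected: True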
@docfiller
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
%(input)s
shift : float or sequence
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
shift : ndarray
The shifted input.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype=numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return output
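Since `shift` has no Examples section, a minimal usage sketch (values invented):

import numpy as np
from scipy import ndimage

a = np.arange(9, dtype=np.float64).reshape(3, 3)

# Shift the array one row towards higher row indices; vacated cells are
# filled with `cval` (0.0 by default, because mode='constant').
print(ndimage.shift(a, (1, 0), order=0))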
@docfiller
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
%(input)s
zoom : float or sequence
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
zoom : ndarray
The zoomed input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.zoom(ascent, 3.0)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
>>> print(ascent.shape)
(512, 512)
>>> print(result.shape)
(1536, 1536)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple(
[int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
zoom_div = numpy.array(output_shape, float) - 1
# Zooming to infinite values is unpredictable, so just choose
# zoom factor 1 instead
zoom = numpy.divide(numpy.array(input.shape) - 1, zoom_div,
out=numpy.ones_like(input.shape, dtype=numpy.float64),
where=zoom_div != 0)
output = _ni_support._get_output(output, input,
shape=output_shape)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return output
@docfiller
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
%(input)s
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
rotate : ndarray
The rotated input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(10, 3))
>>> ax1, ax2, ax3 = fig.subplots(1, 3)
>>> img = misc.ascent()
>>> img_45 = ndimage.rotate(img, 45, reshape=False)
>>> full_img_45 = ndimage.rotate(img, 45, reshape=True)
>>> ax1.imshow(img, cmap='gray')
>>> ax1.set_axis_off()
>>> ax2.imshow(img_45, cmap='gray')
>>> ax2.set_axis_off()
>>> ax3.imshow(full_img_45, cmap='gray')
>>> ax3.set_axis_off()
>>> fig.set_tight_layout(True)
>>> plt.show()
>>> print(img.shape)
(512, 512)
>>> print(img_45.shape)
(512, 512)
>>> print(full_img_45.shape)
(724, 724)
"""
input_arr = numpy.asarray(input)
ndim = input_arr.ndim
if ndim < 2:
raise ValueError('input array should be at least 2D')
axes = list(axes)
if len(axes) != 2:
raise ValueError('axes should contain exactly two values')
if not all([float(ax).is_integer() for ax in axes]):
raise ValueError('axes should contain only integer values')
if axes[0] < 0:
axes[0] += ndim
if axes[1] < 0:
axes[1] += ndim
if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:
raise ValueError('invalid rotation plane specified')
axes.sort()
angle_rad = numpy.deg2rad(angle)
c, s = numpy.cos(angle_rad), numpy.sin(angle_rad)
rot_matrix = numpy.array([[c, s],
[-s, c]])
img_shape = numpy.asarray(input_arr.shape)
in_plane_shape = img_shape[axes]
if reshape:
# Compute transformed input bounds
iy, ix = in_plane_shape
out_bounds = rot_matrix @ [[0, 0, iy, iy],
[0, ix, 0, ix]]
# Compute the shape of the transformed input plane
out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int)
else:
out_plane_shape = img_shape[axes]
out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
in_center = (in_plane_shape - 1) / 2
offset = in_center - out_center
output_shape = img_shape
output_shape[axes] = out_plane_shape
output_shape = tuple(output_shape)
output = _ni_support._get_output(output, input_arr, shape=output_shape)
if ndim <= 2:
affine_transform(input_arr, rot_matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
# If ndim > 2, the rotation is applied over all the planes
# parallel to axes
planes_coord = itertools.product(
*[[slice(None)] if ax in axes else range(img_shape[ax])
for ax in range(ndim)])
out_plane_shape = tuple(out_plane_shape)
for coordinates in planes_coord:
ia = input_arr[coordinates]
oa = output[coordinates]
affine_transform(ia, rot_matrix, offset, out_plane_shape,
oa, order, mode, cval, prefilter)
return output

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,57 @@
import os
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
from numpy import get_include
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='', top_path=None):
config = Configuration('ndimage', parent_package, top_path)
include_dirs = ['src',
get_include(),
os.path.join(os.path.dirname(__file__), '..', '_lib', 'src')]
config.add_extension("_nd_image",
sources=["src/nd_image.c",
"src/ni_filters.c",
"src/ni_fourier.c",
"src/ni_interpolation.c",
"src/ni_measure.c",
"src/ni_morphology.c",
"src/ni_splines.c",
"src/ni_support.c"],
include_dirs=include_dirs,
**numpy_nodepr_api)
# Cython wants the .c and .pyx to have the underscore.
config.add_extension("_ni_label",
sources=["src/_ni_label.c",],
include_dirs=['src']+[get_include()])
config.add_extension("_ctest",
sources=["src/_ctest.c"],
include_dirs=[get_include()],
**numpy_nodepr_api)
_define_macros = [("OLDAPI", 1)]
if 'define_macros' in numpy_nodepr_api:
_define_macros.extend(numpy_nodepr_api['define_macros'])
config.add_extension("_ctest_oldapi",
sources=["src/_ctest.c"],
include_dirs=[get_include()],
define_macros=_define_macros)
config.add_extension("_cytest",
sources=["src/_cytest.c"])
config.add_data_dir('tests')
return config
if __name__ == '__main__':
setup(**configuration(top_path='').todict())


@@ -0,0 +1,4 @@
label_inputs.txt, label_strels.txt, and label_results.txt are test
vectors generated using ndimage.label from scipy version 0.10.0, and
are used to verify that the cython version behaves as expected. The
script to generate them is in ../../utils/generate_label_testvectors.py
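The generator script itself is not part of this commit, so here is only a hedged sketch of how vectors of this shape could be regenerated; file names follow the note above, but the real utils/generate_label_testvectors.py may differ in detail:

# Hypothetical regeneration sketch -- not the referenced upstream script.
import numpy as np
from scipy import ndimage

inputs = np.loadtxt("label_inputs.txt", dtype=int)    # stacked 7x7 input blocks
strels = np.loadtxt("label_strels.txt", dtype=int)    # stacked 3x3 structuring elements

results = []
for image in inputs.reshape(-1, 7, 7):
    for strel in strels.reshape(-1, 3, 3):
        labeled, _ = ndimage.label(image, structure=strel)
        results.append(labeled)

np.savetxt("label_results.txt", np.vstack(results), fmt="%d")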


@@ -0,0 +1,21 @@
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 0 1 1 1
1 1 0 0 0 1 1
1 0 1 0 1 0 1
0 0 0 1 0 0 0
1 0 1 0 1 0 1
1 1 0 0 0 1 1
1 1 1 0 1 1 1
1 0 1 1 1 0 1
0 0 0 1 0 0 0
1 0 0 1 0 0 1
1 1 1 1 1 1 1
1 0 0 1 0 0 1
0 0 0 1 0 0 0
1 0 1 1 1 0 1


@@ -0,0 +1,294 @@
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
2 2 2 2 2 2 2
3 3 3 3 3 3 3
4 4 4 4 4 4 4
5 5 5 5 5 5 5
6 6 6 6 6 6 6
7 7 7 7 7 7 7
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 2 3 4 5 6 7
8 9 10 11 12 13 14
15 16 17 18 19 20 21
22 23 24 25 26 27 28
29 30 31 32 33 34 35
36 37 38 39 40 41 42
43 44 45 46 47 48 49
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 2 3 4 5 6 7
8 1 2 3 4 5 6
9 8 1 2 3 4 5
10 9 8 1 2 3 4
11 10 9 8 1 2 3
12 11 10 9 8 1 2
13 12 11 10 9 8 1
1 2 3 4 5 6 7
1 2 3 4 5 6 7
1 2 3 4 5 6 7
1 2 3 4 5 6 7
1 2 3 4 5 6 7
1 2 3 4 5 6 7
1 2 3 4 5 6 7
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 2 1 2 1 2 1
2 1 2 1 2 1 2
1 2 1 2 1 2 1
2 1 2 1 2 1 2
1 2 1 2 1 2 1
2 1 2 1 2 1 2
1 2 1 2 1 2 1
1 2 3 4 5 6 7
2 3 4 5 6 7 8
3 4 5 6 7 8 9
4 5 6 7 8 9 10
5 6 7 8 9 10 11
6 7 8 9 10 11 12
7 8 9 10 11 12 13
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 0 2 2 2
1 1 0 0 0 2 2
1 0 3 0 2 0 4
0 0 0 2 0 0 0
5 0 2 0 6 0 7
2 2 0 0 0 7 7
2 2 2 0 7 7 7
1 1 1 0 2 2 2
1 1 0 0 0 2 2
3 0 1 0 4 0 2
0 0 0 1 0 0 0
5 0 6 0 1 0 7
5 5 0 0 0 1 1
5 5 5 0 1 1 1
1 1 1 0 2 2 2
3 3 0 0 0 4 4
5 0 6 0 7 0 8
0 0 0 9 0 0 0
10 0 11 0 12 0 13
14 14 0 0 0 15 15
16 16 16 0 17 17 17
1 1 1 0 2 3 3
1 1 0 0 0 3 3
1 0 4 0 3 0 3
0 0 0 3 0 0 0
3 0 3 0 5 0 6
3 3 0 0 0 6 6
3 3 7 0 6 6 6
1 2 3 0 4 5 6
7 8 0 0 0 9 10
11 0 12 0 13 0 14
0 0 0 15 0 0 0
16 0 17 0 18 0 19
20 21 0 0 0 22 23
24 25 26 0 27 28 29
1 1 1 0 2 2 2
1 1 0 0 0 2 2
1 0 3 0 2 0 2
0 0 0 2 0 0 0
2 0 2 0 4 0 5
2 2 0 0 0 5 5
2 2 2 0 5 5 5
1 1 1 0 2 2 2
1 1 0 0 0 2 2
1 0 3 0 4 0 2
0 0 0 5 0 0 0
6 0 7 0 8 0 9
6 6 0 0 0 9 9
6 6 6 0 9 9 9
1 2 3 0 4 5 6
7 1 0 0 0 4 5
8 0 1 0 9 0 4
0 0 0 1 0 0 0
10 0 11 0 1 0 12
13 10 0 0 0 1 14
15 13 10 0 16 17 1
1 2 3 0 4 5 6
1 2 0 0 0 5 6
1 0 7 0 8 0 6
0 0 0 9 0 0 0
10 0 11 0 12 0 13
10 14 0 0 0 15 13
10 14 16 0 17 15 13
1 1 1 0 1 1 1
1 1 0 0 0 1 1
1 0 1 0 1 0 1
0 0 0 1 0 0 0
1 0 1 0 1 0 1
1 1 0 0 0 1 1
1 1 1 0 1 1 1
1 1 2 0 3 3 3
1 1 0 0 0 3 3
1 0 1 0 4 0 3
0 0 0 1 0 0 0
5 0 6 0 1 0 1
5 5 0 0 0 1 1
5 5 5 0 7 1 1
1 2 1 0 1 3 1
2 1 0 0 0 1 3
1 0 1 0 1 0 1
0 0 0 1 0 0 0
1 0 1 0 1 0 1
4 1 0 0 0 1 5
1 4 1 0 1 5 1
1 2 3 0 4 5 6
2 3 0 0 0 6 7
3 0 8 0 6 0 9
0 0 0 6 0 0 0
10 0 6 0 11 0 12
13 6 0 0 0 12 14
6 15 16 0 12 14 17
1 1 1 0 2 2 2
1 1 0 0 0 2 2
1 0 1 0 3 0 2
0 0 0 1 0 0 0
4 0 5 0 1 0 1
4 4 0 0 0 1 1
4 4 4 0 1 1 1
1 0 2 2 2 0 3
0 0 0 2 0 0 0
4 0 0 5 0 0 5
5 5 5 5 5 5 5
5 0 0 5 0 0 6
0 0 0 7 0 0 0
8 0 7 7 7 0 9
1 0 2 2 2 0 3
0 0 0 2 0 0 0
4 0 0 4 0 0 5
4 4 4 4 4 4 4
6 0 0 4 0 0 4
0 0 0 7 0 0 0
8 0 7 7 7 0 9
1 0 2 2 2 0 3
0 0 0 4 0 0 0
5 0 0 6 0 0 7
8 8 8 8 8 8 8
9 0 0 10 0 0 11
0 0 0 12 0 0 0
13 0 14 14 14 0 15
1 0 2 3 3 0 4
0 0 0 3 0 0 0
5 0 0 3 0 0 6
5 5 3 3 3 6 6
5 0 0 3 0 0 6
0 0 0 3 0 0 0
7 0 3 3 8 0 9
1 0 2 3 4 0 5
0 0 0 6 0 0 0
7 0 0 8 0 0 9
10 11 12 13 14 15 16
17 0 0 18 0 0 19
0 0 0 20 0 0 0
21 0 22 23 24 0 25
1 0 2 2 2 0 3
0 0 0 2 0 0 0
2 0 0 2 0 0 2
2 2 2 2 2 2 2
2 0 0 2 0 0 2
0 0 0 2 0 0 0
4 0 2 2 2 0 5
1 0 2 2 2 0 3
0 0 0 2 0 0 0
2 0 0 2 0 0 2
2 2 2 2 2 2 2
2 0 0 2 0 0 2
0 0 0 2 0 0 0
4 0 2 2 2 0 5
1 0 2 3 4 0 5
0 0 0 2 0 0 0
6 0 0 7 0 0 8
9 6 10 11 7 12 13
14 0 0 10 0 0 12
0 0 0 15 0 0 0
16 0 17 18 15 0 19
1 0 2 3 4 0 5
0 0 0 3 0 0 0
6 0 0 3 0 0 7
6 8 9 3 10 11 7
6 0 0 3 0 0 7
0 0 0 3 0 0 0
12 0 13 3 14 0 15
1 0 2 2 2 0 3
0 0 0 2 0 0 0
2 0 0 2 0 0 2
2 2 2 2 2 2 2
2 0 0 2 0 0 2
0 0 0 2 0 0 0
4 0 2 2 2 0 5
1 0 2 2 3 0 4
0 0 0 2 0 0 0
5 0 0 2 0 0 6
5 5 2 2 2 6 6
5 0 0 2 0 0 6
0 0 0 2 0 0 0
7 0 8 2 2 0 9
1 0 2 3 2 0 4
0 0 0 2 0 0 0
5 0 0 6 0 0 7
8 5 6 9 6 7 10
5 0 0 6 0 0 7
0 0 0 11 0 0 0
12 0 11 13 11 0 14
1 0 2 3 4 0 5
0 0 0 4 0 0 0
6 0 0 7 0 0 8
9 10 7 11 12 8 13
10 0 0 12 0 0 14
0 0 0 15 0 0 0
16 0 15 17 18 0 19
1 0 2 2 2 0 3
0 0 0 2 0 0 0
2 0 0 2 0 0 2
2 2 2 2 2 2 2
2 0 0 2 0 0 2
0 0 0 2 0 0 0
4 0 2 2 2 0 5


@@ -0,0 +1,42 @@
0 0 1
1 1 1
1 0 0
1 0 0
1 1 1
0 0 1
0 0 0
1 1 1
0 0 0
0 1 1
0 1 0
1 1 0
0 0 0
0 0 0
0 0 0
0 1 1
1 1 1
1 1 0
0 1 0
1 1 1
0 1 0
1 0 0
0 1 0
0 0 1
0 1 0
0 1 0
0 1 0
1 1 1
1 1 1
1 1 1
1 1 0
0 1 0
0 1 1
1 0 1
0 1 0
1 0 1
0 0 1
0 1 0
1 0 0
1 1 0
1 1 1
0 1 1

Binary file not shown.



@@ -0,0 +1,98 @@
import numpy as np
from numpy.testing import assert_allclose
from scipy import ndimage
from scipy.ndimage import _ctest
from scipy.ndimage import _ctest_oldapi
from scipy.ndimage import _cytest
from scipy._lib._ccallback import LowLevelCallable
FILTER1D_FUNCTIONS = [
lambda filter_size: _ctest.filter1d(filter_size),
lambda filter_size: _ctest_oldapi.filter1d(filter_size),
lambda filter_size: _cytest.filter1d(filter_size, with_signature=False),
lambda filter_size: LowLevelCallable(_cytest.filter1d(filter_size, with_signature=True)),
lambda filter_size: LowLevelCallable.from_cython(_cytest, "_filter1d",
_cytest.filter1d_capsule(filter_size)),
]
FILTER2D_FUNCTIONS = [
lambda weights: _ctest.filter2d(weights),
lambda weights: _ctest_oldapi.filter2d(weights),
lambda weights: _cytest.filter2d(weights, with_signature=False),
lambda weights: LowLevelCallable(_cytest.filter2d(weights, with_signature=True)),
lambda weights: LowLevelCallable.from_cython(_cytest, "_filter2d", _cytest.filter2d_capsule(weights)),
]
TRANSFORM_FUNCTIONS = [
lambda shift: _ctest.transform(shift),
lambda shift: _ctest_oldapi.transform(shift),
lambda shift: _cytest.transform(shift, with_signature=False),
lambda shift: LowLevelCallable(_cytest.transform(shift, with_signature=True)),
lambda shift: LowLevelCallable.from_cython(_cytest, "_transform", _cytest.transform_capsule(shift)),
]
def test_generic_filter():
def filter2d(footprint_elements, weights):
return (weights*footprint_elements).sum()
def check(j):
func = FILTER2D_FUNCTIONS[j]
im = np.ones((20, 20))
im[:10,:10] = 0
footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
footprint_size = np.count_nonzero(footprint)
weights = np.ones(footprint_size)/footprint_size
res = ndimage.generic_filter(im, func(weights),
footprint=footprint)
std = ndimage.generic_filter(im, filter2d, footprint=footprint,
extra_arguments=(weights,))
assert_allclose(res, std, err_msg="#{} failed".format(j))
for j, func in enumerate(FILTER2D_FUNCTIONS):
check(j)
def test_generic_filter1d():
def filter1d(input_line, output_line, filter_size):
for i in range(output_line.size):
output_line[i] = 0
for j in range(filter_size):
output_line[i] += input_line[i+j]
output_line /= filter_size
def check(j):
func = FILTER1D_FUNCTIONS[j]
im = np.tile(np.hstack((np.zeros(10), np.ones(10))), (10, 1))
filter_size = 3
res = ndimage.generic_filter1d(im, func(filter_size),
filter_size)
std = ndimage.generic_filter1d(im, filter1d, filter_size,
extra_arguments=(filter_size,))
assert_allclose(res, std, err_msg="#{} failed".format(j))
for j, func in enumerate(FILTER1D_FUNCTIONS):
check(j)
def test_geometric_transform():
def transform(output_coordinates, shift):
return output_coordinates[0] - shift, output_coordinates[1] - shift
def check(j):
func = TRANSFORM_FUNCTIONS[j]
im = np.arange(12).reshape(4, 3).astype(np.float64)
shift = 0.5
res = ndimage.geometric_transform(im, func(shift))
std = ndimage.geometric_transform(im, transform, extra_arguments=(shift,))
assert_allclose(res, std, err_msg="#{} failed".format(j))
for j, func in enumerate(TRANSFORM_FUNCTIONS):
check(j)
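For comparison with the low-level callbacks exercised above, the same filter functions also accept plain Python callables; a minimal sketch (slower than the compiled callbacks, array values invented):

import numpy as np
from scipy import ndimage

im = np.arange(144, dtype=np.float64).reshape(12, 12)

# generic_filter: the callback receives the footprint values as a 1-D array.
mean_filtered = ndimage.generic_filter(im, lambda values: values.mean(), size=3)

# generic_filter1d: the callback fills output_line in place; input_line is
# longer than output_line by filter_size - 1 samples.
def running_mean(input_line, output_line):
    size = input_line.size - output_line.size + 1
    for i in range(output_line.size):
        output_line[i] = input_line[i:i + size].mean()

smoothed = ndimage.generic_filter1d(im, running_mean, filter_size=3)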


@@ -0,0 +1,66 @@
""" Testing data types for ndimage calls
"""
import sys
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_
import pytest
from scipy import ndimage
def test_map_coordinates_dts():
# check that ndimage accepts different data types for interpolation
data = np.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
shifted_data = np.array([[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
idx = np.indices(data.shape)
dts = (np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.intp, np.uintp, np.float32, np.float64)
for order in range(0, 6):
for data_dt in dts:
these_data = data.astype(data_dt)
for coord_dt in dts:
# affine mapping
mat = np.eye(2, dtype=coord_dt)
off = np.zeros((2,), dtype=coord_dt)
out = ndimage.affine_transform(these_data, mat, off)
assert_array_almost_equal(these_data, out)
# map coordinates
coords_m1 = idx.astype(coord_dt) - 1
coords_p10 = idx.astype(coord_dt) + 10
out = ndimage.map_coordinates(these_data, coords_m1, order=order)
assert_array_almost_equal(out, shifted_data)
# check constant fill works
out = ndimage.map_coordinates(these_data, coords_p10, order=order)
assert_array_almost_equal(out, np.zeros((3,4)))
# check shift and zoom
out = ndimage.shift(these_data, 1)
assert_array_almost_equal(out, shifted_data)
out = ndimage.zoom(these_data, 1)
assert_array_almost_equal(these_data, out)
@pytest.mark.xfail(not sys.platform == 'darwin', reason="runs only on darwin")
def test_uint64_max():
# Test interpolation respects uint64 max. Reported to fail at least on
# win32 (due to the 32-bit Visual C compiler using signed int64 when
# converting from uint64 to double) and Debian on s390x.
# Interpolation is always done in double precision floating point, so
# we use the largest uint64 value for which int(float(big)) still fits
# in a uint64.
big = 2**64 - 1025
arr = np.array([big, big, big], dtype=np.uint64)
# Tests geometric transform (map_coordinates, affine_transform)
inds = np.indices(arr.shape) - 0.1
x = ndimage.map_coordinates(arr, inds)
assert_(x[1] == int(float(big)))
assert_(x[2] == int(float(big)))
# Tests zoom / shift
x = ndimage.shift(arr, 0.1)
assert_(x[1] == int(float(big)))
assert_(x[2] == int(float(big)))
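# Illustrative sketch of the reasoning above: a double has a 53-bit
# significand, so the spacing between representable values just below 2**64
# is 2**11 == 2048.  2**64 - 1025 lies below the rounding midpoint and rounds
# down to 2**64 - 2048, which still fits in a uint64, whereas 2**64 - 1024
# (the exact tie) rounds up to 2**64 and would no longer fit.
assert float(2**64 - 1025) == 2**64 - 2048
assert float(2**64 - 1024) == 2**64
assert int(float(2**64 - 1025)) <= np.iinfo(np.uint64).max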

View file

@ -0,0 +1,443 @@
''' Some tests for filters '''
import numpy as np
from numpy.testing import (assert_equal, assert_allclose,
assert_array_equal, assert_almost_equal,
suppress_warnings)
from pytest import raises as assert_raises
import scipy.ndimage as sndi
from scipy.ndimage.filters import _gaussian_kernel1d, rank_filter
def test_ticket_701():
# Test generic filter sizes
arr = np.arange(4).reshape((2,2))
func = lambda x: np.min(x)
res = sndi.generic_filter(arr, func, size=(1,1))
# The following raises an error unless ticket 701 is fixed
res2 = sndi.generic_filter(arr, func, size=1)
assert_equal(res, res2)
def test_gh_5430():
# At least one of these raises an error unless gh-5430 is
# fixed. In py2k an int is implemented using a C long, so
# which one fails depends on your system. In py3k there is only
# one arbitrary precision integer type, so both should fail.
sigma = np.int32(1)
out = sndi._ni_support._normalize_sequence(sigma, 1)
assert_equal(out, [sigma])
sigma = np.int64(1)
out = sndi._ni_support._normalize_sequence(sigma, 1)
assert_equal(out, [sigma])
# This worked before; make sure it still works
sigma = 1
out = sndi._ni_support._normalize_sequence(sigma, 1)
assert_equal(out, [sigma])
# This worked before; make sure it still works
sigma = [1, 1]
out = sndi._ni_support._normalize_sequence(sigma, 2)
assert_equal(out, sigma)
# Also include the OP's original example to make sure we fixed the issue
x = np.random.normal(size=(256, 256))
perlin = np.zeros_like(x)
for i in 2**np.arange(6):
perlin += sndi.filters.gaussian_filter(x, i, mode="wrap") * i**2
# This also fixes gh-4106; show that the OP's example now runs.
x = np.int64(21)
sndi._ni_support._normalize_sequence(x, 0)
def test_gaussian_kernel1d():
radius = 10
sigma = 2
sigma2 = sigma * sigma
x = np.arange(-radius, radius + 1, dtype=np.double)
phi_x = np.exp(-0.5 * x * x / sigma2)
phi_x /= phi_x.sum()
assert_allclose(phi_x, _gaussian_kernel1d(sigma, 0, radius))
assert_allclose(-phi_x * x / sigma2, _gaussian_kernel1d(sigma, 1, radius))
assert_allclose(phi_x * (x * x / sigma2 - 1) / sigma2,
_gaussian_kernel1d(sigma, 2, radius))
assert_allclose(phi_x * (3 - x * x / sigma2) * x / (sigma2 * sigma2),
_gaussian_kernel1d(sigma, 3, radius))
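# For reference, the arrays compared above are the normalized Gaussian
#     phi(x) = exp(-x**2 / (2*sigma**2)) / Z,   Z = phi_x.sum(),
# and its first three derivatives,
#     phi'(x)   = -x/sigma**2 * phi(x)
#     phi''(x)  = (x**2/sigma**2 - 1) / sigma**2 * phi(x)
#     phi'''(x) = (3 - x**2/sigma**2) * x / sigma**4 * phi(x)
# which is what _gaussian_kernel1d(sigma, order, radius) is expected to
# return for order = 0, 1, 2, 3.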
def test_orders_gauss():
# Check order inputs to Gaussians
arr = np.zeros((1,))
assert_equal(0, sndi.gaussian_filter(arr, 1, order=0))
assert_equal(0, sndi.gaussian_filter(arr, 1, order=3))
assert_raises(ValueError, sndi.gaussian_filter, arr, 1, -1)
assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0))
assert_equal(0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3))
assert_raises(ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1)
def test_valid_origins():
"""Regression test for #1311."""
func = lambda x: np.mean(x)
data = np.array([1,2,3,4,5], dtype=np.float64)
assert_raises(ValueError, sndi.generic_filter, data, func, size=3,
origin=2)
assert_raises(ValueError, sndi.generic_filter1d, data, func,
filter_size=3, origin=2)
assert_raises(ValueError, sndi.percentile_filter, data, 0.2, size=3,
origin=2)
for filter in [sndi.uniform_filter, sndi.minimum_filter,
sndi.maximum_filter, sndi.maximum_filter1d,
sndi.median_filter, sndi.minimum_filter1d]:
# This should work, since for size == 3, the valid range for origin is
# -1 to 1.
list(filter(data, 3, origin=-1))
list(filter(data, 3, origin=1))
# Just check this raises an error instead of silently accepting or
# segfaulting.
assert_raises(ValueError, filter, data, 3, origin=2)
def test_bad_convolve_and_correlate_origins():
"""Regression test for gh-822."""
# Before gh-822 was fixed, these would generate seg. faults or
# other crashes on many systems.
assert_raises(ValueError, sndi.correlate1d,
[0, 1, 2, 3, 4, 5], [1, 1, 2, 0], origin=2)
assert_raises(ValueError, sndi.correlate,
[0, 1, 2, 3, 4, 5], [0, 1, 2], origin=[2])
assert_raises(ValueError, sndi.correlate,
np.ones((3, 5)), np.ones((2, 2)), origin=[0, 1])
assert_raises(ValueError, sndi.convolve1d,
np.arange(10), np.ones(3), origin=-2)
assert_raises(ValueError, sndi.convolve,
np.arange(10), np.ones(3), origin=[-2])
assert_raises(ValueError, sndi.convolve,
np.ones((3, 5)), np.ones((2, 2)), origin=[0, -2])
def test_multiple_modes():
# Test that the filters with multiple mode capabilities for different
# dimensions give the same result as applying a single mode.
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
mode1 = 'reflect'
mode2 = ['reflect', 'reflect']
assert_equal(sndi.gaussian_filter(arr, 1, mode=mode1),
sndi.gaussian_filter(arr, 1, mode=mode2))
assert_equal(sndi.prewitt(arr, mode=mode1),
sndi.prewitt(arr, mode=mode2))
assert_equal(sndi.sobel(arr, mode=mode1),
sndi.sobel(arr, mode=mode2))
assert_equal(sndi.laplace(arr, mode=mode1),
sndi.laplace(arr, mode=mode2))
assert_equal(sndi.gaussian_laplace(arr, 1, mode=mode1),
sndi.gaussian_laplace(arr, 1, mode=mode2))
assert_equal(sndi.maximum_filter(arr, size=5, mode=mode1),
sndi.maximum_filter(arr, size=5, mode=mode2))
assert_equal(sndi.minimum_filter(arr, size=5, mode=mode1),
sndi.minimum_filter(arr, size=5, mode=mode2))
assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=mode1),
sndi.gaussian_gradient_magnitude(arr, 1, mode=mode2))
assert_equal(sndi.uniform_filter(arr, 5, mode=mode1),
sndi.uniform_filter(arr, 5, mode=mode2))
def test_multiple_modes_sequentially():
# Test that the filters with multiple mode capabilities for different
# dimensions give the same result as applying the filters with
# different modes sequentially
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
modes = ['reflect', 'wrap']
expected = sndi.gaussian_filter1d(arr, 1, axis=0, mode=modes[0])
expected = sndi.gaussian_filter1d(expected, 1, axis=1, mode=modes[1])
assert_equal(expected,
sndi.gaussian_filter(arr, 1, mode=modes))
expected = sndi.uniform_filter1d(arr, 5, axis=0, mode=modes[0])
expected = sndi.uniform_filter1d(expected, 5, axis=1, mode=modes[1])
assert_equal(expected,
sndi.uniform_filter(arr, 5, mode=modes))
expected = sndi.maximum_filter1d(arr, size=5, axis=0, mode=modes[0])
expected = sndi.maximum_filter1d(expected, size=5, axis=1, mode=modes[1])
assert_equal(expected,
sndi.maximum_filter(arr, size=5, mode=modes))
expected = sndi.minimum_filter1d(arr, size=5, axis=0, mode=modes[0])
expected = sndi.minimum_filter1d(expected, size=5, axis=1, mode=modes[1])
assert_equal(expected,
sndi.minimum_filter(arr, size=5, mode=modes))
def test_multiple_modes_prewitt():
# Test prewitt filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[1., -3., 2.],
[1., -2., 1.],
[1., -1., 0.]])
modes = ['reflect', 'wrap']
assert_equal(expected,
sndi.prewitt(arr, mode=modes))
def test_multiple_modes_sobel():
# Test sobel filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[1., -4., 3.],
[2., -3., 1.],
[1., -1., 0.]])
modes = ['reflect', 'wrap']
assert_equal(expected,
sndi.sobel(arr, mode=modes))
def test_multiple_modes_laplace():
# Test laplace filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[-2., 2., 1.],
[-2., -3., 2.],
[1., 1., 0.]])
modes = ['reflect', 'wrap']
assert_equal(expected,
sndi.laplace(arr, mode=modes))
def test_multiple_modes_gaussian_laplace():
# Test gaussian_laplace filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[-0.28438687, 0.01559809, 0.19773499],
[-0.36630503, -0.20069774, 0.07483620],
[0.15849176, 0.18495566, 0.21934094]])
modes = ['reflect', 'wrap']
assert_almost_equal(expected,
sndi.gaussian_laplace(arr, 1, mode=modes))
def test_multiple_modes_gaussian_gradient_magnitude():
# Test gaussian_gradient_magnitude filter for multiple
# extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[0.04928965, 0.09745625, 0.06405368],
[0.23056905, 0.14025305, 0.04550846],
[0.19894369, 0.14950060, 0.06796850]])
modes = ['reflect', 'wrap']
calculated = sndi.gaussian_gradient_magnitude(arr, 1, mode=modes)
assert_almost_equal(expected, calculated)
def test_multiple_modes_uniform():
# Test uniform filter for multiple extrapolation modes
arr = np.array([[1., 0., 0.],
[1., 1., 0.],
[0., 0., 0.]])
expected = np.array([[0.32, 0.40, 0.48],
[0.20, 0.28, 0.32],
[0.28, 0.32, 0.40]])
modes = ['reflect', 'wrap']
assert_almost_equal(expected,
sndi.uniform_filter(arr, 5, mode=modes))
def test_gaussian_truncate():
# Test that Gaussian filters can be truncated at different widths.
# These tests only check that the result has the expected number
# of nonzero elements.
arr = np.zeros((100, 100), float)
arr[50, 50] = 1
num_nonzeros_2 = (sndi.gaussian_filter(arr, 5, truncate=2) > 0).sum()
assert_equal(num_nonzeros_2, 21**2)
num_nonzeros_5 = (sndi.gaussian_filter(arr, 5, truncate=5) > 0).sum()
assert_equal(num_nonzeros_5, 51**2)
# Test truncate when sigma is a sequence.
f = sndi.gaussian_filter(arr, [0.5, 2.5], truncate=3.5)
fpos = f > 0
n0 = fpos.any(axis=0).sum()
# n0 should be 2*int(2.5*3.5 + 0.5) + 1
assert_equal(n0, 19)
n1 = fpos.any(axis=1).sum()
# n1 should be 2*int(0.5*3.5 + 0.5) + 1
assert_equal(n1, 5)
# Test gaussian_filter1d.
x = np.zeros(51)
x[25] = 1
f = sndi.gaussian_filter1d(x, sigma=2, truncate=3.5)
n = (f > 0).sum()
assert_equal(n, 15)
# Test gaussian_laplace
y = sndi.gaussian_laplace(x, sigma=2, truncate=3.5)
nonzero_indices = np.nonzero(y != 0)[0]
n = nonzero_indices.ptp() + 1
assert_equal(n, 15)
# Test gaussian_gradient_magnitude
y = sndi.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
nonzero_indices = np.nonzero(y != 0)[0]
n = nonzero_indices.ptp() + 1
assert_equal(n, 15)
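# Sketch of where the expected counts above come from (assuming, as the
# numbers imply, a Gaussian kernel half-width of int(truncate * sigma + 0.5)):
def _expected_nonzeros_1d(sigma, truncate):
    radius = int(truncate * sigma + 0.5)
    return 2 * radius + 1
assert _expected_nonzeros_1d(5, 2) == 21      # -> 21**2 nonzeros in 2-D
assert _expected_nonzeros_1d(5, 5) == 51      # -> 51**2 nonzeros in 2-D
assert _expected_nonzeros_1d(2.5, 3.5) == 19  # n0
assert _expected_nonzeros_1d(0.5, 3.5) == 5   # n1
assert _expected_nonzeros_1d(2, 3.5) == 15    # gaussian_filter1d case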
class TestThreading(object):
def check_func_thread(self, n, fun, args, out):
from threading import Thread
thrds = [Thread(target=fun, args=args, kwargs={'output': out[x]}) for x in range(n)]
[t.start() for t in thrds]
[t.join() for t in thrds]
def check_func_serial(self, n, fun, args, out):
for i in range(n):
fun(*args, output=out[i])
def test_correlate1d(self):
d = np.random.randn(5000)
os = np.empty((4, d.size))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.correlate1d, (d, np.arange(5)), os)
self.check_func_thread(4, sndi.correlate1d, (d, np.arange(5)), ot)
assert_array_equal(os, ot)
def test_correlate(self):
d = np.random.randn(500, 500)
k = np.random.randn(10, 10)
os = np.empty([4] + list(d.shape))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.correlate, (d, k), os)
self.check_func_thread(4, sndi.correlate, (d, k), ot)
assert_array_equal(os, ot)
def test_median_filter(self):
d = np.random.randn(500, 500)
os = np.empty([4] + list(d.shape))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.median_filter, (d, 3), os)
self.check_func_thread(4, sndi.median_filter, (d, 3), ot)
assert_array_equal(os, ot)
def test_uniform_filter1d(self):
d = np.random.randn(5000)
os = np.empty((4, d.size))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.uniform_filter1d, (d, 5), os)
self.check_func_thread(4, sndi.uniform_filter1d, (d, 5), ot)
assert_array_equal(os, ot)
def test_minmax_filter(self):
d = np.random.randn(500, 500)
os = np.empty([4] + list(d.shape))
ot = np.empty_like(os)
self.check_func_serial(4, sndi.maximum_filter, (d, 3), os)
self.check_func_thread(4, sndi.maximum_filter, (d, 3), ot)
assert_array_equal(os, ot)
self.check_func_serial(4, sndi.minimum_filter, (d, 3), os)
self.check_func_thread(4, sndi.minimum_filter, (d, 3), ot)
assert_array_equal(os, ot)
def test_minmaximum_filter1d():
# Regression gh-3898
in_ = np.arange(10)
out = sndi.minimum_filter1d(in_, 1)
assert_equal(in_, out)
out = sndi.maximum_filter1d(in_, 1)
assert_equal(in_, out)
# Test reflect
out = sndi.minimum_filter1d(in_, 5, mode='reflect')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
out = sndi.maximum_filter1d(in_, 5, mode='reflect')
assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
# Test constant
out = sndi.minimum_filter1d(in_, 5, mode='constant', cval=-1)
assert_equal([-1, -1, 0, 1, 2, 3, 4, 5, -1, -1], out)
out = sndi.maximum_filter1d(in_, 5, mode='constant', cval=10)
assert_equal([10, 10, 4, 5, 6, 7, 8, 9, 10, 10], out)
# Test nearest
out = sndi.minimum_filter1d(in_, 5, mode='nearest')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 6, 7], out)
out = sndi.maximum_filter1d(in_, 5, mode='nearest')
assert_equal([2, 3, 4, 5, 6, 7, 8, 9, 9, 9], out)
# Test wrap
out = sndi.minimum_filter1d(in_, 5, mode='wrap')
assert_equal([0, 0, 0, 1, 2, 3, 4, 5, 0, 0], out)
out = sndi.maximum_filter1d(in_, 5, mode='wrap')
assert_equal([9, 9, 4, 5, 6, 7, 8, 9, 9, 9], out)
def test_uniform_filter1d_roundoff_errors():
# gh-6930
in_ = np.repeat([0, 1, 0], [9, 9, 9])
for filter_size in range(3, 10):
out = sndi.uniform_filter1d(in_, filter_size)
assert_equal(out.sum(), 10 - filter_size)
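# The input is an integer array, so the output of uniform_filter1d is
# truncated to int: an element is 1 only where the window lies entirely
# inside the block of nine ones, i.e. at 9 - filter_size + 1 == 10 - filter_size
# positions.  gh-6930 reported accumulated roundoff that could make some of
# those averages land just below 1.0 and truncate to 0.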
def test_footprint_all_zeros():
# regression test for gh-6876: footprint of all zeros segfaults
arr = np.random.randint(0, 100, (100, 100))
kernel = np.zeros((3, 3), bool)
with assert_raises(ValueError):
sndi.maximum_filter(arr, footprint=kernel)
def test_gaussian_filter():
# Test gaussian filter with np.float16
# gh-8207
data = np.array([1], dtype=np.float16)
sigma = 1.0
with assert_raises(RuntimeError):
sndi.gaussian_filter(data, sigma)
def test_rank_filter_noninteger_rank():
# regression test for issue 9388: ValueError for
# non-integer rank when performing rank_filter
arr = np.random.random((10, 20, 30))
assert_raises(TypeError, rank_filter, arr, 0.5,
footprint=np.ones((1, 1, 10), dtype=bool))
def test_size_footprint_both_set():
# test for input validation, expect user warning when
# size and footprint are set
with suppress_warnings() as sup:
sup.filter(UserWarning,
"ignoring size because footprint is set")
arr = np.random.random((10, 20, 30))
rank_filter(arr, 5, size=2, footprint=np.ones((1, 1, 10), dtype=bool))

File diff suppressed because it is too large.

View file

@ -0,0 +1,49 @@
import numpy
from pytest import raises as assert_raises
import scipy.ndimage as sndi
def test_binary_erosion_noninteger_iterations():
# regression test for gh-9905, gh-9909: ValueError for
# non-integer iterations
data = numpy.ones([1])
assert_raises(TypeError, sndi.binary_erosion, data, iterations=0.5)
assert_raises(TypeError, sndi.binary_erosion, data, iterations=1.5)
def test_binary_dilation_noninteger_iterations():
# regression test for gh-9905, gh-9909: ValueError for
# non-integer iterations
data = numpy.ones([1])
assert_raises(TypeError, sndi.binary_dilation, data, iterations=0.5)
assert_raises(TypeError, sndi.binary_dilation, data, iterations=1.5)
def test_binary_opening_noninteger_iterations():
# regression test for gh-9905, gh-9909: ValueError for
# non-integer iterations
data = numpy.ones([1])
assert_raises(TypeError, sndi.binary_opening, data, iterations=0.5)
assert_raises(TypeError, sndi.binary_opening, data, iterations=1.5)
def test_binary_closing_noninteger_iterations():
# regression test for gh-9905, gh-9909: ValueError for
# non-integer iterations
data = numpy.ones([1])
assert_raises(TypeError, sndi.binary_closing, data, iterations=0.5)
assert_raises(TypeError, sndi.binary_closing, data, iterations=1.5)
def test_binary_erosion_noninteger_brute_force_passes_when_true():
# regression test for gh-9905, gh-9909: non-integer brute_force values
# are still accepted (interpreted as booleans)
data = numpy.ones([1])
assert sndi.binary_erosion(
data, iterations=2, brute_force=1.5
) == sndi.binary_erosion(data, iterations=2, brute_force=bool(1.5))
assert sndi.binary_erosion(
data, iterations=2, brute_force=0.0
) == sndi.binary_erosion(data, iterations=2, brute_force=bool(0.0))

File diff suppressed because it is too large.

View file

@ -0,0 +1,45 @@
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy.ndimage as ndimage
def test_byte_order_median():
"""Regression test for #413: median_filter does not handle bytes orders."""
a = np.arange(9, dtype='<f4').reshape(3, 3)
ref = ndimage.filters.median_filter(a,(3, 3))
b = np.arange(9, dtype='>f4').reshape(3, 3)
t = ndimage.filters.median_filter(b, (3, 3))
assert_array_almost_equal(ref, t)
def test_zoom_output_shape():
"""Ticket #643"""
x = np.arange(12).reshape((3,4))
ndimage.zoom(x, 2, output=np.zeros((6,8)))
def test_ticket_742():
def SE(img, thresh=.7, size=4):
mask = img > thresh
rank = len(mask.shape)
la, co = ndimage.label(mask,
ndimage.generate_binary_structure(rank, rank))
_ = ndimage.find_objects(la)
if np.dtype(np.intp) != np.dtype('i'):
shape = (3,1240,1240)
a = np.random.rand(np.prod(shape)).reshape(shape)
# shouldn't crash
SE(a)
def test_gh_issue_3025():
"""Github issue #3025 - improper merging of labels"""
d = np.zeros((60,320))
d[:,:257] = 1
d[:,260:] = 1
d[36,257] = 1
d[35,258] = 1
d[35,259] = 1
assert ndimage.label(d, np.ones((3,3)))[1] == 1

View file

@ -0,0 +1,65 @@
"""Tests for spline filtering."""
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from scipy import ndimage
def get_spline_knot_values(order):
"""Knot values to the right of a B-spline's center."""
knot_values = {0: [1],
1: [1],
2: [6, 1],
3: [4, 1],
4: [230, 76, 1],
5: [66, 26, 1]}
return knot_values[order]
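# These are, up to a common scale, the values of the order-n B-spline at the
# integer knots; e.g. the cubic B-spline takes the values 1/6, 4/6, 1/6 at
# x = -1, 0, 1, which is the row [4, 1] above divided by 4 + 2*1 = 6.
# A quick sanity check of that normalization (an added sketch):
_vals = get_spline_knot_values(3)
assert [v / (_vals[0] + 2 * sum(_vals[1:])) for v in _vals] == [4 / 6, 1 / 6]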
def make_spline_knot_matrix(n, order, mode='mirror'):
"""Matrix to invert to find the spline coefficients."""
knot_values = get_spline_knot_values(order)
matrix = np.zeros((n, n))
for diag, knot_value in enumerate(knot_values):
indices = np.arange(diag, n)
if diag == 0:
matrix[indices, indices] = knot_value
else:
matrix[indices, indices - diag] = knot_value
matrix[indices - diag, indices] = knot_value
knot_values_sum = knot_values[0] + 2 * sum(knot_values[1:])
if mode == 'mirror':
start, step = 1, 1
elif mode == 'reflect':
start, step = 0, 1
elif mode == 'wrap':
start, step = -1, -1
else:
raise ValueError('unsupported mode {}'.format(mode))
for row in range(len(knot_values) - 1):
for idx, knot_value in enumerate(knot_values[row + 1:]):
matrix[row, start + step*idx] += knot_value
matrix[-row - 1, -start - 1 - step*idx] += knot_value
return matrix / knot_values_sum
@pytest.mark.parametrize('order', [0, 1, 2, 3, 4, 5])
@pytest.mark.parametrize('mode', ['mirror', 'wrap', 'reflect'])
def test_spline_filter_vs_matrix_solution(order, mode):
n = 100
eye = np.eye(n, dtype=float)
spline_filter_axis_0 = ndimage.spline_filter1d(eye, axis=0, order=order,
mode=mode)
spline_filter_axis_1 = ndimage.spline_filter1d(eye, axis=1, order=order,
mode=mode)
matrix = make_spline_knot_matrix(n, order, mode=mode)
assert_almost_equal(eye, np.dot(spline_filter_axis_0, matrix))
assert_almost_equal(eye, np.dot(spline_filter_axis_1, matrix.T))