Fixed database typo and removed unnecessary class identifier.

Batuhan Berk Başoğlu 2020-10-14 10:10:37 -04:00
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions

@@ -0,0 +1,57 @@
from .histogram_matching import match_histograms
from .hough_transform import (hough_line, hough_line_peaks,
probabilistic_hough_line, hough_circle,
hough_circle_peaks, hough_ellipse)
from .radon_transform import (radon, iradon, iradon_sart,
order_angles_golden_ratio)
from .finite_radon_transform import frt2, ifrt2
from .integral import integral_image, integrate
from ._geometric import (estimate_transform,
matrix_transform, EuclideanTransform,
SimilarityTransform, AffineTransform,
ProjectiveTransform, FundamentalMatrixTransform,
EssentialMatrixTransform, PolynomialTransform,
PiecewiseAffineTransform)
from ._warps import (swirl, resize, rotate, rescale,
downscale_local_mean, warp, warp_coords, warp_polar)
from .pyramids import (pyramid_reduce, pyramid_expand,
pyramid_gaussian, pyramid_laplacian)
__all__ = ['match_histograms',
'hough_circle',
'hough_ellipse',
'hough_line',
'probabilistic_hough_line',
'hough_circle_peaks',
'hough_line_peaks',
'radon',
'iradon',
'iradon_sart',
'order_angles_golden_ratio',
'frt2',
'ifrt2',
'integral_image',
'integrate',
'warp',
'warp_coords',
'warp_polar',
'estimate_transform',
'matrix_transform',
'EuclideanTransform',
'SimilarityTransform',
'AffineTransform',
'ProjectiveTransform',
'EssentialMatrixTransform',
'FundamentalMatrixTransform',
'PolynomialTransform',
'PiecewiseAffineTransform',
'swirl',
'resize',
'rotate',
'rescale',
'downscale_local_mean',
'pyramid_reduce',
'pyramid_expand',
'pyramid_gaussian',
'pyramid_laplacian']
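As a quick orientation to the public API re-exported above, here is a minimal usage sketch; it assumes scikit-image and NumPy are installed and is illustrative only.

import numpy as np
from skimage.transform import AffineTransform, rescale, rotate, warp

# A small synthetic image: a bright square on a dark background.
image = np.zeros((64, 64))
image[16:48, 16:48] = 1.0

rotated = rotate(image, angle=30)                  # rotate by 30 degrees
smaller = rescale(image, 0.5)                      # downscale by a factor of 2
tform = AffineTransform(scale=(1.2, 1.2), translation=(5, -3))
warped = warp(image, tform.inverse)                # resample through the inverse map

print(rotated.shape, smaller.shape, warped.shape)  # (64, 64) (32, 32) (64, 64)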

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,134 @@
"""
:author: Gary Ruben, 2009
:license: modified BSD
"""
__all__ = ["frt2", "ifrt2"]
import numpy as np
from numpy import roll, newaxis
def frt2(a):
"""Compute the 2-dimensional finite radon transform (FRT) for an n x n
integer array.
Parameters
----------
a : array_like
A 2-D square n x n integer array.
Returns
-------
FRT : 2-D ndarray
Finite Radon Transform array of (n+1) x n integer coefficients.
See Also
--------
ifrt2 : The two-dimensional inverse FRT.
Notes
-----
The FRT has a unique inverse if and only if n is prime. [FRT]
The idea for this algorithm is due to Vlad Negnevitski.
Examples
--------
Generate a test image:
Use a prime number for the array dimensions
>>> SIZE = 59
>>> img = np.tri(SIZE, dtype=np.int32)
Apply the Finite Radon Transform:
>>> f = frt2(img)
References
----------
.. [FRT] A. Kingston and I. Svalbe, "Projective transforms on periodic
discrete image arrays," in P. Hawkes (Ed), Advances in Imaging
and Electron Physics, 139 (2006)
"""
if a.ndim != 2 or a.shape[0] != a.shape[1]:
raise ValueError("Input must be a square, 2-D array")
ai = a.copy()
n = ai.shape[0]
f = np.empty((n + 1, n), np.uint32)
f[0] = ai.sum(axis=0)
for m in range(1, n):
# Roll the pth row of ai left by p places
for row in range(1, n):
ai[row] = roll(ai[row], -row)
f[m] = ai.sum(axis=0)
f[n] = ai.sum(axis=1)
return f
def ifrt2(a):
"""Compute the 2-dimensional inverse finite radon transform (iFRT) for
an (n+1) x n integer array.
Parameters
----------
a : array_like
A 2-D (n+1) row x n column integer array.
Returns
-------
iFRT : 2-D n x n ndarray
Inverse Finite Radon Transform array of n x n integer coefficients.
See Also
--------
frt2 : The two-dimensional FRT
Notes
-----
The FRT has a unique inverse if and only if n is prime.
See [1]_ for an overview.
The idea for this algorithm is due to Vlad Negnevitski.
Examples
--------
>>> SIZE = 59
>>> img = np.tri(SIZE, dtype=np.int32)
Apply the Finite Radon Transform:
>>> f = frt2(img)
Apply the Inverse Finite Radon Transform to recover the input
>>> fi = ifrt2(f)
Check that it's identical to the original
>>> assert len(np.nonzero(img-fi)[0]) == 0
References
----------
.. [1] A. Kingston and I. Svalbe, "Projective transforms on periodic
discrete image arrays," in P. Hawkes (Ed), Advances in Imaging
and Electron Physics, 139 (2006)
"""
if a.ndim != 2 or a.shape[0] != a.shape[1] + 1:
raise ValueError("Input must be an (n+1) row x n column, 2-D array")
ai = a.copy()[:-1]
n = ai.shape[1]
f = np.empty((n, n), np.uint32)
f[0] = ai.sum(axis=0)
for m in range(1, n):
# Rolls the pth row of ai right by p places.
for row in range(1, ai.shape[0]):
ai[row] = roll(ai[row], row)
f[m] = ai.sum(axis=0)
f += a[-1][newaxis].T
f = (f - ai[0].sum()) / n
return f
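To tie `frt2` and `ifrt2` together, a minimal round-trip sketch (assuming NumPy is available and this module is importable via `skimage.transform`); the prime array size is what guarantees the exact inverse noted in the docstrings above.

import numpy as np
from skimage.transform import frt2, ifrt2

SIZE = 59                                # prime, so the FRT is exactly invertible
img = np.tri(SIZE, dtype=np.int32)

coeffs = frt2(img)                       # (n + 1) x n array of projection sums
recovered = ifrt2(coeffs)                # back to an n x n array

assert np.array_equal(img, recovered)    # exact recovery for prime n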

@@ -0,0 +1,25 @@
from warnings import warn
from ..exposure import match_histograms as mh
def match_histograms(image, reference, multichannel=False):
warn('DEPRECATED: skimage.transform.match_histograms has been moved to '
'skimage.exposure.match_histograms. It will be removed from '
'skimage.transform in version 0.18.', stacklevel=2)
return mh(image, reference, multichannel=multichannel)
if mh.__doc__ is not None:
match_histograms.__doc__ = mh.__doc__ + """
Warns
-----
Deprecated:
.. versionadded:: 0.16
This function is deprecated and will be removed in scikit-image 0.18.
Please use ``match_histograms`` from ``exposure`` module instead.
See also
--------
skimage.exposure.match_histograms
"""

@@ -0,0 +1,427 @@
import numpy as np
from scipy.spatial import cKDTree
from ._hough_transform import (_hough_circle,
_hough_ellipse,
_hough_line,
_probabilistic_hough_line as _prob_hough_line)
def hough_line_peaks(hspace, angles, dists, min_distance=9, min_angle=10,
threshold=None, num_peaks=np.inf):
"""Return peaks in a straight line Hough transform.
Identifies most prominent lines separated by a certain angle and distance
in a Hough transform. Non-maximum suppression with different sizes is
applied separately in the first (distances) and second (angles) dimension
of the Hough space to identify peaks.
Parameters
----------
hspace : (N, M) array
Hough space returned by the `hough_line` function.
angles : (M,) array
Angles returned by the `hough_line` function. Assumed to be continuous.
(`angles[-1] - angles[0] == PI`).
dists : (N, ) array
Distances returned by the `hough_line` function.
min_distance : int, optional
Minimum distance separating lines (maximum filter size for first
dimension of hough space).
min_angle : int, optional
Minimum angle separating lines (maximum filter size for second
dimension of hough space).
threshold : float, optional
Minimum intensity of peaks. Default is `0.5 * max(hspace)`.
num_peaks : int, optional
Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
return `num_peaks` coordinates based on peak intensity.
Returns
-------
accum, angles, dists : tuple of array
Peak values in Hough space, angles and distances.
Examples
--------
>>> from skimage.transform import hough_line, hough_line_peaks
>>> from skimage.draw import line
>>> img = np.zeros((15, 15), dtype=np.bool_)
>>> rr, cc = line(0, 0, 14, 14)
>>> img[rr, cc] = 1
>>> rr, cc = line(0, 14, 14, 0)
>>> img[cc, rr] = 1
>>> hspace, angles, dists = hough_line(img)
>>> hspace, angles, dists = hough_line_peaks(hspace, angles, dists)
>>> len(angles)
2
"""
from ..feature.peak import _prominent_peaks
h, a, d = _prominent_peaks(hspace, min_xdistance=min_angle,
min_ydistance=min_distance,
threshold=threshold,
num_peaks=num_peaks)
if a.any():
return (h, angles[a], dists[d])
else:
return (h, np.array([]), np.array([]))
def hough_circle(image, radius, normalize=True, full_output=False):
"""Perform a circular Hough transform.
Parameters
----------
image : (M, N) ndarray
Input image with nonzero values representing edges.
radius : scalar or sequence of scalars
Radii at which to compute the Hough transform.
Floats are converted to integers.
normalize : boolean, optional (default True)
Normalize the accumulator with the number
of pixels used to draw the radius.
full_output : boolean, optional (default False)
Extend the output size by twice the largest
radius in order to detect centers outside the
input picture.
Returns
-------
H : 3D ndarray (radius index, (M + 2R, N + 2R) ndarray)
Hough transform accumulator for each radius.
R designates the larger radius if full_output is True.
Otherwise, R = 0.
Examples
--------
>>> from skimage.transform import hough_circle
>>> from skimage.draw import circle_perimeter
>>> img = np.zeros((100, 100), dtype=np.bool_)
>>> rr, cc = circle_perimeter(25, 35, 23)
>>> img[rr, cc] = 1
>>> try_radii = np.arange(5, 50)
>>> res = hough_circle(img, try_radii)
>>> ridx, r, c = np.unravel_index(np.argmax(res), res.shape)
>>> r, c, try_radii[ridx]
(25, 35, 23)
"""
radius = np.atleast_1d(np.asarray(radius))
return _hough_circle(image, radius.astype(np.intp),
normalize=normalize, full_output=full_output)
def hough_ellipse(image, threshold=4, accuracy=1, min_size=4, max_size=None):
"""Perform an elliptical Hough transform.
Parameters
----------
image : (M, N) ndarray
Input image with nonzero values representing edges.
threshold : int, optional
Accumulator threshold value.
accuracy : double, optional
Bin size on the minor axis used in the accumulator.
min_size : int, optional
Minimal major axis length.
max_size : int, optional
Maximal minor axis length.
If None, the value is set to the half of the smaller
image dimension.
Returns
-------
result : ndarray with fields [(accumulator, yc, xc, a, b, orientation)].
Where ``(yc, xc)`` is the center, ``(a, b)`` the major and minor
axes, respectively. The `orientation` value follows
`skimage.draw.ellipse_perimeter` convention.
Examples
--------
>>> from skimage.transform import hough_ellipse
>>> from skimage.draw import ellipse_perimeter
>>> img = np.zeros((25, 25), dtype=np.uint8)
>>> rr, cc = ellipse_perimeter(10, 10, 6, 8)
>>> img[cc, rr] = 1
>>> result = hough_ellipse(img, threshold=8)
>>> result.tolist()
[(10, 10.0, 10.0, 8.0, 6.0, 0.0)]
Notes
-----
The accuracy must be chosen to produce a peak in the accumulator
distribution. In other words, a flat accumulator distribution with low
values may be caused by a bin size that is too small.
References
----------
.. [1] Xie, Yonghong, and Qiang Ji. "A new efficient ellipse detection
method." Pattern Recognition, 2002. Proceedings. 16th International
Conference on. Vol. 2. IEEE, 2002
"""
return _hough_ellipse(image, threshold=threshold, accuracy=accuracy,
min_size=min_size, max_size=max_size)
def hough_line(image, theta=None):
"""Perform a straight line Hough transform.
Parameters
----------
image : (M, N) ndarray
Input image with nonzero values representing edges.
theta : 1D ndarray of double, optional
Angles at which to compute the transform, in radians.
Defaults to a vector of 180 angles evenly spaced from -pi/2 to pi/2.
Returns
-------
hspace : 2-D ndarray of uint64
Hough transform accumulator.
angles : ndarray
Angles at which the transform is computed, in radians.
distances : ndarray
Distance values.
Notes
-----
The origin is the top left corner of the original image.
The X and Y axes run along the horizontal and vertical edges of the image, respectively.
The distance is the minimal algebraic distance from the origin
to the detected line.
The angle accuracy can be improved by decreasing the step size in
the `theta` array.
Examples
--------
Generate a test image:
>>> img = np.zeros((100, 150), dtype=bool)
>>> img[30, :] = 1
>>> img[:, 65] = 1
>>> img[35:45, 35:50] = 1
>>> for i in range(90):
... img[i, i] = 1
>>> img += np.random.random(img.shape) > 0.95
Apply the Hough transform:
>>> out, angles, d = hough_line(img)
.. plot:: hough_tf.py
"""
if image.ndim != 2:
raise ValueError('The input image `image` must be 2D.')
if theta is None:
# These values are approximations of pi/2
theta = np.linspace(-np.pi / 2, np.pi / 2, 180)
return _hough_line(image, theta=theta)
def probabilistic_hough_line(image, threshold=10, line_length=50, line_gap=10,
theta=None, seed=None):
"""Return lines from a progressive probabilistic line Hough transform.
Parameters
----------
image : (M, N) ndarray
Input image with nonzero values representing edges.
threshold : int, optional
Accumulator voting threshold; only candidate lines with at least this many votes are considered.
line_length : int, optional
Minimum accepted length of detected lines.
Increase the parameter to extract longer lines.
line_gap : int, optional
Maximum gap between pixels to still form a line.
Increase the parameter to merge broken lines more aggressively.
theta : 1D ndarray, dtype=double, optional
Angles at which to compute the transform, in radians.
If None, use a range from -pi/2 to pi/2.
seed : int, optional
Seed to initialize the random number generator.
Returns
-------
lines : list
List of lines identified, lines in format ((x0, y0), (x1, y1)),
indicating line start and end.
References
----------
.. [1] C. Galamhos, J. Matas and J. Kittler, "Progressive probabilistic
Hough transform for line detection", in IEEE Computer Society
Conference on Computer Vision and Pattern Recognition, 1999.
"""
if image.ndim != 2:
raise ValueError('The input image `image` must be 2D.')
if theta is None:
theta = np.pi / 2 - np.arange(180) / 180.0 * np.pi
return _prob_hough_line(image, threshold=threshold, line_length=line_length,
line_gap=line_gap, theta=theta, seed=seed)
def hough_circle_peaks(hspaces, radii, min_xdistance=1, min_ydistance=1,
threshold=None, num_peaks=np.inf,
total_num_peaks=np.inf, normalize=False):
"""Return peaks in a circle Hough transform.
Identifies most prominent circles separated by certain distances in given
Hough spaces. Non-maximum suppression with different sizes is applied
separately in the first and second dimension of the Hough space to
identify peaks. For circles with different radii whose centers are close
together, only the one with the highest peak is kept.
Parameters
----------
hspaces : (N, M) array
Hough spaces returned by the `hough_circle` function.
radii : (M,) array
Radii corresponding to Hough spaces.
min_xdistance : int, optional
Minimum distance separating centers in the x dimension.
min_ydistance : int, optional
Minimum distance separating centers in the y dimension.
threshold : float, optional
Minimum intensity of peaks in each Hough space.
Default is `0.5 * max(hspace)`.
num_peaks : int, optional
Maximum number of peaks in each Hough space. When the
number of peaks exceeds `num_peaks`, only `num_peaks`
coordinates based on peak intensity are considered for the
corresponding radius.
total_num_peaks : int, optional
Maximum total number of peaks. When the number of peaks exceeds
`total_num_peaks`, only the `total_num_peaks` coordinates with the highest
peak intensity are returned.
normalize : bool, optional
If True, normalize the accumulator by the radius to sort the prominent
peaks.
Returns
-------
accum, cx, cy, rad : tuple of array
Peak values in Hough space, x and y center coordinates and radii.
Examples
--------
>>> from skimage import transform, draw
>>> img = np.zeros((120, 100), dtype=int)
>>> radius, x_0, y_0 = (20, 99, 50)
>>> y, x = draw.circle_perimeter(y_0, x_0, radius)
>>> img[x, y] = 1
>>> hspaces = transform.hough_circle(img, radius)
>>> accum, cx, cy, rad = hough_circle_peaks(hspaces, [radius,])
Notes
-----
Circles with bigger radius have higher peaks in Hough space. If larger
circles are preferred over smaller ones, `normalize` should be False.
Otherwise, circles will be returned in the order of decreasing voting
number.
"""
from ..feature.peak import _prominent_peaks
r = []
cx = []
cy = []
accum = []
for rad, hp in zip(radii, hspaces):
h_p, x_p, y_p = _prominent_peaks(hp,
min_xdistance=min_xdistance,
min_ydistance=min_ydistance,
threshold=threshold,
num_peaks=num_peaks)
r.extend((rad,)*len(h_p))
cx.extend(x_p)
cy.extend(y_p)
accum.extend(h_p)
r = np.array(r)
cx = np.array(cx)
cy = np.array(cy)
accum = np.array(accum)
if normalize:
s = np.argsort(accum / r)
else:
s = np.argsort(accum)
accum_sorted, cx_sorted, cy_sorted, r_sorted = \
accum[s][::-1], cx[s][::-1], cy[s][::-1], r[s][::-1]
tnp = len(accum_sorted) if total_num_peaks == np.inf else total_num_peaks
# Skip searching for neighboring circles
# if default min_xdistance and min_ydistance are used
# or if no peak was detected
if (min_xdistance == 1 and min_ydistance == 1) or len(accum_sorted) == 0:
return (accum_sorted[:tnp],
cx_sorted[:tnp],
cy_sorted[:tnp],
r_sorted[:tnp])
# For circles with centers too close, only keep the one with
# the highest peak
should_keep = label_distant_points(
cx_sorted, cy_sorted, min_xdistance, min_ydistance, tnp
)
return (accum_sorted[should_keep],
cx_sorted[should_keep],
cy_sorted[should_keep],
r_sorted[should_keep])
def label_distant_points(xs, ys, min_xdistance, min_ydistance, max_points):
"""Keep points that are separated by certain distance in each dimension.
The first point is always accepted and all subsequent points are selected
so that they are distant from all their preceding ones.
Parameters
----------
xs : array
X coordinates of points.
ys : array
Y coordinates of points.
min_xdistance : int
Minimum distance separating points in the x dimension.
min_ydistance : int
Minimum distance separating points in the y dimension.
max_points : int
Max number of distant points to keep.
Returns
-------
should_keep : array of bool
A mask array for distant points to keep.
"""
is_neighbor = np.zeros(len(xs), dtype=bool)
coordinates = np.stack([xs, ys], axis=1)
# Use a KDTree to search for neighboring points efficiently
kd_tree = cKDTree(coordinates)
n_pts = 0
for i in range(len(xs)):
if n_pts >= max_points:
# Ignore the point once the number of points to keep has reached the maximum
is_neighbor[i] = True
elif not is_neighbor[i]:
# Find a short list of candidates to remove
# by searching within a circle
neighbors_i = kd_tree.query_ball_point(
(xs[i], ys[i]),
np.hypot(min_xdistance, min_ydistance)
)
# Check distance in both dimensions and mark if close
for ni in neighbors_i:
x_close = abs(xs[ni] - xs[i]) <= min_xdistance
y_close = abs(ys[ni] - ys[i]) <= min_ydistance
if x_close and y_close and ni > i:
is_neighbor[ni] = True
n_pts += 1
should_keep = ~is_neighbor
return should_keep
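Because `probabilistic_hough_line` has no Examples section above, here is a brief usage sketch (assuming scikit-image and NumPy); the two drawn segments are longer than `line_length`, so each should come back as a start/end point pair.

import numpy as np
from skimage.draw import line
from skimage.transform import probabilistic_hough_line

img = np.zeros((100, 100), dtype=bool)
rr, cc = line(10, 10, 10, 90)        # horizontal segment
img[rr, cc] = True
rr, cc = line(20, 20, 80, 20)        # vertical segment
img[rr, cc] = True

lines = probabilistic_hough_line(img, threshold=10, line_length=30,
                                 line_gap=3, seed=0)
for (x0, y0), (x1, y1) in lines:
    print((x0, y0), "->", (x1, y1))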

@@ -0,0 +1,131 @@
import numpy as np
import collections
from .._shared.utils import warn
def integral_image(image):
r"""Integral image / summed area table.
The integral image contains the sum of all elements above and to the
left of it, i.e.:
.. math::
S[m, n] = \sum_{i \leq m} \sum_{j \leq n} X[i, j]
Parameters
----------
image : ndarray
Input image.
Returns
-------
S : ndarray
Integral image/summed area table of same shape as input image.
References
----------
.. [1] F.C. Crow, "Summed-area tables for texture mapping,"
ACM SIGGRAPH Computer Graphics, vol. 18, 1984, pp. 207-212.
"""
S = image
for i in range(image.ndim):
S = S.cumsum(axis=i)
return S
def integrate(ii, start, end):
"""Use an integral image to integrate over a given window.
Parameters
----------
ii : ndarray
Integral image.
start : List of tuples, each tuple of length equal to dimension of `ii`
Coordinates of top left corner of window(s).
Each tuple in the list contains the starting row, col, ... index
i.e `[(row_win1, col_win1, ...), (row_win2, col_win2,...), ...]`.
end : List of tuples, each tuple of length equal to dimension of `ii`
Coordinates of bottom right corner of window(s).
Each tuple in the list containing the end row, col, ... index i.e
`[(row_win1, col_win1, ...), (row_win2, col_win2, ...), ...]`.
Returns
-------
S : scalar or ndarray
Integral (sum) over the given window(s).
Examples
--------
>>> arr = np.ones((5, 6), dtype=np.float)
>>> ii = integral_image(arr)
>>> integrate(ii, (1, 0), (1, 2)) # sum from (1, 0) to (1, 2)
array([3.])
>>> integrate(ii, [(3, 3)], [(4, 5)]) # sum from (3, 3) to (4, 5)
array([6.])
>>> # sum from (1, 0) to (1, 2) and from (3, 3) to (4, 5)
>>> integrate(ii, [(1, 0), (3, 3)], [(1, 2), (4, 5)])
array([3., 6.])
"""
start = np.atleast_2d(np.array(start))
end = np.atleast_2d(np.array(end))
rows = start.shape[0]
total_shape = ii.shape
total_shape = np.tile(total_shape, [rows, 1])
# convert negative indices into equivalent positive indices
start_negatives = start < 0
end_negatives = end < 0
start = (start + total_shape) * start_negatives + \
start * ~(start_negatives)
end = (end + total_shape) * end_negatives + \
end * ~(end_negatives)
if np.any((end - start) < 0):
raise IndexError('end coordinates must be greater or equal to start')
# bit_perm is the total number of terms in the expression
# of S. For example, in the case of a 4x4 2D image
# sum of image from (1,1) to (2,2) is given by
# S = + ii[2, 2]
# - ii[0, 2] - ii[2, 0]
# + ii[0, 0]
# The total terms = 4 = 2 ** 2(dims)
S = np.zeros(rows)
bit_perm = 2 ** ii.ndim
width = len(bin(bit_perm - 1)[2:])
# Sum of a (hyper)cube, from an integral image is computed using
# values at the corners of the cube. The corners of cube are
# selected using binary numbers as described in the following example.
# In a 3D cube there are 8 corners. The corners are selected using
# binary numbers 000 to 111. Each number is called a permutation, where
# perm(000) means, select end corner where none of the coordinates
# is replaced, i.e ii[end_row, end_col, end_depth]. Similarly, perm(001)
# means replace last coordinate by start - 1, i.e
# ii[end_row, end_col, start_depth - 1], and so on.
# Sign of even permutations is positive, while those of odd is negative.
# If 'start_coord - 1' is -ve it is labeled bad and not considered in
# the final sum.
for i in range(bit_perm): # for all permutations
# boolean permutation array eg [True, False] for '10'
binary = bin(i)[2:].zfill(width)
bool_mask = [bit == '1' for bit in binary]
sign = (-1)**sum(bool_mask) # determine sign of permutation
bad = [np.any(((start[r] - 1) * bool_mask) < 0)
for r in range(rows)] # find out bad start rows
corner_points = (end * (np.invert(bool_mask))) + \
((start - 1) * bool_mask) # find corner for each row
S += [sign * ii[tuple(corner_points[r])] if(not bad[r]) else 0
for r in range(rows)] # add only good rows
return S
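To make the corner-permutation bookkeeping above concrete, a small worked sketch comparing `integrate` against the explicit four-corner identity for a 2-D window (NumPy only; the window coordinates are arbitrary).

import numpy as np
from skimage.transform import integral_image, integrate

x = np.random.default_rng(42).integers(0, 10, size=(6, 7))
ii = integral_image(x)

# Window from (r0, c0) to (r1, c1), inclusive.
r0, c0, r1, c1 = 2, 1, 4, 5
window_sum = integrate(ii, [(r0, c0)], [(r1, c1)])[0]

# Same sum via the 2 ** ndim = 4 corner terms described above:
#   S = ii[r1, c1] - ii[r0-1, c1] - ii[r1, c0-1] + ii[r0-1, c0-1]
corners = (ii[r1, c1] - ii[r0 - 1, c1]
           - ii[r1, c0 - 1] + ii[r0 - 1, c0 - 1])

assert window_sum == corners == x[r0:r1 + 1, c0:c1 + 1].sum()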

@@ -0,0 +1,317 @@
import math
import numpy as np
from scipy import ndimage as ndi
from ..transform import resize
from .._shared.utils import convert_to_float
def _smooth(image, sigma, mode, cval, multichannel=None):
"""Return image with each channel smoothed by the Gaussian filter."""
smoothed = np.empty_like(image)
# apply Gaussian filter to all channels independently
if multichannel:
sigma = (sigma, ) * (image.ndim - 1) + (0, )
ndi.gaussian_filter(image, sigma, output=smoothed,
mode=mode, cval=cval)
return smoothed
def _check_factor(factor):
if factor <= 1:
raise ValueError('scale factor must be greater than 1')
def pyramid_reduce(image, downscale=2, sigma=None, order=1,
mode='reflect', cval=0, multichannel=False,
preserve_range=False):
"""Smooth and then downsample image.
Parameters
----------
image : ndarray
Input image.
downscale : float, optional
Downscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of downsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
multichannel : bool, optional
Whether the last axis of the image is to be interpreted as multiple
channels or another spatial dimension.
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input
image is converted according to the conventions of `img_as_float`.
Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
Returns
-------
out : array
Smoothed and downsampled float image.
References
----------
.. [1] http://persci.mit.edu/pub_pdfs/pyramid83.pdf
"""
_check_factor(downscale)
image = convert_to_float(image, preserve_range)
out_shape = tuple([math.ceil(d / float(downscale)) for d in image.shape])
if multichannel:
out_shape = out_shape[:-1]
if sigma is None:
# automatically determine sigma which covers > 99% of distribution
sigma = 2 * downscale / 6.0
smoothed = _smooth(image, sigma, mode, cval, multichannel)
out = resize(smoothed, out_shape, order=order, mode=mode, cval=cval,
anti_aliasing=False)
return out
def pyramid_expand(image, upscale=2, sigma=None, order=1,
mode='reflect', cval=0, multichannel=False,
preserve_range=False):
"""Upsample and then smooth image.
Parameters
----------
image : ndarray
Input image.
upscale : float, optional
Upscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * upscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of upsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
multichannel : bool, optional
Whether the last axis of the image is to be interpreted as multiple
channels or another spatial dimension.
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input
image is converted according to the conventions of `img_as_float`.
Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
Returns
-------
out : array
Upsampled and smoothed float image.
References
----------
.. [1] http://persci.mit.edu/pub_pdfs/pyramid83.pdf
"""
_check_factor(upscale)
image = convert_to_float(image, preserve_range)
out_shape = tuple([math.ceil(upscale * d) for d in image.shape])
if multichannel:
out_shape = out_shape[:-1]
if sigma is None:
# automatically determine sigma which covers > 99% of distribution
sigma = 2 * upscale / 6.0
resized = resize(image, out_shape, order=order,
mode=mode, cval=cval, anti_aliasing=False)
out = _smooth(resized, sigma, mode, cval, multichannel)
return out
def pyramid_gaussian(image, max_layer=-1, downscale=2, sigma=None, order=1,
mode='reflect', cval=0, multichannel=False,
preserve_range=False):
"""Yield images of the Gaussian pyramid formed by the input image.
Recursively applies the `pyramid_reduce` function to the image, and yields
the downscaled images.
Note that the first image of the pyramid will be the original, unscaled
image. The total number of images is `max_layer + 1`. In case all layers
are computed, the last image is either a one-pixel image or the image where
the reduction does not change its shape.
Parameters
----------
image : ndarray
Input image.
max_layer : int, optional
Number of layers for the pyramid. 0th layer is the original image.
Default is -1 which builds all possible layers.
downscale : float, optional
Downscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of downsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
multichannel : bool, optional
Whether the last axis of the image is to be interpreted as multiple
channels or another spatial dimension.
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input
image is converted according to the conventions of `img_as_float`.
Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
Returns
-------
pyramid : generator
Generator yielding pyramid layers as float images.
References
----------
.. [1] http://persci.mit.edu/pub_pdfs/pyramid83.pdf
"""
_check_factor(downscale)
# cast to float for consistent data type in pyramid
image = convert_to_float(image, preserve_range)
layer = 0
current_shape = image.shape
prev_layer_image = image
yield image
# build downsampled images until max_layer is reached or downscale process
# does not change image size
while layer != max_layer:
layer += 1
layer_image = pyramid_reduce(prev_layer_image, downscale, sigma, order,
mode, cval, multichannel=multichannel)
prev_shape = np.asarray(current_shape)
prev_layer_image = layer_image
current_shape = np.asarray(layer_image.shape)
# no change to previous pyramid layer
if np.all(current_shape == prev_shape):
break
yield layer_image
def pyramid_laplacian(image, max_layer=-1, downscale=2, sigma=None, order=1,
mode='reflect', cval=0, multichannel=False,
preserve_range=False):
"""Yield images of the laplacian pyramid formed by the input image.
Each layer contains the difference between the downsampled and the
downsampled, smoothed image::
layer = resize(prev_layer) - smooth(resize(prev_layer))
Note that the first image of the pyramid will be the difference between the
original, unscaled image and its smoothed version. The total number of
images is `max_layer + 1`. In case all layers are computed, the last image
is either a one-pixel image or the image where the reduction does not
change its shape.
Parameters
----------
image : ndarray
Input image.
max_layer : int, optional
Number of layers for the pyramid. 0th layer is the original image.
Default is -1 which builds all possible layers.
downscale : float, optional
Downscale factor.
sigma : float, optional
Sigma for Gaussian filter. Default is `2 * downscale / 6.0` which
corresponds to a filter mask twice the size of the scale factor that
covers more than 99% of the Gaussian distribution.
order : int, optional
Order of splines used in interpolation of downsampling. See
`skimage.transform.warp` for detail.
mode : {'reflect', 'constant', 'edge', 'symmetric', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
cval : float, optional
Value to fill past edges of input if mode is 'constant'.
multichannel : bool, optional
Whether the last axis of the image is to be interpreted as multiple
channels or another spatial dimension.
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input
image is converted according to the conventions of `img_as_float`.
Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
Returns
-------
pyramid : generator
Generator yielding pyramid layers as float images.
References
----------
.. [1] http://persci.mit.edu/pub_pdfs/pyramid83.pdf
.. [2] http://sepwww.stanford.edu/data/media/public/sep/morgan/texturematch/paper_html/node3.html
"""
_check_factor(downscale)
# cast to float for consistent data type in pyramid
image = convert_to_float(image, preserve_range)
if sigma is None:
# automatically determine sigma which covers > 99% of distribution
sigma = 2 * downscale / 6.0
current_shape = image.shape
smoothed_image = _smooth(image, sigma, mode, cval, multichannel)
yield image - smoothed_image
# build downsampled images until max_layer is reached or downscale process
# does not change image size
if max_layer == -1:
max_layer = int(np.ceil(math.log(np.max(current_shape), downscale)))
for layer in range(max_layer):
out_shape = tuple(
[math.ceil(d / float(downscale)) for d in current_shape])
if multichannel:
out_shape = out_shape[:-1]
resized_image = resize(smoothed_image, out_shape, order=order,
mode=mode, cval=cval, anti_aliasing=False)
smoothed_image = _smooth(resized_image, sigma, mode, cval,
multichannel)
current_shape = np.asarray(resized_image.shape)
yield resized_image - smoothed_image
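A short sketch (assuming scikit-image is installed) of the generator behaviour described above: each Gaussian pyramid layer roughly halves the previous one until `max_layer` is reached.

import numpy as np
from skimage.transform import pyramid_gaussian

image = np.random.default_rng(0).random((128, 96))

for level, layer in enumerate(pyramid_gaussian(image, downscale=2, max_layer=4)):
    print(level, layer.shape)
# Prints shapes (128, 96), (64, 48), (32, 24), (16, 12), (8, 6).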

@@ -0,0 +1,514 @@
import numpy as np
from scipy.interpolate import interp1d
from scipy.constants import golden_ratio
from ._warps import warp
from ._radon_transform import sart_projection_update
from .._shared.fft import fftmodule
from .._shared.utils import deprecate_kwarg, convert_to_float
from warnings import warn
from functools import partial
if fftmodule is np.fft:
# fallback from scipy.fft to scipy.fftpack instead of numpy.fft
# (fftpack preserves single precision while numpy.fft does not)
from scipy.fftpack import fft, ifft
else:
fft = fftmodule.fft
ifft = fftmodule.ifft
__all__ = ['radon', 'order_angles_golden_ratio', 'iradon', 'iradon_sart']
def radon(image, theta=None, circle=True, *, preserve_range=None):
"""
Calculates the radon transform of an image given specified
projection angles.
Parameters
----------
image : array_like
Input image. The rotation axis will be located in the pixel with
indices ``(image.shape[0] // 2, image.shape[1] // 2)``.
theta : array_like, optional
Projection angles (in degrees). If `None`, the value is set to
np.arange(180).
circle : boolean, optional
Assume image is zero outside the inscribed circle, making the
width of each projection (the first dimension of the sinogram)
equal to ``min(image.shape)``.
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input
image is converted according to the conventions of `img_as_float`.
Also see https://scikit-image.org/docs/dev/user_guide/data_types.html
Returns
-------
radon_image : ndarray
Radon transform (sinogram). The tomography rotation axis will lie
at the pixel index ``radon_image.shape[0] // 2`` along the 0th
dimension of ``radon_image``.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
.. [2] B.R. Ramesh, N. Srinivasa, K. Rajgopal, "An Algorithm for Computing
the Discrete Radon Transform With Some Applications", Proceedings of
the Fourth IEEE Region 10 International Conference, TENCON '89, 1989
Notes
-----
Based on code of Justin K. Romberg
(https://www.clear.rice.edu/elec431/projects96/DSP/bpanalysis.html)
"""
if image.ndim != 2:
raise ValueError('The input image must be 2-D')
if theta is None:
theta = np.arange(180)
if preserve_range is None and np.issubdtype(image.dtype, np.integer):
warn('Image dtype is not float. By default radon will assume '
'you want to preserve the range of your image '
'(preserve_range=True). In scikit-image 0.18 this behavior will '
'change to preserve_range=False. To avoid this warning, '
'explicitly specify the preserve_range parameter.',
stacklevel=2)
preserve_range = True
image = convert_to_float(image, preserve_range)
if circle:
shape_min = min(image.shape)
radius = shape_min // 2
img_shape = np.array(image.shape)
coords = np.array(np.ogrid[:image.shape[0], :image.shape[1]])
dist = ((coords - img_shape // 2) ** 2).sum(0)
outside_reconstruction_circle = dist > radius ** 2
if np.any(image[outside_reconstruction_circle]):
warn('Radon transform: image must be zero outside the '
'reconstruction circle')
# Crop image to make it square
slices = tuple(slice(int(np.ceil(excess / 2)),
int(np.ceil(excess / 2) + shape_min))
if excess > 0 else slice(None)
for excess in (img_shape - shape_min))
padded_image = image[slices]
else:
diagonal = np.sqrt(2) * max(image.shape)
pad = [int(np.ceil(diagonal - s)) for s in image.shape]
new_center = [(s + p) // 2 for s, p in zip(image.shape, pad)]
old_center = [s // 2 for s in image.shape]
pad_before = [nc - oc for oc, nc in zip(old_center, new_center)]
pad_width = [(pb, p - pb) for pb, p in zip(pad_before, pad)]
padded_image = np.pad(image, pad_width, mode='constant',
constant_values=0)
# padded_image is always square
if padded_image.shape[0] != padded_image.shape[1]:
raise ValueError('padded_image must be a square')
center = padded_image.shape[0] // 2
radon_image = np.zeros((padded_image.shape[0], len(theta)),
dtype=image.dtype)
for i, angle in enumerate(np.deg2rad(theta)):
cos_a, sin_a = np.cos(angle), np.sin(angle)
R = np.array([[cos_a, sin_a, -center * (cos_a + sin_a - 1)],
[-sin_a, cos_a, -center * (cos_a - sin_a - 1)],
[0, 0, 1]])
rotated = warp(padded_image, R, clip=False)
radon_image[:, i] = rotated.sum(0)
return radon_image
def _sinogram_circle_to_square(sinogram):
diagonal = int(np.ceil(np.sqrt(2) * sinogram.shape[0]))
pad = diagonal - sinogram.shape[0]
old_center = sinogram.shape[0] // 2
new_center = diagonal // 2
pad_before = new_center - old_center
pad_width = ((pad_before, pad - pad_before), (0, 0))
return np.pad(sinogram, pad_width, mode='constant', constant_values=0)
def _get_fourier_filter(size, filter_name):
"""Construct the Fourier filter.
This computation lessens artifacts and removes a small bias as
explained in [1], Chap 3. Equation 61.
Parameters
----------
size: int
filter size. Must be even.
filter_name: str
Filter used in frequency domain filtering. Filters available:
ramp, shepp-logan, cosine, hamming, hann. Assign None to use
no filter.
Returns
-------
fourier_filter: ndarray
The computed Fourier filter.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
"""
n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=int),
np.arange(size / 2 - 1, 0, -2, dtype=int)))
f = np.zeros(size)
f[0] = 0.25
f[1::2] = -1 / (np.pi * n) ** 2
# Computing the ramp filter from the fourier transform of its
# frequency domain representation lessens artifacts and removes a
# small bias as explained in [1], Chap 3. Equation 61
fourier_filter = 2 * np.real(fft(f)) # ramp filter
if filter_name == "ramp":
pass
elif filter_name == "shepp-logan":
# Start from first element to avoid divide by zero
omega = np.pi * fftmodule.fftfreq(size)[1:]
fourier_filter[1:] *= np.sin(omega) / omega
elif filter_name == "cosine":
freq = np.linspace(0, np.pi, size, endpoint=False)
cosine_filter = fftmodule.fftshift(np.sin(freq))
fourier_filter *= cosine_filter
elif filter_name == "hamming":
fourier_filter *= fftmodule.fftshift(np.hamming(size))
elif filter_name == "hann":
fourier_filter *= fftmodule.fftshift(np.hanning(size))
elif filter_name is None:
fourier_filter[:] = 1
return fourier_filter[:, np.newaxis]
@deprecate_kwarg(kwarg_mapping={'filter': 'filter_name'},
removed_version="0.19")
def iradon(radon_image, theta=None, output_size=None,
filter_name="ramp", interpolation="linear", circle=True):
"""Inverse radon transform.
Reconstruct an image from the radon transform, using the filtered
back projection algorithm.
Parameters
----------
radon_image : array_like, dtype=float
Image containing radon transform (sinogram). Each column of
the image corresponds to a projection along a different
angle. The tomography rotation axis should lie at the pixel
index ``radon_image.shape[0] // 2`` along the 0th dimension of
``radon_image``.
theta : array_like, dtype=float, optional
Reconstruction angles (in degrees). Default: m angles evenly spaced
between 0 and 180 (if the shape of `radon_image` is (N, M)).
output_size : int, optional
Number of rows and columns in the reconstruction.
filter_name : str, optional
Filter used in frequency domain filtering. Ramp filter used by default.
Filters available: ramp, shepp-logan, cosine, hamming, hann.
Assign None to use no filter.
interpolation : str, optional
Interpolation method used in reconstruction. Methods available:
'linear', 'nearest', and 'cubic' ('cubic' is slow).
circle : boolean, optional
Assume the reconstructed image is zero outside the inscribed circle.
Also changes the default output_size to match the behaviour of
``radon`` called with ``circle=True``.
Returns
-------
reconstructed : ndarray
Reconstructed image. The rotation axis will be located in the pixel
with indices
``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.
.. versionchanged:: 0.19
In ``iradon``, ``filter`` argument is deprecated in favor of
``filter_name``.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
.. [2] B.R. Ramesh, N. Srinivasa, K. Rajgopal, "An Algorithm for Computing
the Discrete Radon Transform With Some Applications", Proceedings of
the Fourth IEEE Region 10 International Conference, TENCON '89, 1989
Notes
-----
It applies the Fourier slice theorem to reconstruct an image by
multiplying the frequency domain of the filter with the FFT of the
projection data. This algorithm is called filtered back projection.
"""
if radon_image.ndim != 2:
raise ValueError('The input image must be 2-D')
if theta is None:
theta = np.linspace(0, 180, radon_image.shape[1], endpoint=False)
angles_count = len(theta)
if angles_count != radon_image.shape[1]:
raise ValueError("The given ``theta`` does not match the number of "
"projections in ``radon_image``.")
interpolation_types = ('linear', 'nearest', 'cubic')
if interpolation not in interpolation_types:
raise ValueError("Unknown interpolation: %s" % interpolation)
filter_types = ('ramp', 'shepp-logan', 'cosine', 'hamming', 'hann', None)
if filter_name not in filter_types:
raise ValueError("Unknown filter: %s" % filter_name)
img_shape = radon_image.shape[0]
if output_size is None:
# If output size not specified, estimate from input radon image
if circle:
output_size = img_shape
else:
output_size = int(np.floor(np.sqrt((img_shape) ** 2 / 2.0)))
if circle:
radon_image = _sinogram_circle_to_square(radon_image)
img_shape = radon_image.shape[0]
# Resize image to next power of two (but no less than 64) for
# Fourier analysis; speeds up Fourier and lessens artifacts
projection_size_padded = max(64, int(2 ** np.ceil(np.log2(2 * img_shape))))
pad_width = ((0, projection_size_padded - img_shape), (0, 0))
img = np.pad(radon_image, pad_width, mode='constant', constant_values=0)
# Apply filter in Fourier domain
fourier_filter = _get_fourier_filter(projection_size_padded, filter_name)
projection = fft(img, axis=0) * fourier_filter
radon_filtered = np.real(ifft(projection, axis=0)[:img_shape, :])
# Reconstruct image by interpolation
reconstructed = np.zeros((output_size, output_size))
radius = output_size // 2
xpr, ypr = np.mgrid[:output_size, :output_size] - radius
x = np.arange(img_shape) - img_shape // 2
for col, angle in zip(radon_filtered.T, np.deg2rad(theta)):
t = ypr * np.cos(angle) - xpr * np.sin(angle)
if interpolation == 'linear':
interpolant = partial(np.interp, xp=x, fp=col, left=0, right=0)
else:
interpolant = interp1d(x, col, kind=interpolation,
bounds_error=False, fill_value=0)
reconstructed += interpolant(t)
if circle:
out_reconstruction_circle = (xpr ** 2 + ypr ** 2) > radius ** 2
reconstructed[out_reconstruction_circle] = 0.
return reconstructed * np.pi / (2 * angles_count)
def order_angles_golden_ratio(theta):
"""Order angles to reduce the amount of correlated information in
subsequent projections.
Parameters
----------
theta : 1D array of floats
Projection angles in degrees. Duplicate angles are not allowed.
Returns
-------
indices_generator : generator yielding unsigned integers
The returned generator yields indices into ``theta`` such that
``theta[indices]`` gives the approximate golden ratio ordering
of the projections. In total, ``len(theta)`` indices are yielded.
All non-negative integers < ``len(theta)`` are yielded exactly once.
Notes
-----
The method used here is that of the golden ratio introduced
by T. Kohler.
References
----------
.. [1] Kohler, T. "A projection access scheme for iterative
reconstruction based on the golden section." Nuclear Science
Symposium Conference Record, 2004 IEEE. Vol. 6. IEEE, 2004.
.. [2] Winkelmann, Stefanie, et al. "An optimal radial profile order
based on the Golden Ratio for time-resolved MRI."
Medical Imaging, IEEE Transactions on 26.1 (2007): 68-76.
"""
interval = 180
remaining_indices = list(np.argsort(theta)) # indices into theta
# yield an arbitrary angle to start things off
angle = theta[remaining_indices[0]]
yield remaining_indices.pop(0)
# determine subsequent angles using the golden ratio method
angle_increment = interval / golden_ratio ** 2
while remaining_indices:
remaining_angles = theta[remaining_indices]
angle = (angle + angle_increment) % interval
index_above = np.searchsorted(remaining_angles, angle)
index_below = index_above - 1
index_above %= len(remaining_indices)
diff_below = abs(angle - remaining_angles[index_below])
distance_below = min(diff_below % interval, diff_below % -interval)
diff_above = abs(angle - remaining_angles[index_above])
distance_above = min(diff_above % interval, diff_above % -interval)
if distance_below < distance_above:
yield remaining_indices.pop(index_below)
else:
yield remaining_indices.pop(index_above)
def iradon_sart(radon_image, theta=None, image=None, projection_shifts=None,
clip=None, relaxation=0.15, dtype=None):
"""Inverse radon transform.
Reconstruct an image from the radon transform, using a single iteration of
the Simultaneous Algebraic Reconstruction Technique (SART) algorithm.
Parameters
----------
radon_image : 2D array
Image containing radon transform (sinogram). Each column of
the image corresponds to a projection along a different angle. The
tomography rotation axis should lie at the pixel index
``radon_image.shape[0] // 2`` along the 0th dimension of
``radon_image``.
theta : 1D array, optional
Reconstruction angles (in degrees). Default: m angles evenly spaced
between 0 and 180 (if the shape of `radon_image` is (N, M)).
image : 2D array, optional
Image containing an initial reconstruction estimate. Shape of this
array should be ``(radon_image.shape[0], radon_image.shape[0])``. The
default is an array of zeros.
projection_shifts : 1D array, optional
Shift the projections contained in ``radon_image`` (the sinogram) by
this many pixels before reconstructing the image. The i'th value
defines the shift of the i'th column of ``radon_image``.
clip : length-2 sequence of floats, optional
Force all values in the reconstructed tomogram to lie in the range
``[clip[0], clip[1]]``
relaxation : float, optional
Relaxation parameter for the update step. A higher value can
improve the convergence rate, but one runs the risk of instabilities.
Values close to or higher than 1 are not recommended.
dtype : dtype, optional
Output data type, must be floating point. By default, if input
data type is not float, input is cast to double, otherwise
dtype is set to input data type.
Returns
-------
reconstructed : ndarray
Reconstructed image. The rotation axis will be located in the pixel
with indices
``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.
Notes
-----
Algebraic Reconstruction Techniques are based on formulating the tomography
reconstruction problem as a set of linear equations. Along each ray,
the projected value is the sum of all the values of the cross section along
the ray. A typical feature of SART (and a few other variants of algebraic
techniques) is that it samples the cross section at equidistant points
along the ray, using linear interpolation between the pixel values of the
cross section. The resulting set of linear equations is then solved using
a slightly modified Kaczmarz method.
When using SART, a single iteration is usually sufficient to obtain a good
reconstruction. Further iterations will tend to enhance high-frequency
information, but will also often increase the noise.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
.. [2] AH Andersen, AC Kak, "Simultaneous algebraic reconstruction
technique (SART): a superior implementation of the ART algorithm",
Ultrasonic Imaging 6 pp 81--94 (1984)
.. [3] S Kaczmarz, "Angenäherte auflösung von systemen linearer
gleichungen", Bulletin International de l'Académie Polonaise des
Sciences et des Lettres 35 pp 355--357 (1937)
.. [4] Kohler, T. "A projection access scheme for iterative
reconstruction based on the golden section." Nuclear Science
Symposium Conference Record, 2004 IEEE. Vol. 6. IEEE, 2004.
.. [5] Kaczmarz' method, Wikipedia,
https://en.wikipedia.org/wiki/Kaczmarz_method
"""
if radon_image.ndim != 2:
raise ValueError('radon_image must be two dimensional')
if dtype is None:
if radon_image.dtype.char in 'fd':
dtype = radon_image.dtype
else:
warn("Only floating point data type are valid for SART inverse "
"radon transform. Input data is cast to float. To disable "
"this warning, please cast image_radon to float.")
dtype = np.dtype(float)
elif np.dtype(dtype).char not in 'fd':
raise ValueError("Only floating point data type are valid for inverse "
"radon transform.")
dtype = np.dtype(dtype)
radon_image = radon_image.astype(dtype, copy=False)
reconstructed_shape = (radon_image.shape[0], radon_image.shape[0])
if theta is None:
theta = np.linspace(0, 180, radon_image.shape[1],
endpoint=False, dtype=dtype)
elif len(theta) != radon_image.shape[1]:
raise ValueError('Shape of theta (%s) does not match the '
'number of projections (%d)'
% (len(theta), radon_image.shape[1]))
else:
theta = np.asarray(theta, dtype=dtype)
if image is None:
image = np.zeros(reconstructed_shape, dtype=dtype)
elif image.shape != reconstructed_shape:
raise ValueError('Shape of image (%s) does not match first dimension '
'of radon_image (%s)'
% (image.shape, reconstructed_shape))
elif image.dtype != dtype:
warn("image dtype does not match output dtype: "
"image is cast to {}".format(dtype))
image = np.asarray(image, dtype=dtype)
if projection_shifts is None:
projection_shifts = np.zeros((radon_image.shape[1],), dtype=dtype)
elif len(projection_shifts) != radon_image.shape[1]:
raise ValueError('Shape of projection_shifts (%s) does not match the '
'number of projections (%d)'
% (len(projection_shifts), radon_image.shape[1]))
else:
projection_shifts = np.asarray(projection_shifts, dtype=dtype)
if clip is not None:
if len(clip) != 2:
raise ValueError('clip must be a length-2 sequence')
clip = np.asarray(clip, dtype=dtype)
for angle_index in order_angles_golden_ratio(theta):
image_update = sart_projection_update(image, theta[angle_index],
radon_image[:, angle_index],
projection_shifts[angle_index])
image += relaxation * image_update
if clip is not None:
image = np.clip(image, clip[0], clip[1])
return image
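A compact end-to-end sketch (assuming scikit-image is installed) of the forward/inverse pair defined in this file: project a disc, reconstruct it with filtered back projection, then refine with a single SART iteration.

import numpy as np
from skimage.transform import radon, iradon, iradon_sart

# A simple test object: a filled disc inside the reconstruction circle.
size = 64
yy, xx = np.mgrid[:size, :size] - size // 2
image = ((xx ** 2 + yy ** 2) < (size // 4) ** 2).astype(float)

theta = np.linspace(0., 180., 90, endpoint=False)
sinogram = radon(image, theta=theta, circle=True)        # shape (64, 90)

fbp = iradon(sinogram, theta=theta, filter_name="ramp", circle=True)
sart = iradon_sart(sinogram, theta=theta)                # one SART iteration

print("FBP  RMS error:", np.sqrt(np.mean((fbp - image) ** 2)))
print("SART RMS error:", np.sqrt(np.mean((sart - image) ** 2)))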

@@ -0,0 +1,40 @@
#!/usr/bin/env python
import os
from skimage._build import cython
base_path = os.path.abspath(os.path.dirname(__file__))
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
config = Configuration('transform', parent_package, top_path)
cython(['_hough_transform.pyx',
'_warps_cy.pyx',
'_radon_transform.pyx'], working_path=base_path)
config.add_extension('_hough_transform', sources=['_hough_transform.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension('_warps_cy', sources=['_warps_cy.c'],
include_dirs=[get_numpy_include_dirs(), '../_shared'])
config.add_extension('_radon_transform',
sources=['_radon_transform.c'],
include_dirs=[get_numpy_include_dirs()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(maintainer='scikit-image Developers',
author='scikit-image Developers',
maintainer_email='scikit-image@python.org',
description='Transforms',
url='https://github.com/scikit-image/scikit-image',
license='SciPy License (BSD Style)',
**(configuration(top_path='').todict())
)

@@ -0,0 +1,9 @@
from ..._shared.testing import setup_test, teardown_test
def setup():
setup_test()
def teardown():
teardown_test()

@@ -0,0 +1,18 @@
import numpy as np
from skimage.transform import frt2, ifrt2
def test_frt():
SIZE = 59
try:
import sympy.ntheory as sn
assert sn.isprime(SIZE)
except ImportError:
pass
# Generate a test image
L = np.tri(SIZE, dtype=np.int32) + np.tri(SIZE, dtype=np.int32)[::-1]
f = frt2(L)
fi = ifrt2(f)
assert len(np.nonzero(L - fi)[0]) == 0

@@ -0,0 +1,523 @@
import numpy as np
import re
from skimage.transform._geometric import GeometricTransform
from skimage.transform import (estimate_transform, matrix_transform,
EuclideanTransform, SimilarityTransform,
AffineTransform, FundamentalMatrixTransform,
EssentialMatrixTransform, ProjectiveTransform,
PolynomialTransform, PiecewiseAffineTransform)
from skimage._shared import testing
from skimage._shared.testing import assert_equal, assert_almost_equal
import textwrap
SRC = np.array([
[-12.3705, -10.5075],
[-10.7865, 15.4305],
[8.6985, 10.8675],
[11.4975, -9.5715],
[7.8435, 7.4835],
[-5.3325, 6.5025],
[6.7905, -6.3765],
[-6.1695, -0.8235],
])
DST = np.array([
[0, 0],
[0, 5800],
[4900, 5800],
[4900, 0],
[4479, 4580],
[1176, 3660],
[3754, 790],
[1024, 1931],
])
def test_estimate_transform():
for tform in ('euclidean', 'similarity', 'affine', 'projective',
'polynomial'):
estimate_transform(tform, SRC[:2, :], DST[:2, :])
with testing.raises(ValueError):
estimate_transform('foobar', SRC[:2, :], DST[:2, :])
def test_matrix_transform():
tform = AffineTransform(scale=(0.1, 0.5), rotation=2)
assert_equal(tform(SRC), matrix_transform(SRC, tform.params))
def test_euclidean_estimation():
# exact solution
tform = estimate_transform('euclidean', SRC[:2, :], SRC[:2, :] + 10)
assert_almost_equal(tform(SRC[:2, :]), SRC[:2, :] + 10)
assert_almost_equal(tform.params[0, 0], tform.params[1, 1])
assert_almost_equal(tform.params[0, 1], - tform.params[1, 0])
# over-determined
tform2 = estimate_transform('euclidean', SRC, DST)
assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
assert_almost_equal(tform2.params[0, 0], tform2.params[1, 1])
assert_almost_equal(tform2.params[0, 1], - tform2.params[1, 0])
# via estimate method
tform3 = EuclideanTransform()
tform3.estimate(SRC, DST)
assert_almost_equal(tform3.params, tform2.params)
def test_euclidean_init():
# init with implicit parameters
rotation = 1
translation = (1, 1)
tform = EuclideanTransform(rotation=rotation, translation=translation)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = EuclideanTransform(tform.params)
assert_almost_equal(tform2.rotation, rotation)
assert_almost_equal(tform2.translation, translation)
# test special case for scale if rotation=0
rotation = 0
translation = (1, 1)
tform = EuclideanTransform(rotation=rotation, translation=translation)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
# test special case for scale if rotation=90deg
rotation = np.pi / 2
translation = (1, 1)
tform = EuclideanTransform(rotation=rotation, translation=translation)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
def test_similarity_estimation():
# exact solution
tform = estimate_transform('similarity', SRC[:2, :], DST[:2, :])
assert_almost_equal(tform(SRC[:2, :]), DST[:2, :])
assert_almost_equal(tform.params[0, 0], tform.params[1, 1])
assert_almost_equal(tform.params[0, 1], - tform.params[1, 0])
# over-determined
tform2 = estimate_transform('similarity', SRC, DST)
assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
assert_almost_equal(tform2.params[0, 0], tform2.params[1, 1])
assert_almost_equal(tform2.params[0, 1], - tform2.params[1, 0])
# via estimate method
tform3 = SimilarityTransform()
tform3.estimate(SRC, DST)
assert_almost_equal(tform3.params, tform2.params)
def test_similarity_init():
# init with implicit parameters
scale = 0.1
rotation = 1
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_almost_equal(tform.scale, scale)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = SimilarityTransform(tform.params)
assert_almost_equal(tform2.scale, scale)
assert_almost_equal(tform2.rotation, rotation)
assert_almost_equal(tform2.translation, translation)
# test special case for scale if rotation=0
scale = 0.1
rotation = 0
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_almost_equal(tform.scale, scale)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
# test special case for scale if rotation=90deg
scale = 0.1
rotation = np.pi / 2
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_almost_equal(tform.scale, scale)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
# test special case for scale where the rotation isn't exactly 90deg,
# but very close
scale = 1.0
rotation = np.pi / 2
translation = (0, 0)
params = np.array([[0, -1, 1.33226763e-15],
[1, 2.22044605e-16, -1.33226763e-15],
[0, 0, 1]])
tform = SimilarityTransform(params)
assert_almost_equal(tform.scale, scale)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.translation, translation)
def test_affine_estimation():
# exact solution
tform = estimate_transform('affine', SRC[:3, :], DST[:3, :])
assert_almost_equal(tform(SRC[:3, :]), DST[:3, :])
# over-determined
tform2 = estimate_transform('affine', SRC, DST)
assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
# via estimate method
tform3 = AffineTransform()
tform3.estimate(SRC, DST)
assert_almost_equal(tform3.params, tform2.params)
def test_affine_init():
# init with implicit parameters
scale = (0.1, 0.13)
rotation = 1
shear = 0.1
translation = (1, 1)
tform = AffineTransform(scale=scale, rotation=rotation, shear=shear,
translation=translation)
assert_almost_equal(tform.scale, scale)
assert_almost_equal(tform.rotation, rotation)
assert_almost_equal(tform.shear, shear)
assert_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = AffineTransform(tform.params)
assert_almost_equal(tform2.scale, scale)
assert_almost_equal(tform2.rotation, rotation)
assert_almost_equal(tform2.shear, shear)
assert_almost_equal(tform2.translation, translation)
# scalar vs. tuple scale arguments
    assert_almost_equal(AffineTransform(scale=0.5).scale,
                        AffineTransform(scale=(0.5, 0.5)).scale)
def test_piecewise_affine():
tform = PiecewiseAffineTransform()
tform.estimate(SRC, DST)
# make sure each single affine transform is exactly estimated
assert_almost_equal(tform(SRC), DST)
assert_almost_equal(tform.inverse(DST), SRC)
def test_fundamental_matrix_estimation():
src = np.array([1.839035, 1.924743, 0.543582, 0.375221,
0.473240, 0.142522, 0.964910, 0.598376,
0.102388, 0.140092, 15.994343, 9.622164,
0.285901, 0.430055, 0.091150, 0.254594]).reshape(-1, 2)
dst = np.array([1.002114, 1.129644, 1.521742, 1.846002,
1.084332, 0.275134, 0.293328, 0.588992,
0.839509, 0.087290, 1.779735, 1.116857,
0.878616, 0.602447, 0.642616, 1.028681]).reshape(-1, 2)
tform = estimate_transform('fundamental', src, dst)
# Reference values obtained using COLMAP SfM library.
tform_ref = np.array([[-0.217859, 0.419282, -0.0343075],
[-0.0717941, 0.0451643, 0.0216073],
[0.248062, -0.429478, 0.0221019]])
assert_almost_equal(tform.params, tform_ref, 6)
def test_fundamental_matrix_residuals():
essential_matrix_tform = EssentialMatrixTransform(
rotation=np.eye(3), translation=np.array([1, 0, 0]))
tform = FundamentalMatrixTransform()
tform.params = essential_matrix_tform.params
src = np.array([[0, 0], [0, 0], [0, 0]])
dst = np.array([[2, 0], [2, 1], [2, 2]])
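    # With E = [t]_x for t = (1, 0, 0), every src point at the origin has the
    # epipolar line y = 0 in the destination image, so dst points at
    # y = 0, 1, 2 should give monotonically increasing residuals.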
assert_almost_equal(tform.residuals(src, dst)**2, [0, 0.5, 2])
def test_fundamental_matrix_forward():
essential_matrix_tform = EssentialMatrixTransform(
rotation=np.eye(3), translation=np.array([1, 0, 0]))
tform = FundamentalMatrixTransform()
tform.params = essential_matrix_tform.params
src = np.array([[0, 0], [0, 1], [1, 1]])
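    # The forward call should return, for each src point, the coefficients
    # (a, b, c) of its epipolar line a*x + b*y + c = 0 in the destination
    # image; here all three lines are horizontal.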
assert_almost_equal(tform(src), [[0, -1, 0], [0, -1, 1], [0, -1, 1]])
def test_fundamental_matrix_inverse():
essential_matrix_tform = EssentialMatrixTransform(
rotation=np.eye(3), translation=np.array([1, 0, 0]))
tform = FundamentalMatrixTransform()
tform.params = essential_matrix_tform.params
src = np.array([[0, 0], [0, 1], [1, 1]])
assert_almost_equal(tform.inverse(src),
[[0, 1, 0], [0, 1, -1], [0, 1, -1]])
def test_essential_matrix_init():
tform = EssentialMatrixTransform(rotation=np.eye(3),
translation=np.array([0, 0, 1]))
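    # With R = I and t = (0, 0, 1), the essential matrix reduces to the
    # cross-product matrix [t]_x of the translation, asserted below.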
assert_equal(tform.params,
np.array([0, -1, 0, 1, 0, 0, 0, 0, 0]).reshape(3, 3))
def test_essential_matrix_estimation():
src = np.array([1.839035, 1.924743, 0.543582, 0.375221,
0.473240, 0.142522, 0.964910, 0.598376,
0.102388, 0.140092, 15.994343, 9.622164,
0.285901, 0.430055, 0.091150, 0.254594]).reshape(-1, 2)
dst = np.array([1.002114, 1.129644, 1.521742, 1.846002,
1.084332, 0.275134, 0.293328, 0.588992,
0.839509, 0.087290, 1.779735, 1.116857,
0.878616, 0.602447, 0.642616, 1.028681]).reshape(-1, 2)
tform = estimate_transform('essential', src, dst)
# Reference values obtained using COLMAP SfM library.
tform_ref = np.array([[-0.0811666, 0.255449, -0.0478999],
[-0.192392, -0.0531675, 0.119547],
[0.177784, -0.22008, -0.015203]])
assert_almost_equal(tform.params, tform_ref, 6)
def test_essential_matrix_forward():
tform = EssentialMatrixTransform(rotation=np.eye(3),
translation=np.array([1, 0, 0]))
src = np.array([[0, 0], [0, 1], [1, 1]])
assert_almost_equal(tform(src), [[0, -1, 0], [0, -1, 1], [0, -1, 1]])
def test_essential_matrix_inverse():
tform = EssentialMatrixTransform(rotation=np.eye(3),
translation=np.array([1, 0, 0]))
src = np.array([[0, 0], [0, 1], [1, 1]])
assert_almost_equal(tform.inverse(src),
[[0, 1, 0], [0, 1, -1], [0, 1, -1]])
def test_essential_matrix_residuals():
tform = EssentialMatrixTransform(rotation=np.eye(3),
translation=np.array([1, 0, 0]))
src = np.array([[0, 0], [0, 0], [0, 0]])
dst = np.array([[2, 0], [2, 1], [2, 2]])
assert_almost_equal(tform.residuals(src, dst)**2, [0, 0.5, 2])
def test_projective_estimation():
# exact solution
tform = estimate_transform('projective', SRC[:4, :], DST[:4, :])
assert_almost_equal(tform(SRC[:4, :]), DST[:4, :])
# over-determined
tform2 = estimate_transform('projective', SRC, DST)
assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
# via estimate method
tform3 = ProjectiveTransform()
tform3.estimate(SRC, DST)
assert_almost_equal(tform3.params, tform2.params)
def test_projective_init():
tform = estimate_transform('projective', SRC, DST)
# init with transformation matrix
tform2 = ProjectiveTransform(tform.params)
assert_almost_equal(tform2.params, tform.params)
def test_polynomial_estimation():
# over-determined
tform = estimate_transform('polynomial', SRC, DST, order=10)
assert_almost_equal(tform(SRC), DST, 6)
# via estimate method
tform2 = PolynomialTransform()
tform2.estimate(SRC, DST, order=10)
assert_almost_equal(tform2.params, tform.params)
def test_polynomial_init():
tform = estimate_transform('polynomial', SRC, DST, order=10)
# init with transformation parameters
tform2 = PolynomialTransform(tform.params)
assert_almost_equal(tform2.params, tform.params)
def test_polynomial_default_order():
tform = estimate_transform('polynomial', SRC, DST)
tform2 = estimate_transform('polynomial', SRC, DST, order=2)
assert_almost_equal(tform2.params, tform.params)
def test_polynomial_inverse():
with testing.raises(Exception):
PolynomialTransform().inverse(0)
def test_union():
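    # Composing two similarity transforms with `+` multiplies their scales
    # and adds their rotation angles, which is what tform3 encodes below.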
tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
tform = tform1 + tform2
assert_almost_equal(tform.params, tform3.params)
tform1 = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
tform = tform1 + tform2
assert_almost_equal(tform.params, tform3.params)
assert tform.__class__ == ProjectiveTransform
tform = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
assert_almost_equal((tform + tform.inverse).params, np.eye(3))
tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
tform3 = SimilarityTransform(scale=0.1 * 1/0.1, rotation=0.3 - 0.9)
tform = tform1 + tform2.inverse
assert_almost_equal(tform.params, tform3.params)
def test_union_differing_types():
tform1 = SimilarityTransform()
tform2 = PolynomialTransform()
with testing.raises(TypeError):
tform1.__add__(tform2)
def test_geometric_tform():
tform = GeometricTransform()
with testing.raises(NotImplementedError):
tform(0)
with testing.raises(NotImplementedError):
tform.inverse(0)
with testing.raises(NotImplementedError):
tform.__add__(0)
# See gh-3926 for discussion details
for i in range(20):
# Generate random Homography
H = np.random.rand(3, 3) * 100
H[2, H[2] == 0] += np.finfo(float).eps
H /= H[2, 2]
# Craft some src coords
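        # (each row is chosen so that the homogeneous denominator
        # H[2, 0] * x + H[2, 1] * y + H[2, 2] is zero, given that
        # H[2, 2] == 1 after the normalisation above)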
src = np.array([
[(H[2, 1] + 1) / -H[2, 0], 1],
[1, (H[2, 0] + 1) / -H[2, 1]],
[1, 1],
])
# Prior to gh-3926, under the above circumstances,
# destination coordinates could be returned with nan/inf values.
tform = ProjectiveTransform(H) # Construct the transform
dst = tform(src) # Obtain the dst coords
# Ensure dst coords are finite numeric values
assert(np.isfinite(dst).all())
def test_invalid_input():
with testing.raises(ValueError):
ProjectiveTransform(np.zeros((2, 3)))
with testing.raises(ValueError):
AffineTransform(np.zeros((2, 3)))
with testing.raises(ValueError):
SimilarityTransform(np.zeros((2, 3)))
with testing.raises(ValueError):
EuclideanTransform(np.zeros((2, 3)))
with testing.raises(ValueError):
AffineTransform(matrix=np.zeros((2, 3)), scale=1)
with testing.raises(ValueError):
SimilarityTransform(matrix=np.zeros((2, 3)), scale=1)
with testing.raises(ValueError):
EuclideanTransform(
matrix=np.zeros((2, 3)), translation=(0, 0))
with testing.raises(ValueError):
PolynomialTransform(np.zeros((3, 3)))
with testing.raises(ValueError):
FundamentalMatrixTransform(matrix=np.zeros((3, 2)))
with testing.raises(ValueError):
EssentialMatrixTransform(matrix=np.zeros((3, 2)))
with testing.raises(ValueError):
EssentialMatrixTransform(rotation=np.zeros((3, 2)))
with testing.raises(ValueError):
EssentialMatrixTransform(
rotation=np.zeros((3, 3)))
with testing.raises(ValueError):
EssentialMatrixTransform(
rotation=np.eye(3))
with testing.raises(ValueError):
EssentialMatrixTransform(rotation=np.eye(3),
translation=np.zeros((2,)))
with testing.raises(ValueError):
EssentialMatrixTransform(rotation=np.eye(3),
translation=np.zeros((2,)))
with testing.raises(ValueError):
EssentialMatrixTransform(
rotation=np.eye(3), translation=np.zeros((3,)))
def test_degenerate():
src = dst = np.zeros((10, 2))
tform = SimilarityTransform()
tform.estimate(src, dst)
assert np.all(np.isnan(tform.params))
tform = AffineTransform()
tform.estimate(src, dst)
assert np.all(np.isnan(tform.params))
tform = ProjectiveTransform()
tform.estimate(src, dst)
assert np.all(np.isnan(tform.params))
# See gh-3926 for discussion details
tform = ProjectiveTransform()
for i in range(20):
# Some random coordinates
src = np.random.rand(4, 2) * 100
dst = np.random.rand(4, 2) * 100
# Degenerate the case by arranging points on a single line
src[:, 1] = np.random.rand()
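        # (all four points now share the same second coordinate,
        # i.e. they are collinear)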
# Prior to gh-3926, under the above circumstances,
# a transform could be returned with nan values.
assert(not tform.estimate(src, dst) or np.isfinite(tform.params).all())
def test_projective_repr():
tform = ProjectiveTransform()
want = re.escape(textwrap.dedent(
'''
<ProjectiveTransform(matrix=
[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]]) at
''').strip()) + ' 0x[a-f0-9]+' + re.escape('>')
# Hack the escaped regex to allow whitespace before each number for
# compatibility with different numpy versions.
want = want.replace('0\\.', ' *0\\.')
want = want.replace('1\\.', ' *1\\.')
assert re.match(want, repr(tform))
def test_projective_str():
tform = ProjectiveTransform()
want = re.escape(textwrap.dedent(
'''
<ProjectiveTransform(matrix=
[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])>
''').strip())
# Hack the escaped regex to allow whitespace before each number for
# compatibility with different numpy versions.
want = want.replace('0\\.', ' *0\\.')
want = want.replace('1\\.', ' *1\\.')
print(want)
assert re.match(want, str(tform))


@@ -0,0 +1,549 @@
import numpy as np
from skimage import transform
from skimage import data
from skimage.feature import canny
from skimage.draw import line, circle_perimeter, ellipse_perimeter
from skimage._shared import testing
from skimage._shared.testing import (assert_almost_equal, assert_equal,
test_parallel)
@test_parallel()
def test_hough_line():
# Generate a test image
img = np.zeros((100, 150), dtype=int)
rr, cc = line(60, 130, 80, 10)
img[rr, cc] = 1
out, angles, d = transform.hough_line(img)
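    # The strongest accumulator peak gives the line in normal form:
    # `dist` is its distance from the origin, `theta` the angle of its normal.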
y, x = np.where(out == out.max())
dist = d[y[0]]
theta = angles[x[0]]
assert_almost_equal(dist, 80.723, 1)
assert_almost_equal(theta, 1.41, 1)
def test_hough_line_angles():
img = np.zeros((10, 10))
img[0, 0] = 1
out, angles, d = transform.hough_line(img, np.linspace(0, 360, 10))
assert_equal(len(angles), 10)
def test_hough_line_bad_input():
img = np.zeros(100)
img[10] = 1
# Expected error, img must be 2D
with testing.raises(ValueError):
transform.hough_line(img, np.linspace(0, 360, 10))
def test_probabilistic_hough():
# Generate a test image
img = np.zeros((100, 100), dtype=int)
for i in range(25, 75):
img[100 - i, i] = 100
img[i, i] = 100
    # decrease default theta sampling because similar orientations may be
    # confused, as mentioned in the article by Galambos et al.
theta = np.linspace(0, np.pi, 45)
lines = transform.probabilistic_hough_line(
img, threshold=10, line_length=10, line_gap=1, theta=theta)
# sort the lines according to the x-axis
sorted_lines = []
for line in lines:
line = list(line)
line.sort(key=lambda x: x[0])
sorted_lines.append(line)
assert([(25, 75), (74, 26)] in sorted_lines)
assert([(25, 25), (74, 74)] in sorted_lines)
# Execute with default theta
transform.probabilistic_hough_line(img, line_length=10, line_gap=3)
def test_probabilistic_hough_seed():
# Load image that is likely to give a randomly varying number of lines
image = data.checkerboard()
# Use constant seed to ensure a deterministic output
lines = transform.probabilistic_hough_line(image, threshold=50,
line_length=50, line_gap=1,
seed=1234)
assert len(lines) == 65
def test_probabilistic_hough_bad_input():
img = np.zeros(100)
img[10] = 1
# Expected error, img must be 2D
with testing.raises(ValueError):
transform.probabilistic_hough_line(img)
def test_hough_line_peaks():
img = np.zeros((100, 150), dtype=int)
rr, cc = line(60, 130, 80, 10)
img[rr, cc] = 1
out, angles, d = transform.hough_line(img)
out, theta, dist = transform.hough_line_peaks(out, angles, d)
assert_equal(len(dist), 1)
assert_almost_equal(dist[0], 80.723, 1)
assert_almost_equal(theta[0], 1.41, 1)
def test_hough_line_peaks_ordered():
# Regression test per PR #1421
    testim = np.zeros((256, 64), dtype=np.bool_)
testim[50:100, 20] = True
testim[85:200, 25] = True
testim[15:35, 50] = True
testim[1:-1, 58] = True
hough_space, angles, dists = transform.hough_line(testim)
hspace, _, _ = transform.hough_line_peaks(hough_space, angles, dists)
assert hspace[0] > hspace[1]
def test_hough_line_peaks_dist():
img = np.zeros((100, 100), dtype=np.bool_)
img[:, 30] = True
img[:, 40] = True
hspace, angles, dists = transform.hough_line(img)
assert len(transform.hough_line_peaks(hspace, angles, dists,
min_distance=5)[0]) == 2
assert len(transform.hough_line_peaks(hspace, angles, dists,
min_distance=15)[0]) == 1
def test_hough_line_peaks_angle():
check_hough_line_peaks_angle()
def check_hough_line_peaks_angle():
img = np.zeros((100, 100), dtype=np.bool_)
img[:, 0] = True
img[0, :] = True
hspace, angles, dists = transform.hough_line(img)
assert len(transform.hough_line_peaks(hspace, angles, dists,
min_angle=45)[0]) == 2
assert len(transform.hough_line_peaks(hspace, angles, dists,
min_angle=90)[0]) == 1
theta = np.linspace(0, np.pi, 100)
hspace, angles, dists = transform.hough_line(img, theta)
assert len(transform.hough_line_peaks(hspace, angles, dists,
min_angle=45)[0]) == 2
assert len(transform.hough_line_peaks(hspace, angles, dists,
min_angle=90)[0]) == 1
theta = np.linspace(np.pi / 3, 4. / 3 * np.pi, 100)
hspace, angles, dists = transform.hough_line(img, theta)
assert len(transform.hough_line_peaks(hspace, angles, dists,
min_angle=45)[0]) == 2
assert len(transform.hough_line_peaks(hspace, angles, dists,
min_angle=90)[0]) == 1
def test_hough_line_peaks_num():
img = np.zeros((100, 100), dtype=np.bool_)
img[:, 30] = True
img[:, 40] = True
hspace, angles, dists = transform.hough_line(img)
assert len(transform.hough_line_peaks(hspace, angles, dists,
min_distance=0, min_angle=0,
num_peaks=1)[0]) == 1
def test_hough_line_peaks_zero_input():
# Test to make sure empty input doesn't cause a failure
img = np.zeros((100, 100), dtype='uint8')
theta = np.linspace(0, np.pi, 100)
hspace, angles, dists = transform.hough_line(img, theta)
h, a, d = transform.hough_line_peaks(hspace, angles, dists)
assert_equal(a, np.array([]))
@test_parallel()
def test_hough_circle():
# Prepare picture
img = np.zeros((120, 100), dtype=int)
radius = 20
x_0, y_0 = (99, 50)
y, x = circle_perimeter(y_0, x_0, radius)
img[x, y] = 1
out1 = transform.hough_circle(img, radius)
out2 = transform.hough_circle(img, [radius])
assert_equal(out1, out2)
out = transform.hough_circle(img, np.array([radius], dtype=np.intp))
assert_equal(out, out1)
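    # For the correct radius, the accumulator should peak at the circle
    # centre (x_0, y_0).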
x, y = np.where(out[0] == out[0].max())
assert_equal(x[0], x_0)
assert_equal(y[0], y_0)
def test_hough_circle_extended():
# Prepare picture
# The circle center is outside the image
img = np.zeros((100, 100), dtype=int)
radius = 20
x_0, y_0 = (-5, 50)
y, x = circle_perimeter(y_0, x_0, radius)
img[x[np.where(x > 0)], y[np.where(x > 0)]] = 1
out = transform.hough_circle(img, np.array([radius], dtype=np.intp),
full_output=True)
x, y = np.where(out[0] == out[0].max())
# Offset for x_0, y_0
assert_equal(x[0], x_0 + radius)
assert_equal(y[0], y_0 + radius)
def test_hough_circle_peaks():
x_0, y_0, rad_0 = (99, 50, 20)
img = np.zeros((120, 100), dtype=int)
y, x = circle_perimeter(y_0, x_0, rad_0)
img[x, y] = 1
x_1, y_1, rad_1 = (49, 60, 30)
y, x = circle_perimeter(y_1, x_1, rad_1)
img[x, y] = 1
radii = [rad_0, rad_1]
hspaces = transform.hough_circle(img, radii)
out = transform.hough_circle_peaks(hspaces, radii, min_xdistance=1,
min_ydistance=1, threshold=None,
num_peaks=np.inf,
total_num_peaks=np.inf)
s = np.argsort(out[3]) # sort by radii
assert_equal(out[1][s], np.array([y_0, y_1]))
assert_equal(out[2][s], np.array([x_0, x_1]))
assert_equal(out[3][s], np.array([rad_0, rad_1]))
def test_hough_circle_peaks_total_peak():
img = np.zeros((120, 100), dtype=int)
x_0, y_0, rad_0 = (99, 50, 20)
y, x = circle_perimeter(y_0, x_0, rad_0)
img[x, y] = 1
x_1, y_1, rad_1 = (49, 60, 30)
y, x = circle_perimeter(y_1, x_1, rad_1)
img[x, y] = 1
radii = [rad_0, rad_1]
hspaces = transform.hough_circle(img, radii)
out = transform.hough_circle_peaks(hspaces, radii, min_xdistance=1,
min_ydistance=1, threshold=None,
num_peaks=np.inf, total_num_peaks=1)
assert_equal(out[1][0], np.array([y_1, ]))
assert_equal(out[2][0], np.array([x_1, ]))
assert_equal(out[3][0], np.array([rad_1, ]))
def test_hough_circle_peaks_min_distance():
x_0, y_0, rad_0 = (50, 50, 20)
img = np.zeros((120, 100), dtype=int)
y, x = circle_perimeter(y_0, x_0, rad_0)
img[x, y] = 1
x_1, y_1, rad_1 = (60, 60, 30)
y, x = circle_perimeter(y_1, x_1, rad_1)
# Add noise and create an imperfect circle to lower the peak in Hough space
y[::2] += 1
x[::2] += 1
img[x, y] = 1
x_2, y_2, rad_2 = (70, 70, 20)
y, x = circle_perimeter(y_2, x_2, rad_2)
# Add noise and create an imperfect circle to lower the peak in Hough space
y[::2] += 1
x[::2] += 1
img[x, y] = 1
radii = [rad_0, rad_1, rad_2]
hspaces = transform.hough_circle(img, radii)
out = transform.hough_circle_peaks(hspaces, radii, min_xdistance=15,
min_ydistance=15, threshold=None,
num_peaks=np.inf,
total_num_peaks=np.inf,
normalize=True)
    # The second circle is too close to the first one and, being imperfect,
    # has a weaker peak in Hough space, so it is removed.
assert_equal(out[1], np.array([y_0, y_2]))
assert_equal(out[2], np.array([x_0, x_2]))
assert_equal(out[3], np.array([rad_0, rad_2]))
def test_hough_circle_peaks_total_peak_and_min_distance():
img = np.zeros((120, 120), dtype=int)
cx = cy = [40, 50, 60, 70, 80]
radii = range(20, 30, 2)
for i in range(len(cx)):
y, x = circle_perimeter(cy[i], cx[i], radii[i])
img[x, y] = 1
hspaces = transform.hough_circle(img, radii)
out = transform.hough_circle_peaks(hspaces, radii, min_xdistance=15,
min_ydistance=15, threshold=None,
num_peaks=np.inf,
total_num_peaks=2,
normalize=True)
    # 2nd (4th) circle is removed as it is too close to the 1st (3rd) one.
# 5th is removed as total_num_peaks = 2
assert_equal(out[1], np.array(cy[:4:2]))
assert_equal(out[2], np.array(cx[:4:2]))
assert_equal(out[3], np.array(radii[:4:2]))
def test_hough_circle_peaks_normalize():
x_0, y_0, rad_0 = (50, 50, 20)
img = np.zeros((120, 100), dtype=int)
y, x = circle_perimeter(y_0, x_0, rad_0)
img[x, y] = 1
x_1, y_1, rad_1 = (60, 60, 30)
y, x = circle_perimeter(y_1, x_1, rad_1)
img[x, y] = 1
radii = [rad_0, rad_1]
hspaces = transform.hough_circle(img, radii)
out = transform.hough_circle_peaks(hspaces, radii, min_xdistance=15,
min_ydistance=15, threshold=None,
num_peaks=np.inf,
total_num_peaks=np.inf,
normalize=False)
    # The two perfect circles are close, but the second one is bigger,
    # so it is picked thanks to its higher peak.
assert_equal(out[1], np.array([y_1]))
assert_equal(out[2], np.array([x_1]))
assert_equal(out[3], np.array([rad_1]))
def test_hough_ellipse_zero_angle():
img = np.zeros((25, 25), dtype=int)
rx = 6
ry = 8
x0 = 12
y0 = 15
angle = 0
rr, cc = ellipse_perimeter(y0, x0, ry, rx)
img[rr, cc] = 1
result = transform.hough_ellipse(img, threshold=9)
best = result[-1]
assert_equal(best[1], y0)
assert_equal(best[2], x0)
assert_almost_equal(best[3], ry, decimal=1)
assert_almost_equal(best[4], rx, decimal=1)
assert_equal(best[5], angle)
    # Check that re-drawing the ellipse yields the same points,
    # i.e. check API compatibility between hough_ellipse and ellipse_perimeter
rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
orientation=best[5])
assert_equal(rr, rr2)
assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_posangle1():
# ry > rx, angle in [0:pi/2]
img = np.zeros((30, 24), dtype=int)
rx = 6
ry = 12
x0 = 10
y0 = 15
angle = np.pi / 1.35
rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
img[rr, cc] = 1
result = transform.hough_ellipse(img, threshold=15, accuracy=3)
result.sort(order='accumulator')
best = result[-1]
assert_almost_equal(best[1] / 100., y0 / 100., decimal=1)
assert_almost_equal(best[2] / 100., x0 / 100., decimal=1)
assert_almost_equal(best[3] / 10., ry / 10., decimal=1)
assert_almost_equal(best[4] / 100., rx / 100., decimal=1)
assert_almost_equal(best[5], angle, decimal=1)
    # Check that re-drawing the ellipse yields the same points,
    # i.e. check API compatibility between hough_ellipse and ellipse_perimeter
rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
orientation=best[5])
assert_equal(rr, rr2)
assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_posangle2():
# ry < rx, angle in [0:pi/2]
img = np.zeros((30, 24), dtype=int)
rx = 12
ry = 6
x0 = 10
y0 = 15
angle = np.pi / 1.35
rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
img[rr, cc] = 1
result = transform.hough_ellipse(img, threshold=15, accuracy=3)
result.sort(order='accumulator')
best = result[-1]
assert_almost_equal(best[1] / 100., y0 / 100., decimal=1)
assert_almost_equal(best[2] / 100., x0 / 100., decimal=1)
assert_almost_equal(best[3] / 10., ry / 10., decimal=1)
assert_almost_equal(best[4] / 100., rx / 100., decimal=1)
assert_almost_equal(best[5], angle, decimal=1)
    # Check that re-drawing the ellipse yields the same points,
    # i.e. check API compatibility between hough_ellipse and ellipse_perimeter
rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
orientation=best[5])
assert_equal(rr, rr2)
assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_posangle3():
# ry < rx, angle in [pi/2:pi]
img = np.zeros((30, 24), dtype=int)
rx = 12
ry = 6
x0 = 10
y0 = 15
angle = np.pi / 1.35 + np.pi / 2.
rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
img[rr, cc] = 1
result = transform.hough_ellipse(img, threshold=15, accuracy=3)
result.sort(order='accumulator')
best = result[-1]
    # Check that re-drawing the ellipse yields the same points,
    # i.e. check API compatibility between hough_ellipse and ellipse_perimeter
rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
orientation=best[5])
assert_equal(rr, rr2)
assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_posangle4():
# ry < rx, angle in [pi:3pi/4]
img = np.zeros((30, 24), dtype=int)
rx = 12
ry = 6
x0 = 10
y0 = 15
angle = np.pi / 1.35 + np.pi
rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
img[rr, cc] = 1
result = transform.hough_ellipse(img, threshold=15, accuracy=3)
result.sort(order='accumulator')
best = result[-1]
    # Check that re-drawing the ellipse yields the same points,
    # i.e. check API compatibility between hough_ellipse and ellipse_perimeter
rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
orientation=best[5])
assert_equal(rr, rr2)
assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_negangle1():
# ry > rx, angle in [0:-pi/2]
img = np.zeros((30, 24), dtype=int)
rx = 6
ry = 12
x0 = 10
y0 = 15
angle = - np.pi / 1.35
rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
img[rr, cc] = 1
result = transform.hough_ellipse(img, threshold=15, accuracy=3)
result.sort(order='accumulator')
best = result[-1]
    # Check that re-drawing the ellipse yields the same points,
    # i.e. check API compatibility between hough_ellipse and ellipse_perimeter
rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
orientation=best[5])
assert_equal(rr, rr2)
assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_negangle2():
# ry < rx, angle in [0:-pi/2]
img = np.zeros((30, 24), dtype=int)
rx = 12
ry = 6
x0 = 10
y0 = 15
angle = - np.pi / 1.35
rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
img[rr, cc] = 1
result = transform.hough_ellipse(img, threshold=15, accuracy=3)
result.sort(order='accumulator')
best = result[-1]
    # Check that re-drawing the ellipse yields the same points,
    # i.e. check API compatibility between hough_ellipse and ellipse_perimeter
rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
orientation=best[5])
assert_equal(rr, rr2)
assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_negangle3():
# ry < rx, angle in [-pi/2:-pi]
img = np.zeros((30, 24), dtype=int)
rx = 12
ry = 6
x0 = 10
y0 = 15
angle = - np.pi / 1.35 - np.pi / 2.
rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
img[rr, cc] = 1
result = transform.hough_ellipse(img, threshold=15, accuracy=3)
result.sort(order='accumulator')
best = result[-1]
    # Check that re-drawing the ellipse yields the same points,
    # i.e. check API compatibility between hough_ellipse and ellipse_perimeter
rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
orientation=best[5])
assert_equal(rr, rr2)
assert_equal(cc, cc2)
def test_hough_ellipse_non_zero_negangle4():
# ry < rx, angle in [-pi:-3pi/4]
img = np.zeros((30, 24), dtype=int)
rx = 12
ry = 6
x0 = 10
y0 = 15
angle = - np.pi / 1.35 - np.pi
rr, cc = ellipse_perimeter(y0, x0, ry, rx, orientation=angle)
img[rr, cc] = 1
result = transform.hough_ellipse(img, threshold=15, accuracy=3)
result.sort(order='accumulator')
best = result[-1]
    # Check that re-drawing the ellipse yields the same points,
    # i.e. check API compatibility between hough_ellipse and ellipse_perimeter
rr2, cc2 = ellipse_perimeter(y0, x0, int(best[3]), int(best[4]),
orientation=best[5])
assert_equal(rr, rr2)
assert_equal(cc, cc2)
def test_hough_ellipse_all_black_img():
assert(transform.hough_ellipse(np.zeros((100, 100))).shape == (0, 6))


@@ -0,0 +1,47 @@
import numpy as np
from skimage.transform import integral_image, integrate
from skimage._shared.testing import assert_equal
np.random.seed(0)
x = (np.random.rand(50, 50) * 255).astype(np.uint8)
s = integral_image(x)
def test_validity():
y = np.arange(12).reshape((4, 3))
y = (np.random.rand(50, 50) * 255).astype(np.uint8)
assert_equal(integral_image(y)[-1, -1],
y.sum())
def test_basic():
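    # integrate takes inclusive (row, col) window corners, so the window
    # (12, 10)..(23, 19) covers the same pixels as the slice x[12:24, 10:20].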
assert_equal(x[12:24, 10:20].sum(), integrate(s, (12, 10), (23, 19)))
assert_equal(x[:20, :20].sum(), integrate(s, (0, 0), (19, 19)))
assert_equal(x[:20, 10:20].sum(), integrate(s, (0, 10), (19, 19)))
assert_equal(x[10:20, :20].sum(), integrate(s, (10, 0), (19, 19)))
def test_single():
assert_equal(x[0, 0], integrate(s, (0, 0), (0, 0)))
assert_equal(x[10, 10], integrate(s, (10, 10), (10, 10)))
def test_vectorized_integrate():
r0 = np.array([12, 0, 0, 10, 0, 10, 30])
c0 = np.array([10, 0, 10, 0, 0, 10, 31])
r1 = np.array([23, 19, 19, 19, 0, 10, 49])
c1 = np.array([19, 19, 19, 19, 0, 10, 49])
expected = np.array([x[12:24, 10:20].sum(),
x[:20, :20].sum(),
x[:20, 10:20].sum(),
x[10:20, :20].sum(),
x[0, 0],
x[10, 10],
x[30:, 31:].sum()])
start_pts = [(r0[i], c0[i]) for i in range(len(r0))]
end_pts = [(r1[i], c1[i]) for i in range(len(r0))]
assert_equal(expected, integrate(s, start_pts, end_pts))


@@ -0,0 +1,147 @@
import math
import pytest
import numpy as np
from skimage import data
from skimage.transform import pyramids
from skimage._shared import testing
from skimage._shared.testing import (assert_array_equal, assert_, assert_equal,
assert_almost_equal)
image = data.astronaut()
image_gray = image[..., 0]
def test_pyramid_reduce_rgb():
rows, cols, dim = image.shape
out = pyramids.pyramid_reduce(image, downscale=2, multichannel=True)
assert_array_equal(out.shape, (rows / 2, cols / 2, dim))
def test_pyramid_reduce_gray():
rows, cols = image_gray.shape
out1 = pyramids.pyramid_reduce(image_gray, downscale=2,
multichannel=False)
assert_array_equal(out1.shape, (rows / 2, cols / 2))
assert_almost_equal(out1.ptp(), 1.0, decimal=2)
out2 = pyramids.pyramid_reduce(image_gray, downscale=2,
multichannel=False, preserve_range=True)
assert_almost_equal(out2.ptp() / image_gray.ptp(), 1.0, decimal=2)
def test_pyramid_reduce_nd():
for ndim in [1, 2, 3, 4]:
img = np.random.randn(*((8, ) * ndim))
out = pyramids.pyramid_reduce(img, downscale=2,
multichannel=False)
expected_shape = np.asarray(img.shape) / 2
assert_array_equal(out.shape, expected_shape)
def test_pyramid_expand_rgb():
rows, cols, dim = image.shape
out = pyramids.pyramid_expand(image, upscale=2,
multichannel=True)
assert_array_equal(out.shape, (rows * 2, cols * 2, dim))
def test_pyramid_expand_gray():
rows, cols = image_gray.shape
out = pyramids.pyramid_expand(image_gray, upscale=2,
multichannel=False)
assert_array_equal(out.shape, (rows * 2, cols * 2))
def test_pyramid_expand_nd():
for ndim in [1, 2, 3, 4]:
img = np.random.randn(*((4, ) * ndim))
out = pyramids.pyramid_expand(img, upscale=2,
multichannel=False)
expected_shape = np.asarray(img.shape) * 2
assert_array_equal(out.shape, expected_shape)
def test_build_gaussian_pyramid_rgb():
rows, cols, dim = image.shape
pyramid = pyramids.pyramid_gaussian(image, downscale=2,
multichannel=True)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
def test_build_gaussian_pyramid_gray():
rows, cols = image_gray.shape
pyramid = pyramids.pyramid_gaussian(image_gray, downscale=2,
multichannel=False)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer)
assert_array_equal(out.shape, layer_shape)
def test_build_gaussian_pyramid_nd():
for ndim in [1, 2, 3, 4]:
img = np.random.randn(*((8, ) * ndim))
original_shape = np.asarray(img.shape)
pyramid = pyramids.pyramid_gaussian(img, downscale=2,
multichannel=False)
for layer, out in enumerate(pyramid):
layer_shape = original_shape / 2 ** layer
assert_array_equal(out.shape, layer_shape)
def test_build_laplacian_pyramid_rgb():
rows, cols, dim = image.shape
pyramid = pyramids.pyramid_laplacian(image, downscale=2,
multichannel=True)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
def test_build_laplacian_pyramid_nd():
for ndim in [1, 2, 3, 4]:
img = np.random.randn(*(16, )*ndim)
original_shape = np.asarray(img.shape)
pyramid = pyramids.pyramid_laplacian(img, downscale=2,
multichannel=False)
for layer, out in enumerate(pyramid):
print(out.shape)
layer_shape = original_shape / 2 ** layer
assert_array_equal(out.shape, layer_shape)
def test_laplacian_pyramid_max_layers():
for downscale in [2, 3, 5, 7]:
img = np.random.randn(32, 8)
pyramid = pyramids.pyramid_laplacian(img, downscale=downscale,
multichannel=False)
max_layer = int(np.ceil(math.log(np.max(img.shape), downscale)))
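        # e.g. a 32 x 8 image with downscale=2 gives
        # max_layer = ceil(log2(32)) = 5, i.e. 6 images including layer 0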
for layer, out in enumerate(pyramid):
if layer < max_layer:
                # no layer before the final one should be size 1 on all axes
assert_(np.max(out.shape) > 1)
# total number of images is max_layer + 1
assert_equal(max_layer, layer)
# final layer should be size 1 on all axes
assert_array_equal((out.shape), (1, 1))
def test_check_factor():
with testing.raises(ValueError):
pyramids._check_factor(0.99)
with testing.raises(ValueError):
pyramids._check_factor(- 2)
@pytest.mark.parametrize('dtype, expected',
zip(['float32', 'float64', 'uint8', 'int64'],
['float32', 'float64', 'float64', 'float64']))
def test_pyramid_gaussian_dtype_support(dtype, expected):
img = np.random.randn(32, 8).astype(dtype)
pyramid = pyramids.pyramid_gaussian(img)
assert np.all([im.dtype == expected for im in pyramid])


@@ -0,0 +1,473 @@
import itertools
import pytest
import numpy as np
from skimage.data import shepp_logan_phantom
from skimage.transform import radon, iradon, iradon_sart, rescale
from skimage._shared.utils import convert_to_float
from skimage._shared import testing
from skimage._shared.testing import test_parallel
from skimage._shared._warnings import expected_warnings
PHANTOM = shepp_logan_phantom()[::2, ::2]
PHANTOM = rescale(PHANTOM, 0.5, order=1,
mode='constant', anti_aliasing=False, multichannel=False)
def _debug_plot(original, result, sinogram=None):
from matplotlib import pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
if sinogram is None:
plt.figure(figsize=(15, 6))
sp = 130
else:
plt.figure(figsize=(11, 11))
sp = 221
plt.subplot(sp + 0)
plt.imshow(sinogram, aspect='auto', **imkwargs)
plt.subplot(sp + 1)
plt.imshow(original, **imkwargs)
plt.subplot(sp + 2)
plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)
plt.subplot(sp + 3)
plt.imshow(result - original, **imkwargs)
plt.colorbar()
plt.show()
def _rescale_intensity(x):
x = x.astype(float)
x -= x.min()
x /= x.max()
return x
def test_iradon_bias_circular_phantom():
"""
    Test that a uniform circular phantom has a small reconstruction bias.
"""
pixels = 128
xy = np.arange(-pixels / 2, pixels / 2) + 0.5
x, y = np.meshgrid(xy, xy)
image = x**2 + y**2 <= (pixels/4)**2
theta = np.linspace(0., 180., max(image.shape), endpoint=False)
sinogram = radon(image, theta=theta)
reconstruction_fbp = iradon(sinogram, theta=theta)
error = reconstruction_fbp - image
tol = 5e-5
roi_err = np.abs(np.mean(error))
assert roi_err < tol
def check_radon_center(shape, circle, dtype, preserve_range):
# Create a test image with only a single non-zero pixel at the origin
image = np.zeros(shape, dtype=dtype)
image[(shape[0] // 2, shape[1] // 2)] = 1.
# Calculate the sinogram
theta = np.linspace(0., 180., max(shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=circle,
preserve_range=preserve_range)
# The sinogram should be a straight, horizontal line
sinogram_max = np.argmax(sinogram, axis=0)
print(sinogram_max)
assert np.std(sinogram_max) < 1e-6
@testing.parametrize("shape", [(16, 16), (17, 17)])
@testing.parametrize("circle", [False, True])
@testing.parametrize("dtype", [np.float64, np.float32, np.uint8, bool])
@testing.parametrize("preserve_range", [False, True])
def test_radon_center(shape, circle, dtype, preserve_range):
check_radon_center(shape, circle, dtype, preserve_range)
@testing.parametrize("shape", [(32, 16), (33, 17)])
@testing.parametrize("circle", [False])
@testing.parametrize("dtype", [np.float64, np.float32, np.uint8, bool])
@testing.parametrize("preserve_range", [False, True])
def test_radon_center_rectangular(shape, circle, dtype, preserve_range):
check_radon_center(shape, circle, dtype, preserve_range)
def check_iradon_center(size, theta, circle):
debug = False
# Create a test sinogram corresponding to a single projection
# with a single non-zero pixel at the rotation center
if circle:
        sinogram = np.zeros((size, 1), dtype=np.float64)
sinogram[size // 2, 0] = 1.
else:
diagonal = int(np.ceil(np.sqrt(2) * size))
        sinogram = np.zeros((diagonal, 1), dtype=np.float64)
sinogram[sinogram.shape[0] // 2, 0] = 1.
maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)
print('shape of generated sinogram', sinogram.shape)
print('maximum in generated sinogram', maxpoint)
# Compare reconstructions for theta=angle and theta=angle + 180;
# these should be exactly equal
reconstruction = iradon(sinogram, theta=[theta], circle=circle)
reconstruction_opposite = iradon(sinogram, theta=[theta + 180],
circle=circle)
print('rms deviance:',
np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))
if debug:
import matplotlib.pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
plt.figure()
plt.subplot(221)
plt.imshow(sinogram, **imkwargs)
plt.subplot(222)
plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)
plt.subplot(223)
plt.imshow(reconstruction, **imkwargs)
plt.subplot(224)
plt.imshow(reconstruction_opposite, **imkwargs)
plt.show()
assert np.allclose(reconstruction, reconstruction_opposite)
sizes_for_test_iradon_center = [16, 17]
thetas_for_test_iradon_center = [0, 90]
circles_for_test_iradon_center = [False, True]
@testing.parametrize("size, theta, circle",
itertools.product(sizes_for_test_iradon_center,
thetas_for_test_iradon_center,
circles_for_test_iradon_center))
def test_iradon_center(size, theta, circle):
check_iradon_center(size, theta, circle)
def check_radon_iradon(interpolation_type, filter_type):
debug = False
image = PHANTOM
reconstructed = iradon(radon(image, circle=False), filter_name=filter_type,
interpolation=interpolation_type, circle=False)
delta = np.mean(np.abs(image - reconstructed))
print('\n\tmean error:', delta)
if debug:
_debug_plot(image, reconstructed)
if filter_type in ('ramp', 'shepp-logan'):
if interpolation_type == 'nearest':
allowed_delta = 0.03
else:
allowed_delta = 0.025
else:
allowed_delta = 0.05
assert delta < allowed_delta
filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"]
interpolation_types = ['linear', 'nearest']
radon_iradon_inputs = list(itertools.product(interpolation_types,
filter_types))
# cubic interpolation is slow; only run one test for it
radon_iradon_inputs.append(('cubic', 'shepp-logan'))
@testing.parametrize("interpolation_type, filter_type",
radon_iradon_inputs)
def test_radon_iradon(interpolation_type, filter_type):
check_radon_iradon(interpolation_type, filter_type)
@pytest.mark.parametrize("filter_type", filter_types)
def test_iradon_new_signature(filter_type):
image = PHANTOM
sinogram = radon(image, circle=False)
with pytest.warns(FutureWarning):
assert np.array_equal(iradon(sinogram, filter=filter_type),
iradon(sinogram, filter_name=filter_type))
def test_iradon_angles():
"""
    Test with different numbers of projections
"""
size = 100
# Synthetic data
image = np.tri(size) + np.tri(size)[::-1]
# Large number of projections: a good quality is expected
nb_angles = 200
theta = np.linspace(0, 180, nb_angles, endpoint=False)
radon_image_200 = radon(image, theta=theta, circle=False)
reconstructed = iradon(radon_image_200, circle=False)
delta_200 = np.mean(abs(_rescale_intensity(image) -
_rescale_intensity(reconstructed)))
assert delta_200 < 0.03
# Lower number of projections
nb_angles = 80
radon_image_80 = radon(image, theta=theta, circle=False)
# Test whether the sum of all projections is approximately the same
s = radon_image_80.sum(axis=0)
assert np.allclose(s, s[0], rtol=0.01)
reconstructed = iradon(radon_image_80, circle=False)
delta_80 = np.mean(abs(image / np.max(image) -
reconstructed / np.max(reconstructed)))
# Loss of quality when the number of projections is reduced
assert delta_80 > delta_200
def check_radon_iradon_minimal(shape, slices):
debug = False
theta = np.arange(180)
    image = np.zeros(shape, dtype=np.float64)
image[slices] = 1.
sinogram = radon(image, theta, circle=False)
reconstructed = iradon(sinogram, theta, circle=False)
print('\n\tMaximum deviation:', np.max(np.abs(image - reconstructed)))
if debug:
_debug_plot(image, reconstructed, sinogram)
if image.sum() == 1:
assert (np.unravel_index(np.argmax(reconstructed), image.shape)
== np.unravel_index(np.argmax(image), image.shape))
shapes = [(3, 3), (4, 4), (5, 5)]
def generate_test_data_for_radon_iradon_minimal(shapes):
def shape2coordinates(shape):
c0, c1 = shape[0] // 2, shape[1] // 2
coordinates = itertools.product((c0 - 1, c0, c0 + 1),
(c1 - 1, c1, c1 + 1))
return coordinates
def shape2shapeandcoordinates(shape):
return itertools.product([shape], shape2coordinates(shape))
return itertools.chain.from_iterable([shape2shapeandcoordinates(shape)
for shape in shapes])
@testing.parametrize("shape, coordinate",
generate_test_data_for_radon_iradon_minimal(shapes))
def test_radon_iradon_minimal(shape, coordinate):
check_radon_iradon_minimal(shape, coordinate)
def test_reconstruct_with_wrong_angles():
a = np.zeros((3, 3))
p = radon(a, theta=[0, 1, 2], circle=False)
iradon(p, theta=[0, 1, 2], circle=False)
with testing.raises(ValueError):
iradon(p, theta=[0, 1, 2, 3])
def _random_circle(shape):
# Synthetic random data, zero outside reconstruction circle
np.random.seed(98312871)
image = np.random.rand(*shape)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image[r > radius] = 0.
return image
def test_radon_circle():
a = np.ones((10, 10))
with expected_warnings(['reconstruction circle']):
radon(a, circle=True)
# Synthetic data, circular symmetry
shape = (61, 79)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image = np.clip(radius - r, 0, np.inf)
image = _rescale_intensity(image)
angles = np.linspace(0, 180, min(shape), endpoint=False)
sinogram = radon(image, theta=angles, circle=True)
assert np.all(sinogram.std(axis=1) < 1e-2)
# Synthetic data, random
image = _random_circle(shape)
sinogram = radon(image, theta=angles, circle=True)
mass = sinogram.sum(axis=0)
average_mass = mass.mean()
relative_error = np.abs(mass - average_mass) / average_mass
print(relative_error.max(), relative_error.mean())
assert np.all(relative_error < 3.2e-3)
def check_sinogram_circle_to_square(size):
from skimage.transform.radon_transform import _sinogram_circle_to_square
image = _random_circle((size, size))
theta = np.linspace(0., 180., size, False)
sinogram_circle = radon(image, theta, circle=True)
def argmax_shape(a):
return np.unravel_index(np.argmax(a), a.shape)
print('\n\targmax of circle:', argmax_shape(sinogram_circle))
sinogram_square = radon(image, theta, circle=False)
print('\targmax of square:', argmax_shape(sinogram_square))
sinogram_circle_to_square = _sinogram_circle_to_square(sinogram_circle)
print('\targmax of circle to square:',
argmax_shape(sinogram_circle_to_square))
error = abs(sinogram_square - sinogram_circle_to_square)
print(np.mean(error), np.max(error))
assert (argmax_shape(sinogram_square) ==
argmax_shape(sinogram_circle_to_square))
@testing.parametrize("size", (50, 51))
def test_sinogram_circle_to_square(size):
check_sinogram_circle_to_square(size)
def check_radon_iradon_circle(interpolation, shape, output_size):
# Forward and inverse radon on synthetic data
image = _random_circle(shape)
radius = min(shape) // 2
sinogram_rectangle = radon(image, circle=False)
reconstruction_rectangle = iradon(sinogram_rectangle,
output_size=output_size,
interpolation=interpolation,
circle=False)
sinogram_circle = radon(image, circle=True)
reconstruction_circle = iradon(sinogram_circle,
output_size=output_size,
interpolation=interpolation,
circle=True)
# Crop rectangular reconstruction to match circle=True reconstruction
width = reconstruction_circle.shape[0]
excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))
s = np.s_[excess:width + excess, excess:width + excess]
reconstruction_rectangle = reconstruction_rectangle[s]
# Find the reconstruction circle, set reconstruction to zero outside
c0, c1 = np.ogrid[0:width, 0:width]
r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)
reconstruction_rectangle[r > radius] = 0.
print(reconstruction_circle.shape)
print(reconstruction_rectangle.shape)
np.allclose(reconstruction_rectangle, reconstruction_circle)
# if adding more shapes to test data, you might want to look at commit d0f2bac3f
shapes_radon_iradon_circle = ((61, 79), )
interpolations = ('nearest', 'linear')
output_sizes = (None,
min(shapes_radon_iradon_circle[0]),
max(shapes_radon_iradon_circle[0]),
97)
@testing.parametrize("shape, interpolation, output_size",
itertools.product(shapes_radon_iradon_circle,
interpolations, output_sizes))
def test_radon_iradon_circle(shape, interpolation, output_size):
check_radon_iradon_circle(interpolation, shape, output_size)
def test_order_angles_golden_ratio():
from skimage.transform.radon_transform import order_angles_golden_ratio
np.random.seed(1231)
lengths = [1, 4, 10, 180]
for l in lengths:
theta_ordered = np.linspace(0, 180, l, endpoint=False)
theta_random = np.random.uniform(0, 180, l)
for theta in (theta_random, theta_ordered):
indices = [x for x in order_angles_golden_ratio(theta)]
# no duplicate indices allowed
assert len(indices) == len(set(indices))
@test_parallel()
def test_iradon_sart():
debug = False
image = rescale(PHANTOM, 0.8, mode='reflect',
multichannel=False, anti_aliasing=False)
theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
for theta, error_factor in ((theta_ordered, 1.),
(theta_missing_wedge, 2.)):
sinogram = radon(image, theta, circle=True)
reconstructed = iradon_sart(sinogram, theta)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration) =', delta)
assert delta < 0.02 * error_factor
reconstructed = iradon_sart(sinogram, theta, reconstructed)
delta = np.mean(np.abs(reconstructed - image))
print('delta (2 iterations) =', delta)
assert delta < 0.014 * error_factor
reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, clip) =', delta)
assert delta < 0.018 * error_factor
np.random.seed(1239867)
shifts = np.random.uniform(-3, 3, sinogram.shape[1])
x = np.arange(sinogram.shape[0])
sinogram_shifted = np.vstack([np.interp(x + shifts[i], x,
sinogram[:, i])
for i in range(sinogram.shape[1])]).T
reconstructed = iradon_sart(sinogram_shifted, theta,
projection_shifts=shifts)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram_shifted, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, shifted sinogram) =', delta)
assert delta < 0.022 * error_factor
def test_radon_dtype():
img = convert_to_float(PHANTOM, False)
img32 = img.astype(np.float32)
assert radon(img).dtype == img.dtype
assert radon(img32).dtype == img32.dtype
def test_iradon_sart_dtype():
sinogram = np.zeros((16, 1), dtype=int)
sinogram[8, 0] = 1.
sinogram64 = sinogram.astype('float64')
sinogram32 = sinogram.astype('float32')
with expected_warnings(['Input data is cast to float']):
assert iradon_sart(sinogram, theta=[0]).dtype == 'float64'
assert iradon_sart(sinogram64, theta=[0]).dtype == sinogram64.dtype
assert iradon_sart(sinogram32, theta=[0]).dtype == sinogram32.dtype
def test_iradon_wrong_dtype():
sinogram = np.zeros((16, 1))
with testing.raises(ValueError):
iradon_sart(sinogram, dtype=int)


@@ -0,0 +1,679 @@
import numpy as np
from scipy.ndimage import map_coordinates
from skimage.data import checkerboard, astronaut
from skimage.util.dtype import img_as_float
from skimage.color.colorconv import rgb2gray
from skimage.draw.draw import circle_perimeter_aa
from skimage.feature.peak import peak_local_max
from skimage._shared import testing
from skimage._shared.testing import (assert_almost_equal, assert_equal,
test_parallel)
from skimage._shared._warnings import expected_warnings
from skimage.transform._warps import (_stackcopy,
_linear_polar_mapping,
_log_polar_mapping, warp,
warp_coords, rotate, resize,
rescale, warp_polar, swirl,
downscale_local_mean)
from skimage.transform._geometric import (AffineTransform,
ProjectiveTransform,
SimilarityTransform)
np.random.seed(0)
def test_stackcopy():
layers = 4
x = np.empty((3, 3, layers))
y = np.eye(3, 3)
_stackcopy(x, y)
for i in range(layers):
assert_almost_equal(x[..., i], y)
def test_warp_tform():
x = np.zeros((5, 5), dtype=np.double)
x[2, 2] = 1
theta = - np.pi / 2
tform = SimilarityTransform(scale=1, rotation=theta, translation=(0, 4))
x90 = warp(x, tform, order=1)
assert_almost_equal(x90, np.rot90(x))
x90 = warp(x, tform.inverse, order=1)
assert_almost_equal(x90, np.rot90(x))
def test_warp_callable():
x = np.zeros((5, 5), dtype=np.double)
x[2, 2] = 1
refx = np.zeros((5, 5), dtype=np.double)
refx[1, 1] = 1
def shift(xy):
return xy + 1
outx = warp(x, shift, order=1)
assert_almost_equal(outx, refx)
@test_parallel()
def test_warp_matrix():
x = np.zeros((5, 5), dtype=np.double)
x[2, 2] = 1
refx = np.zeros((5, 5), dtype=np.double)
refx[1, 1] = 1
matrix = np.array([[1, 0, 1], [0, 1, 1], [0, 0, 1]])
# _warp_fast
outx = warp(x, matrix, order=1)
assert_almost_equal(outx, refx)
# check for ndimage.map_coordinates
outx = warp(x, matrix, order=5)
def test_warp_nd():
for dim in range(2, 8):
shape = dim * (5,)
x = np.zeros(shape, dtype=np.double)
x_c = dim * (2,)
x[x_c] = 1
refx = np.zeros(shape, dtype=np.double)
refx_c = dim * (1,)
refx[refx_c] = 1
coord_grid = dim * (slice(0, 5, 1),)
coords = np.array(np.mgrid[coord_grid]) + 1
outx = warp(x, coords, order=0, cval=0)
assert_almost_equal(outx, refx)
def test_warp_clip():
x = np.zeros((5, 5), dtype=np.double)
x[2, 2] = 1
outx = rescale(x, 3, order=3, clip=False,
multichannel=False, anti_aliasing=False, mode='constant')
assert outx.min() < 0
outx = rescale(x, 3, order=3, clip=True,
multichannel=False, anti_aliasing=False, mode='constant')
assert_almost_equal(outx.min(), 0)
assert_almost_equal(outx.max(), 1)
def test_homography():
x = np.zeros((5, 5), dtype=np.double)
x[1, 1] = 1
theta = -np.pi / 2
M = np.array([[np.cos(theta), - np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 4],
[0, 0, 1]])
x90 = warp(x,
inverse_map=ProjectiveTransform(M).inverse,
order=1)
assert_almost_equal(x90, np.rot90(x))
def test_rotate():
x = np.zeros((5, 5), dtype=np.double)
x[1, 1] = 1
x90 = rotate(x, 90)
assert_almost_equal(x90, np.rot90(x))
def test_rotate_resize():
x = np.zeros((10, 10), dtype=np.double)
x45 = rotate(x, 45, resize=False)
assert x45.shape == (10, 10)
x45 = rotate(x, 45, resize=True)
    # new dimension should be d = 10 * sqrt(2) ~= 14
assert x45.shape == (14, 14)
def test_rotate_center():
x = np.zeros((10, 10), dtype=np.double)
x[4, 4] = 1
refx = np.zeros((10, 10), dtype=np.double)
refx[2, 5] = 1
x20 = rotate(x, 20, order=0, center=(0, 0))
assert_almost_equal(x20, refx)
x0 = rotate(x20, -20, order=0, center=(0, 0))
assert_almost_equal(x0, x)
def test_rotate_resize_center():
x = np.zeros((10, 10), dtype=np.double)
x[0, 0] = 1
ref_x45 = np.zeros((14, 14), dtype=np.double)
ref_x45[6, 0] = 1
ref_x45[7, 0] = 1
x45 = rotate(x, 45, resize=True, center=(3, 3), order=0)
    # new dimension should be d = 10 * sqrt(2) ~= 14
assert x45.shape == (14, 14)
assert_equal(x45, ref_x45)
def test_rotate_resize_90():
x90 = rotate(np.zeros((470, 230), dtype=np.double), 90, resize=True)
assert x90.shape == (230, 470)
def test_rescale():
# same scale factor
x = np.zeros((5, 5), dtype=np.double)
x[1, 1] = 1
scaled = rescale(x, 2, order=0,
multichannel=False, anti_aliasing=False, mode='constant')
ref = np.zeros((10, 10))
ref[2:4, 2:4] = 1
assert_almost_equal(scaled, ref)
# different scale factors
x = np.zeros((5, 5), dtype=np.double)
x[1, 1] = 1
scaled = rescale(x, (2, 1), order=0,
multichannel=False, anti_aliasing=False, mode='constant')
ref = np.zeros((10, 5))
ref[2:4, 1] = 1
assert_almost_equal(scaled, ref)
def test_rescale_invalid_scale():
x = np.zeros((10, 10, 3))
with testing.raises(ValueError):
rescale(x, (2, 2),
multichannel=False, anti_aliasing=False, mode='constant')
with testing.raises(ValueError):
rescale(x, (2, 2, 2),
multichannel=True, anti_aliasing=False, mode='constant')
def test_rescale_multichannel():
# 1D + channels
x = np.zeros((8, 3), dtype=np.double)
scaled = rescale(x, 2, order=0, multichannel=True, anti_aliasing=False,
mode='constant')
assert_equal(scaled.shape, (16, 3))
# 2D
scaled = rescale(x, 2, order=0, multichannel=False, anti_aliasing=False,
mode='constant')
assert_equal(scaled.shape, (16, 6))
# 2D + channels
x = np.zeros((8, 8, 3), dtype=np.double)
scaled = rescale(x, 2, order=0, multichannel=True, anti_aliasing=False,
mode='constant')
assert_equal(scaled.shape, (16, 16, 3))
# 3D
scaled = rescale(x, 2, order=0, multichannel=False, anti_aliasing=False,
mode='constant')
assert_equal(scaled.shape, (16, 16, 6))
# 3D + channels
x = np.zeros((8, 8, 8, 3), dtype=np.double)
scaled = rescale(x, 2, order=0, multichannel=True, anti_aliasing=False,
mode='constant')
assert_equal(scaled.shape, (16, 16, 16, 3))
# 4D
scaled = rescale(x, 2, order=0, multichannel=False, anti_aliasing=False,
mode='constant')
assert_equal(scaled.shape, (16, 16, 16, 6))
def test_rescale_multichannel_multiscale():
x = np.zeros((5, 5, 3), dtype=np.double)
scaled = rescale(x, (2, 1), order=0, multichannel=True,
anti_aliasing=False, mode='constant')
assert_equal(scaled.shape, (10, 5, 3))
def test_rescale_multichannel_defaults():
x = np.zeros((8, 3), dtype=np.double)
scaled = rescale(x, 2, order=0, anti_aliasing=False, mode='constant')
assert_equal(scaled.shape, (16, 6))
x = np.zeros((8, 8, 3), dtype=np.double)
scaled = rescale(x, 2, order=0, anti_aliasing=False, mode='constant')
assert_equal(scaled.shape, (16, 16, 6))
def test_resize2d():
x = np.zeros((5, 5), dtype=np.double)
x[1, 1] = 1
resized = resize(x, (10, 10), order=0, anti_aliasing=False,
mode='constant')
ref = np.zeros((10, 10))
ref[2:4, 2:4] = 1
assert_almost_equal(resized, ref)
def test_resize3d_keep():
# keep 3rd dimension
x = np.zeros((5, 5, 3), dtype=np.double)
x[1, 1, :] = 1
resized = resize(x, (10, 10), order=0, anti_aliasing=False,
mode='constant')
with testing.raises(ValueError):
# output_shape too short
resize(x, (10, ), order=0, anti_aliasing=False, mode='constant')
ref = np.zeros((10, 10, 3))
ref[2:4, 2:4, :] = 1
assert_almost_equal(resized, ref)
resized = resize(x, (10, 10, 3), order=0, anti_aliasing=False,
mode='constant')
assert_almost_equal(resized, ref)
def test_resize3d_resize():
# resize 3rd dimension
x = np.zeros((5, 5, 3), dtype=np.double)
x[1, 1, :] = 1
resized = resize(x, (10, 10, 1), order=0, anti_aliasing=False,
mode='constant')
ref = np.zeros((10, 10, 1))
ref[2:4, 2:4] = 1
assert_almost_equal(resized, ref)
def test_resize3d_2din_3dout():
# 3D output with 2D input
x = np.zeros((5, 5), dtype=np.double)
x[1, 1] = 1
resized = resize(x, (10, 10, 1), order=0, anti_aliasing=False,
mode='constant')
ref = np.zeros((10, 10, 1))
ref[2:4, 2:4] = 1
assert_almost_equal(resized, ref)
def test_resize2d_4d():
# resize with extra output dimensions
x = np.zeros((5, 5), dtype=np.double)
x[1, 1] = 1
out_shape = (10, 10, 1, 1)
resized = resize(x, out_shape, order=0, anti_aliasing=False,
mode='constant')
ref = np.zeros(out_shape)
ref[2:4, 2:4, ...] = 1
assert_almost_equal(resized, ref)
def test_resize_nd():
for dim in range(1, 6):
shape = 2 + np.arange(dim) * 2
x = np.ones(shape)
out_shape = np.asarray(shape) * 1.5
resized = resize(x, out_shape, order=0, mode='reflect',
anti_aliasing=False)
expected_shape = 1.5 * shape
assert_equal(resized.shape, expected_shape)
assert np.all(resized == 1)
def test_resize3d_bilinear():
# bilinear 3rd dimension
x = np.zeros((5, 5, 2), dtype=np.double)
x[1, 1, 0] = 0
x[1, 1, 1] = 1
resized = resize(x, (10, 10, 1), order=1, mode='constant',
anti_aliasing=False)
ref = np.zeros((10, 10, 1))
ref[1:5, 1:5, :] = 0.03125
ref[1:5, 2:4, :] = 0.09375
ref[2:4, 1:5, :] = 0.09375
ref[2:4, 2:4, :] = 0.28125
assert_almost_equal(resized, ref)
def test_resize_dtype():
x = np.zeros((5, 5))
x_f32 = x.astype(np.float32)
x_u8 = x.astype(np.uint8)
x_b = x.astype(bool)
assert resize(x, (10, 10), preserve_range=False).dtype == x.dtype
assert resize(x, (10, 10), preserve_range=True).dtype == x.dtype
assert resize(x_u8, (10, 10), preserve_range=False).dtype == np.double
assert resize(x_u8, (10, 10), preserve_range=True).dtype == np.double
assert resize(x_b, (10, 10), preserve_range=False).dtype == np.double
assert resize(x_b, (10, 10), preserve_range=True).dtype == np.double
assert resize(x_f32, (10, 10), preserve_range=False).dtype == x_f32.dtype
assert resize(x_f32, (10, 10), preserve_range=True).dtype == x_f32.dtype
def test_swirl():
image = img_as_float(checkerboard())
swirl_params = {'radius': 80, 'rotation': 0, 'order': 2, 'mode': 'reflect'}
with expected_warnings(['Bi-quadratic.*bug']):
swirled = swirl(image, strength=10, **swirl_params)
unswirled = swirl(swirled, strength=-10, **swirl_params)
assert np.mean(np.abs(image - unswirled)) < 0.01
swirl_params.pop('mode')
with expected_warnings(['Bi-quadratic.*bug']):
swirled = swirl(image, strength=10, **swirl_params)
unswirled = swirl(swirled, strength=-10, **swirl_params)
assert np.mean(np.abs(image[1:-1, 1:-1] - unswirled[1:-1, 1:-1])) < 0.01


def test_const_cval_out_of_range():
img = np.random.randn(100, 100)
    cval = -10
    warped = warp(img, AffineTransform(translation=(10, 10)), cval=cval)
    # the (10, 10) shift exposes an L-shaped border of cval pixels, 10 wide
    # along two sides: 2 * 100 * 10 - 10 * 10 of them
    assert np.sum(warped == cval) == (2 * 100 * 10 - 10 * 10)


def test_warp_identity():
img = img_as_float(rgb2gray(astronaut()))
assert len(img.shape) == 2
assert np.allclose(img, warp(img, AffineTransform(rotation=0)))
assert not np.allclose(img, warp(img, AffineTransform(rotation=0.1)))
rgb_img = np.transpose(np.asarray([img, np.zeros_like(img), img]),
(1, 2, 0))
warped_rgb_img = warp(rgb_img, AffineTransform(rotation=0.1))
assert np.allclose(rgb_img, warp(rgb_img, AffineTransform(rotation=0)))
assert not np.allclose(rgb_img, warped_rgb_img)
# assert no cross-talk between bands
assert np.all(0 == warped_rgb_img[:, :, 1])


def test_warp_coords_example():
image = astronaut().astype(np.float32)
assert 3 == image.shape[2]
tform = SimilarityTransform(translation=(0, -10))
coords = warp_coords(tform, (30, 30, 3))
map_coordinates(image[:, :, 0], coords[:2])
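

# Illustrative sketch, not part of the upstream suite: warp_coords returns one
# coordinate array per output axis, ready to be fed to
# scipy.ndimage.map_coordinates one channel at a time.
def test_warp_coords_shape_sketch():
    tform = SimilarityTransform(translation=(0, -10))
    coords = warp_coords(tform, (30, 30, 3))
    assert coords.shape == (3, 30, 30, 3)
    image = np.random.rand(100, 100)
    warped_channel = map_coordinates(image, coords[:2])
    assert warped_channel.shape == (30, 30, 3)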


def test_downsize():
x = np.zeros((10, 10), dtype=np.double)
x[2:4, 2:4] = 1
scaled = resize(x, (5, 5), order=0, anti_aliasing=False, mode='constant')
assert_equal(scaled.shape, (5, 5))
assert_equal(scaled[1, 1], 1)
assert_equal(scaled[2:, :].sum(), 0)
assert_equal(scaled[:, 2:].sum(), 0)
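

# Illustrative sketch, not part of the upstream suite: rescaling by 0.5 maps
# to the same (5, 5) output shape, so the two calls should agree exactly
# (test_downscale below exercises the rescale path on its own).
def test_downsize_matches_rescale_sketch():
    x = np.zeros((10, 10), dtype=np.double)
    x[2:4, 2:4] = 1
    resized = resize(x, (5, 5), order=0, anti_aliasing=False, mode='constant')
    rescaled = rescale(x, 0.5, order=0, anti_aliasing=False,
                       multichannel=False, mode='constant')
    assert_equal(resized, rescaled)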


def test_downsize_anti_aliasing():
x = np.zeros((10, 10), dtype=np.double)
x[2, 2] = 1
scaled = resize(x, (5, 5), order=1, anti_aliasing=True, mode='constant')
assert_equal(scaled.shape, (5, 5))
assert np.all(scaled[:3, :3] > 0)
assert_equal(scaled[3:, :].sum(), 0)
assert_equal(scaled[:, 3:].sum(), 0)
sigma = 0.125
out_size = (5, 5)
    for mode in ['constant', 'edge', 'symmetric', 'reflect', 'wrap']:
        resize(x, out_size, order=1, mode=mode,
               anti_aliasing=True, anti_aliasing_sigma=sigma)
with testing.raises(ValueError): # Unknown mode, or cannot translate mode
resize(x, out_size, order=1, mode='non-existent',
anti_aliasing=True, anti_aliasing_sigma=sigma)


def test_downsize_anti_aliasing_invalid_stddev():
x = np.zeros((10, 10), dtype=np.double)
with testing.raises(ValueError):
resize(x, (5, 5), order=0, anti_aliasing=True, anti_aliasing_sigma=-1,
mode='constant')
with expected_warnings(["Anti-aliasing standard deviation greater"]):
resize(x, (5, 15), order=0, anti_aliasing=True,
anti_aliasing_sigma=(1, 1), mode="reflect")
resize(x, (5, 15), order=0, anti_aliasing=True,
anti_aliasing_sigma=(0, 1), mode="reflect")


def test_downscale():
x = np.zeros((10, 10), dtype=np.double)
x[2:4, 2:4] = 1
scaled = rescale(x, 0.5, order=0, anti_aliasing=False,
multichannel=False, mode='constant')
assert_equal(scaled.shape, (5, 5))
assert_equal(scaled[1, 1], 1)
assert_equal(scaled[2:, :].sum(), 0)
assert_equal(scaled[:, 2:].sum(), 0)


def test_downscale_anti_aliasing():
x = np.zeros((10, 10), dtype=np.double)
x[2, 2] = 1
scaled = rescale(x, 0.5, order=1, anti_aliasing=True,
multichannel=False, mode='constant')
assert_equal(scaled.shape, (5, 5))
assert np.all(scaled[:3, :3] > 0)
assert_equal(scaled[3:, :].sum(), 0)
assert_equal(scaled[:, 3:].sum(), 0)


def test_downscale_local_mean():
image1 = np.arange(4 * 6).reshape(4, 6)
out1 = downscale_local_mean(image1, (2, 3))
expected1 = np.array([[4., 7.],
[16., 19.]])
assert_equal(expected1, out1)
    # (5, 8) is not a multiple of (4, 5); the image is padded with cval (0 by
    # default) before block-averaging, hence the fractional expected means
    image2 = np.arange(5 * 8).reshape(5, 8)
out2 = downscale_local_mean(image2, (4, 5))
expected2 = np.array([[14., 10.8],
[8.5, 5.7]])
assert_equal(expected2, out2)
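

# Illustrative sketch, not part of the upstream suite: when the shape is not a
# multiple of the block factors, downscale_local_mean pads with cval (0 by
# default), so the padded zeros are averaged in.
def test_downscale_local_mean_padding_sketch():
    image = np.ones((2, 3))
    out = downscale_local_mean(image, (2, 2))
    # second block averages two ones with two padded zeros
    assert_equal(out, np.array([[1., 0.5]]))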


def test_invalid():
with testing.raises(ValueError):
warp(np.ones((4, 3, 3, 3)),
SimilarityTransform())


def test_inverse():
tform = SimilarityTransform(scale=0.5, rotation=0.1)
inverse_tform = SimilarityTransform(matrix=np.linalg.inv(tform.params))
image = np.arange(10 * 10).reshape(10, 10).astype(np.double)
assert_equal(warp(image, inverse_tform), warp(image, tform.inverse))
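

# Illustrative sketch, not part of the upstream suite: tform.inverse is the
# callable inverse mapping used by warp above, so applying it to transformed
# points recovers the originals.
def test_inverse_roundtrip_sketch():
    tform = SimilarityTransform(scale=0.5, rotation=0.1)
    pts = np.array([[0., 0.], [5., 2.], [3., 7.]])
    assert np.allclose(tform.inverse(tform(pts)), pts)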


def test_slow_warp_nonint_oshape():
image = np.random.rand(5, 5)
with testing.raises(ValueError):
warp(image, lambda xy: xy,
output_shape=(13.1, 19.5))
    # near-integer shapes are accepted and silently rounded
    warp(image, lambda xy: xy, output_shape=(13.0001, 19.9999))


def test_keep_range():
image = np.linspace(0, 2, 25).reshape(5, 5)
out = rescale(image, 2, preserve_range=False, clip=True, order=0,
mode='constant', multichannel=False, anti_aliasing=False)
assert out.min() == 0
assert out.max() == 2
out = rescale(image, 2, preserve_range=True, clip=True, order=0,
mode='constant', multichannel=False, anti_aliasing=False)
assert out.min() == 0
assert out.max() == 2
out = rescale(image.astype(np.uint8), 2, preserve_range=False,
mode='constant', multichannel=False, anti_aliasing=False,
clip=True, order=0)
assert out.min() == 0
assert out.max() == 2 / 255.0


def test_zero_image_size():
with testing.raises(ValueError):
warp(np.zeros(0),
SimilarityTransform())
with testing.raises(ValueError):
warp(np.zeros((0, 10)),
SimilarityTransform())
with testing.raises(ValueError):
warp(np.zeros((10, 0)),
SimilarityTransform())
with testing.raises(ValueError):
warp(np.zeros((10, 10, 0)),
SimilarityTransform())


def test_linear_polar_mapping():
output_coords = np.array([[0, 0],
[0, 90],
[0, 180],
[0, 270],
[99, 0],
[99, 180],
[99, 270],
[99, 45]])
ground_truth = np.array([[100, 100],
[100, 100],
[100, 100],
[100, 100],
[199, 100],
[1, 100],
[100, 1],
[170.00357134, 170.00357134]])
k_angle = 360 / (2 * np.pi)
k_radius = 1
center = (100, 100)
coords = _linear_polar_mapping(output_coords, k_angle, k_radius, center)
assert np.allclose(coords, ground_truth)


def test_log_polar_mapping():
output_coords = np.array([[0, 0],
[0, 90],
[0, 180],
[0, 270],
[99, 0],
[99, 180],
[99, 270],
[99, 45]])
ground_truth = np.array([[101, 100],
[100, 101],
[99, 100],
[100, 99],
[195.4992586, 100],
[4.5007414, 100],
[100, 4.5007414],
[167.52817336, 167.52817336]])
k_angle = 360 / (2 * np.pi)
k_radius = 100 / np.log(100)
center = (100, 100)
coords = _log_polar_mapping(output_coords, k_angle, k_radius, center)
assert np.allclose(coords, ground_truth)
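

# Illustrative sketch, not part of the upstream suite: both mappings place the
# output pixel (rho, phi) at angle phi / k_angle radians around ``center`` and
# at distance rho / k_radius (linear) or exp(rho / k_radius) (log) from it,
# which is also why every rho=0 row of the log ground truth sits one pixel
# away from the center rather than on it.
def test_polar_mapping_relation_sketch():
    k_angle = 360 / (2 * np.pi)
    # last row of the linear ground truth above: rho=99, phi=45
    expected = 100 + 99 * np.cos(45 / k_angle)
    assert np.allclose(expected, 170.00357134)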


def test_linear_warp_polar():
radii = [5, 10, 15, 20]
image = np.zeros([51, 51])
for rad in radii:
rr, cc, val = circle_perimeter_aa(25, 25, rad)
image[rr, cc] = val
warped = warp_polar(image, radius=25)
profile = warped.mean(axis=0)
peaks = peak_local_max(profile)
    assert np.all([peak in radii for peak in peaks])


def test_log_warp_polar():
radii = [np.exp(2), np.exp(3), np.exp(4), np.exp(5),
np.exp(5)-1, np.exp(5)+1]
radii = [int(x) for x in radii]
image = np.zeros([301, 301])
for rad in radii:
rr, cc, val = circle_perimeter_aa(150, 150, rad)
image[rr, cc] = val
warped = warp_polar(image, radius=200, scaling='log')
profile = warped.mean(axis=0)
peaks_coord = peak_local_max(profile)
peaks_coord.sort(axis=0)
gaps = peaks_coord[1:] - peaks_coord[:-1]
    assert np.all((gaps >= 38) & (gaps <= 40))


def test_invalid_scaling_polar():
with testing.raises(ValueError):
warp_polar(np.zeros((10, 10)), (5, 5), scaling='invalid')
with testing.raises(ValueError):
warp_polar(np.zeros((10, 10)), (5, 5), scaling=None)


def test_invalid_dimensions_polar():
with testing.raises(ValueError):
warp_polar(np.zeros((10, 10, 3)), (5, 5))
with testing.raises(ValueError):
warp_polar(np.zeros((10, 10)), (5, 5), multichannel=True)
with testing.raises(ValueError):
warp_polar(np.zeros((10, 10, 10, 3)), (5, 5), multichannel=True)
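

# Illustrative sketch, not part of the upstream suite: a trailing channel axis
# is handled by passing multichannel=True, which the ValueError checks above
# guard against omitting.  The exact default output shape is not assumed here,
# only that the channel axis is kept last.
def test_warp_polar_multichannel_sketch():
    image = np.zeros((10, 10, 3))
    warped = warp_polar(image, (5, 5), radius=5, multichannel=True)
    assert warped.ndim == 3
    assert warped.shape[-1] == 3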


def test_bool_img_rescale():
img = np.ones((12, 18), dtype=bool)
img[2:-2, 4:-4] = False
res = rescale(img, 0.5)
expected = np.ones((6, 9))
expected[1:-1, 2:-2] = False
assert_equal(res, expected)


def test_bool_img_resize():
img = np.ones((12, 18), dtype=bool)
img[2:-2, 4:-4] = False
res = resize(img, (6, 9))
expected = np.ones((6, 9))
expected[1:-1, 2:-2] = False
assert_equal(res, expected)


def test_bool_array_warnings():
img = np.zeros((10, 10), dtype=bool)
with expected_warnings(['Input image dtype is bool']):
rescale(img, 0.5, anti_aliasing=True)
with expected_warnings(['Input image dtype is bool']):
resize(img, (5, 5), anti_aliasing=True)
with expected_warnings(['Input image dtype is bool']):
rescale(img, 0.5, order=1)
with expected_warnings(['Input image dtype is bool']):
resize(img, (5, 5), order=1)
with expected_warnings(['Input image dtype is bool']):
warp(img, np.eye(3), order=1)
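

# Illustrative sketch, not part of the upstream suite: casting the boolean
# mask to float explicitly is the warning-free path the messages above point
# towards.
def test_bool_array_explicit_cast_sketch():
    img = np.zeros((10, 10), dtype=bool)
    out = resize(img.astype(float), (5, 5), anti_aliasing=True)
    assert out.shape == (5, 5)
    assert np.all(out == 0)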