Fixed database typo and removed unnecessary class identifier.

Batuhan Berk Başoğlu 2020-10-14 10:10:37 -04:00
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions

@@ -0,0 +1,8 @@
from ._optical_flow import optical_flow_tvl1
from ._phase_cross_correlation import phase_cross_correlation
__all__ = [
'optical_flow_tvl1',
'phase_cross_correlation'
]
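
# A hedged usage sketch (illustration only, not part of this module): both
# re-exported functions can be exercised on synthetic data, assuming
# scikit-image's ``data`` module is available.
#
#   >>> import numpy as np
#   >>> from skimage.data import camera
#   >>> image = camera()
#   >>> shifted = np.roll(image, (5, -3), axis=(0, 1))
#   >>> shift, error, phasediff = phase_cross_correlation(image, shifted)
#   >>> shift  # the translation that registers ``shifted`` onto ``image``
#   array([-5.,  3.])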

@@ -0,0 +1,287 @@
"""
Implementation of the masked normalized cross-correlation.
Based on the following publication:
D. Padfield. Masked object registration in the Fourier domain.
IEEE Transactions on Image Processing (2012)
and the author's original MATLAB implementation, available on this website:
http://www.dirkpadfield.com/
"""
import numpy as np
from functools import partial
from .._shared.fft import fftmodule, next_fast_len
def _masked_phase_cross_correlation(reference_image, moving_image,
reference_mask, moving_mask=None,
overlap_ratio=0.3):
"""Masked image translation registration by masked normalized
cross-correlation.
Parameters
----------
reference_image : ndarray
Reference image.
moving_image : ndarray
Image to register. Must be same dimensionality as ``reference_image``,
but not necessarily the same size.
reference_mask : ndarray
Boolean mask for ``reference_image``. The mask should evaluate
to ``True`` (or 1) on valid pixels. ``reference_mask`` should
have the same shape as ``reference_image``.
moving_mask : ndarray or None, optional
Boolean mask for ``moving_image``. The mask should evaluate to ``True``
(or 1) on valid pixels. ``moving_mask`` should have the same shape
as ``moving_image``. If ``None``, ``reference_mask`` will be used.
overlap_ratio : float, optional
Minimum allowed overlap ratio between images. The correlation for
translations corresponding with an overlap ratio lower than this
threshold will be ignored. A lower `overlap_ratio` leads to smaller
maximum translation, while a higher `overlap_ratio` leads to greater
robustness against spurious matches due to small overlap between
masked images.
Returns
-------
shifts : ndarray
Shift vector (in pixels) required to register ``moving_image``
with ``reference_image``. Axis ordering is consistent with
        numpy (e.g. Z, Y, X).
References
----------
.. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain.
IEEE Transactions on Image Processing, vol. 21(5),
pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`
.. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and
Pattern Recognition, pp. 2918-2925 (2010).
:DOI:`10.1109/CVPR.2010.5540032`
"""
if moving_mask is None:
if reference_image.shape != moving_image.shape:
raise ValueError(
"Input images have different shapes, moving_mask must "
"be explicitely set.")
moving_mask = reference_mask.astype(bool)
# We need masks to be of the same size as their respective images
for (im, mask) in [(reference_image, reference_mask),
(moving_image, moving_mask)]:
if im.shape != mask.shape:
raise ValueError(
"Image sizes must match their respective mask sizes.")
xcorr = cross_correlate_masked(moving_image, reference_image, moving_mask,
reference_mask, axes=(0, 1), mode='full',
overlap_ratio=overlap_ratio)
# Generalize to the average of multiple equal maxima
maxima = np.transpose(np.nonzero(xcorr == xcorr.max()))
center = np.mean(maxima, axis=0)
shifts = center - np.array(reference_image.shape) + 1
# The mismatch in size will impact the center location of the
# cross-correlation
size_mismatch = (np.array(moving_image.shape)
- np.array(reference_image.shape))
return -shifts + (size_mismatch / 2)
def cross_correlate_masked(arr1, arr2, m1, m2, mode='full', axes=(-2, -1),
overlap_ratio=0.3):
"""
Masked normalized cross-correlation between arrays.
Parameters
----------
arr1 : ndarray
First array.
arr2 : ndarray
        Second array. The dimensions of `arr2` along axes that are not
        transformed should be equal to those of `arr1`.
m1 : ndarray
Mask of `arr1`. The mask should evaluate to `True`
(or 1) on valid pixels. `m1` should have the same shape as `arr1`.
m2 : ndarray
Mask of `arr2`. The mask should evaluate to `True`
(or 1) on valid pixels. `m2` should have the same shape as `arr2`.
mode : {'full', 'same'}, optional
'full':
This returns the convolution at each point of overlap. At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
The output is the same size as `arr1`, centered with respect
to the `full` output. Boundary effects are less prominent.
axes : tuple of ints, optional
Axes along which to compute the cross-correlation.
overlap_ratio : float, optional
Minimum allowed overlap ratio between images. The correlation for
translations corresponding with an overlap ratio lower than this
threshold will be ignored. A lower `overlap_ratio` leads to smaller
maximum translation, while a higher `overlap_ratio` leads to greater
robustness against spurious matches due to small overlap between
masked images.
Returns
-------
out : ndarray
Masked normalized cross-correlation.
Raises
------
ValueError : if correlation `mode` is not valid, or array dimensions along
non-transformation axes are not equal.
References
----------
.. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain.
IEEE Transactions on Image Processing, vol. 21(5),
pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`
.. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and
Pattern Recognition, pp. 2918-2925 (2010).
:DOI:`10.1109/CVPR.2010.5540032`
"""
if mode not in {'full', 'same'}:
raise ValueError("Correlation mode '{}' is not valid.".format(mode))
    fixed_image = np.array(arr1, dtype=float)
    fixed_mask = np.array(m1, dtype=bool)
    moving_image = np.array(arr2, dtype=float)
    moving_mask = np.array(m2, dtype=bool)
    eps = np.finfo(float).eps
# Array dimensions along non-transformation axes should be equal.
all_axes = set(range(fixed_image.ndim))
for axis in (all_axes - set(axes)):
if fixed_image.shape[axis] != moving_image.shape[axis]:
raise ValueError(
"Array shapes along non-transformation axes should be "
"equal, but dimensions along axis {a} are not".format(a=axis))
    # Determine final size along transformation axes
    # Note that it might be faster to compute Fourier transform in a slightly
    # larger shape (`fast_shape`). Then, after all Fourier transforms are
    # done, we slice back to `final_shape` using `final_slice`.
final_shape = list(arr1.shape)
for axis in axes:
final_shape[axis] = fixed_image.shape[axis] + \
moving_image.shape[axis] - 1
final_shape = tuple(final_shape)
final_slice = tuple([slice(0, int(sz)) for sz in final_shape])
    # Extend transform axes to the next fast length (i.e. a size whose prime
    # factors are all small), for FFT efficiency
fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes])
# We use numpy.fft or the new scipy.fft because they allow leaving the
# transform axes unchanged which was not possible with scipy.fftpack's
# fftn/ifftn in older versions of SciPy.
# E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4)
# results in arr_fft shape (4, 4, 7)
fft = partial(fftmodule.fftn, s=fast_shape, axes=axes)
ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes)
fixed_image[np.logical_not(fixed_mask)] = 0.0
moving_image[np.logical_not(moving_mask)] = 0.0
# N-dimensional analog to rotation by 180deg is flip over all relevant axes.
# See [1] for discussion.
rotated_moving_image = _flip(moving_image, axes=axes)
rotated_moving_mask = _flip(moving_mask, axes=axes)
fixed_fft = fft(fixed_image)
rotated_moving_fft = fft(rotated_moving_image)
fixed_mask_fft = fft(fixed_mask)
rotated_moving_mask_fft = fft(rotated_moving_mask)
# Calculate overlap of masks at every point in the convolution.
# Locations with high overlap should not be taken into account.
number_overlap_masked_px = np.real(
ifft(rotated_moving_mask_fft * fixed_mask_fft))
number_overlap_masked_px[:] = np.round(number_overlap_masked_px)
number_overlap_masked_px[:] = np.fmax(number_overlap_masked_px, eps)
masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft)
masked_correlated_rotated_moving_fft = ifft(
fixed_mask_fft * rotated_moving_fft)
numerator = ifft(rotated_moving_fft * fixed_fft)
numerator -= masked_correlated_fixed_fft * \
masked_correlated_rotated_moving_fft / number_overlap_masked_px
fixed_squared_fft = fft(np.square(fixed_image))
fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft)
fixed_denom -= np.square(masked_correlated_fixed_fft) / \
number_overlap_masked_px
fixed_denom[:] = np.fmax(fixed_denom, 0.0)
rotated_moving_squared_fft = fft(np.square(rotated_moving_image))
moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft)
moving_denom -= np.square(masked_correlated_rotated_moving_fft) / \
number_overlap_masked_px
moving_denom[:] = np.fmax(moving_denom, 0.0)
denom = np.sqrt(fixed_denom * moving_denom)
# Slice back to expected convolution shape.
numerator = numerator[final_slice]
denom = denom[final_slice]
number_overlap_masked_px = number_overlap_masked_px[final_slice]
if mode == 'same':
_centering = partial(_centered,
newshape=fixed_image.shape, axes=axes)
denom = _centering(denom)
numerator = _centering(numerator)
number_overlap_masked_px = _centering(number_overlap_masked_px)
# Pixels where `denom` is very small will introduce large
# numbers after division. To get around this problem,
# we zero-out problematic pixels.
tol = 1e3 * eps * np.max(np.abs(denom), axis=axes, keepdims=True)
nonzero_indices = denom > tol
out = np.zeros_like(denom)
out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices]
np.clip(out, a_min=-1, a_max=1, out=out)
# Apply overlap ratio threshold
number_px_threshold = overlap_ratio * np.max(number_overlap_masked_px,
axis=axes, keepdims=True)
out[number_overlap_masked_px < number_px_threshold] = 0.0
return out
def _centered(arr, newshape, axes):
""" Return the center `newshape` portion of `arr`, leaving axes not
in `axes` untouched. """
newshape = np.asarray(newshape)
currshape = np.array(arr.shape)
slices = [slice(None, None)] * arr.ndim
for ax in axes:
startind = (currshape[ax] - newshape[ax]) // 2
endind = startind + newshape[ax]
slices[ax] = slice(startind, endind)
return arr[tuple(slices)]
def _flip(arr, axes=None):
""" Reverse array over many axes. Generalization of arr[::-1] for many
dimensions. If `axes` is `None`, flip along all axes. """
if axes is None:
reverse = [slice(None, None, -1)] * arr.ndim
else:
reverse = [slice(None, None, None)] * arr.ndim
for axis in axes:
reverse[axis] = slice(None, None, -1)
return arr[tuple(reverse)]
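
# A hedged self-check sketch (illustration only, not part of the public API):
# recover a known integer shift between two views of a random image, using
# all-valid masks.
#
#   >>> rng = np.random.RandomState(42)
#   >>> reference = rng.rand(64, 64)
#   >>> moving = np.roll(reference, (3, -5), axis=(0, 1))
#   >>> mask = np.ones_like(reference, dtype=bool)
#   >>> _masked_phase_cross_correlation(reference, moving, mask, mask)
#   array([-3.,  5.])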

@@ -0,0 +1,218 @@
# coding: utf-8
"""TV-L1 optical flow algorithm implementation.
"""
from functools import partial
import numpy as np
from scipy import ndimage as ndi
from skimage.transform import warp
from ._optical_flow_utils import coarse_to_fine
def _tvl1(reference_image, moving_image, flow0, attachment, tightness,
num_warp, num_iter, tol, prefilter):
"""TV-L1 solver for optical flow estimation.
Parameters
----------
reference_image : ndarray, shape (M, N[, P[, ...]])
The first gray scale image of the sequence.
moving_image : ndarray, shape (M, N[, P[, ...]])
The second gray scale image of the sequence.
    flow0 : ndarray, shape (reference_image.ndim, M, N[, P[, ...]])
        Initialization for the vector field.
    attachment : float
        Attachment parameter. The smaller this parameter is,
        the smoother the solution is.
    tightness : float
        Tightness parameter. It should have a small value in order to
        keep the attachment and regularization terms balanced.
    num_warp : int
        Number of times ``moving_image`` is warped.
    num_iter : int
        Number of fixed point iterations.
tol : float
Tolerance used as stopping criterion based on the distance
between two consecutive values of (u, v).
prefilter : bool
Whether to prefilter the estimated optical flow before each
image warp.
Returns
-------
    flow : ndarray, shape (reference_image.ndim, M, N[, P[, ...]])
The estimated optical flow components for each axis.
"""
dtype = reference_image.dtype
grid = np.meshgrid(*[np.arange(n, dtype=dtype)
for n in reference_image.shape],
indexing='ij')
dt = 0.5 / reference_image.ndim
reg_num_iter = 2
f0 = attachment * tightness
f1 = dt / tightness
tol *= reference_image.size
flow_current = flow_previous = flow0
g = np.zeros((reference_image.ndim,) + reference_image.shape, dtype=dtype)
proj = np.zeros((reference_image.ndim, reference_image.ndim,)
+ reference_image.shape, dtype=dtype)
s_g = [slice(None), ] * g.ndim
s_p = [slice(None), ] * proj.ndim
s_d = [slice(None), ] * (proj.ndim-2)
for _ in range(num_warp):
if prefilter:
flow_current = ndi.median_filter(flow_current,
[1] + reference_image.ndim * [3])
image1_warp = warp(moving_image, grid + flow_current, mode='nearest')
grad = np.array(np.gradient(image1_warp))
NI = (grad*grad).sum(0)
NI[NI == 0] = 1
rho_0 = image1_warp - reference_image - (grad * flow_current).sum(0)
for _ in range(num_iter):
# Data term
rho = rho_0 + (grad*flow_current).sum(0)
idx = abs(rho) <= f0 * NI
flow_auxiliary = flow_current
flow_auxiliary[:, idx] -= rho[idx]*grad[:, idx]/NI[idx]
idx = ~idx
srho = f0 * np.sign(rho[idx])
flow_auxiliary[:, idx] -= srho*grad[:, idx]
# Regularization term
flow_current = flow_auxiliary.copy()
for idx in range(reference_image.ndim):
s_p[0] = idx
for _ in range(reg_num_iter):
for ax in range(reference_image.ndim):
s_g[0] = ax
s_g[ax+1] = slice(0, -1)
g[tuple(s_g)] = np.diff(flow_current[idx], axis=ax)
s_g[ax+1] = slice(None)
norm = np.sqrt((g ** 2).sum(0))[np.newaxis, ...]
norm *= f1
norm += 1.
proj[idx] -= dt * g
proj[idx] /= norm
# d will be the (negative) divergence of proj[idx]
d = -proj[idx].sum(0)
for ax in range(reference_image.ndim):
s_p[1] = ax
s_p[ax+2] = slice(0, -1)
s_d[ax] = slice(1, None)
d[tuple(s_d)] += proj[tuple(s_p)]
s_p[ax+2] = slice(None)
s_d[ax] = slice(None)
flow_current[idx] = flow_auxiliary[idx] + d
        flow_previous -= flow_current  # The difference as stopping criterion
if (flow_previous*flow_previous).sum() < tol:
break
flow_previous = flow_current
return flow_current
def optical_flow_tvl1(reference_image, moving_image,
*,
attachment=15, tightness=0.3, num_warp=5, num_iter=10,
tol=1e-4, prefilter=False, dtype=np.float32):
r"""Coarse to fine optical flow estimator.
    The TV-L1 solver is applied at each level of the image
    pyramid. TV-L1 is a popular algorithm for optical flow estimation
    introduced by Zach et al. [1]_, improved in [2]_ and detailed in [3]_.
Parameters
----------
reference_image : ndarray, shape (M, N[, P[, ...]])
The first gray scale image of the sequence.
moving_image : ndarray, shape (M, N[, P[, ...]])
The second gray scale image of the sequence.
    attachment : float, optional
        Attachment parameter (:math:`\lambda` in [1]_). The smaller
        this parameter is, the smoother the returned result will be.
    tightness : float, optional
        Tightness parameter (:math:`\tau` in [1]_). It should have
        a small value in order to keep the attachment and
        regularization terms balanced.
    num_warp : int, optional
        Number of times ``moving_image`` is warped.
    num_iter : int, optional
        Number of fixed point iterations.
tol : float, optional
Tolerance used as stopping criterion based on the distance
between two consecutive values of (u, v).
    prefilter : bool, optional
        Whether to prefilter the estimated optical flow before each
        image warp. This helps to remove potential outliers.
dtype : dtype, optional
Output data type: must be floating point. Single precision
provides good results and saves memory usage and computation
time compared to double precision.
Returns
-------
    flow : ndarray, shape (reference_image.ndim, M, N[, P[, ...]])
The estimated optical flow components for each axis.
Notes
-----
Color images are not supported.
References
----------
    .. [1] Zach, C., Pock, T., & Bischof, H. (2007, September). A
       duality based approach for realtime TV-L1 optical flow. In Joint
       pattern recognition symposium (pp. 214-223). Springer, Berlin,
       Heidelberg. :DOI:`10.1007/978-3-540-74936-3_22`
    .. [2] Wedel, A., Pock, T., Zach, C., Bischof, H., & Cremers,
       D. (2009). An improved algorithm for TV-L1 optical flow. In
       Statistical and geometrical approaches to visual motion analysis
       (pp. 23-45). Springer, Berlin, Heidelberg.
       :DOI:`10.1007/978-3-642-03061-1_2`
.. [3] Pérez, J. S., Meinhardt-Llopis, E., & Facciolo,
G. (2013). TV-L1 optical flow estimation. Image Processing On
Line, 2013, 137-150. :DOI:`10.5201/ipol.2013.26`
Examples
--------
>>> from skimage.color import rgb2gray
>>> from skimage.data import stereo_motorcycle
>>> from skimage.registration import optical_flow_tvl1
>>> image0, image1, disp = stereo_motorcycle()
>>> # --- Convert the images to gray level: color is not supported.
>>> image0 = rgb2gray(image0)
>>> image1 = rgb2gray(image1)
>>> flow = optical_flow_tvl1(image1, image0)
"""
solver = partial(_tvl1, attachment=attachment,
tightness=tightness, num_warp=num_warp, num_iter=num_iter,
tol=tol, prefilter=prefilter)
return coarse_to_fine(reference_image, moving_image, solver, dtype=dtype)
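
# A hedged follow-up sketch (illustration only), continuing the docstring
# example above: the estimated flow can be used to warp the moving image
# (here ``image0``) onto the reference ``image1``, a quick qualitative check
# of the registration.
#
#   >>> nr, nc = image1.shape
#   >>> row_coords, col_coords = np.meshgrid(np.arange(nr), np.arange(nc),
#   ...                                      indexing='ij')
#   >>> image0_warp = warp(image0, np.array([row_coords + flow[0],
#   ...                                      col_coords + flow[1]]),
#   ...                    mode='nearest')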

@@ -0,0 +1,127 @@
# coding: utf-8
"""Common tools to optical flow algorithms.
"""
import numpy as np
from skimage.transform import pyramid_reduce
from skimage.util.dtype import _convert
from scipy import ndimage as ndi
def resize_flow(flow, shape):
"""Rescale the values of the vector field (u, v) to the desired shape.
The values of the output vector field are scaled to the new
resolution.
Parameters
----------
flow : ndarray
The motion field to be processed.
    shape : iterable
        The target shape, given as an iterable of integers.
Returns
-------
rflow : ndarray
The resized and rescaled motion field.
"""
scale = [n / o for n, o in zip(shape, flow.shape[1:])]
scale_factor = np.array(scale, dtype=flow.dtype)
for _ in shape:
scale_factor = scale_factor[..., np.newaxis]
rflow = scale_factor*ndi.zoom(flow, [1] + scale, order=0,
mode='nearest', prefilter=False)
return rflow
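
# Illustrative check (an assumption, not part of the API): doubling the grid
# resolution also doubles the displacement values, since a one-pixel motion
# at the coarse level spans two pixels at the fine level.
#
#   >>> flow = np.ones((2, 4, 4), dtype=np.float32)
#   >>> resize_flow(flow, (8, 8)).max()
#   2.0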
def get_pyramid(I, downscale=2.0, nlevel=10, min_size=16):
"""Construct image pyramid.
Parameters
----------
I : ndarray
        The image to be preprocessed (gray scale or RGB).
downscale : float
The pyramid downscale factor.
nlevel : int
The maximum number of pyramid levels.
min_size : int
The minimum size for any dimension of the pyramid levels.
Returns
-------
pyramid : list[ndarray]
        The coarse-to-fine image pyramid.
"""
pyramid = [I]
size = min(I.shape)
count = 1
while (count < nlevel) and (size > downscale * min_size):
J = pyramid_reduce(pyramid[-1], downscale, multichannel=False)
pyramid.append(J)
size = min(J.shape)
count += 1
return pyramid[::-1]
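
# Illustrative sketch (an assumption, with the default arguments): a 256x256
# image yields a coarse-to-fine pyramid that stops once the smallest
# dimension reaches ``downscale * min_size``.
#
#   >>> [p.shape for p in get_pyramid(np.zeros((256, 256)))]
#   [(32, 32), (64, 64), (128, 128), (256, 256)]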
def coarse_to_fine(I0, I1, solver, downscale=2, nlevel=10, min_size=16,
dtype=np.float32):
"""Generic coarse to fine solver.
Parameters
----------
I0 : ndarray
The first gray scale image of the sequence.
I1 : ndarray
The second gray scale image of the sequence.
solver : callable
        The solver applied at each pyramid level.
downscale : float
The pyramid downscale factor.
nlevel : int
The maximum number of pyramid levels.
min_size : int
The minimum size for any dimension of the pyramid levels.
dtype : dtype
Output data type.
Returns
-------
flow : ndarray
The estimated optical flow components for each axis.
"""
if I0.shape != I1.shape:
raise ValueError("Input images should have the same shape")
    if np.dtype(dtype).char not in 'efdg':
        raise ValueError("Only floating point data types are valid"
                         " for optical flow")
pyramid = list(zip(get_pyramid(_convert(I0, dtype),
downscale, nlevel, min_size),
get_pyramid(_convert(I1, dtype),
downscale, nlevel, min_size)))
# Initialization to 0 at coarsest level.
flow = np.zeros((pyramid[0][0].ndim, ) + pyramid[0][0].shape,
dtype=dtype)
flow = solver(pyramid[0][0], pyramid[0][1], flow)
for J0, J1 in pyramid[1:]:
flow = solver(J0, J1, resize_flow(flow, J0.shape))
return flow
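
# A hedged sketch (hypothetical solver, for illustration): any callable with
# the signature ``solver(I0, I1, flow0) -> flow`` can be driven through the
# pyramid, e.g. a trivial solver that returns its initialization unchanged.
#
#   >>> def identity_solver(I0, I1, flow0):
#   ...     return flow0
#   >>> flow = coarse_to_fine(np.zeros((64, 64)), np.zeros((64, 64)),
#   ...                       identity_solver)
#   >>> flow.shape
#   (2, 64, 64)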

@@ -0,0 +1,270 @@
"""
Port of Manuel Guizar's code from:
http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation
"""
import numpy as np
from .._shared.fft import fftmodule as fft
from ._masked_phase_cross_correlation import _masked_phase_cross_correlation
def _upsampled_dft(data, upsampled_region_size,
upsample_factor=1, axis_offsets=None):
"""
Upsampled DFT by matrix multiplication.
This code is intended to provide the same result as if the following
operations were performed:
- Embed the array "data" in an array that is ``upsample_factor`` times
larger in each dimension. ifftshift to bring the center of the
image to (1,1).
- Take the FFT of the larger array.
- Extract an ``[upsampled_region_size]`` region of the result, starting
with the ``[axis_offsets+1]`` element.
    It achieves this result by computing the DFT in the output array without
    the need to zero-pad. It is much faster and more memory-efficient than the
    zero-padded FFT approach if ``upsampled_region_size`` is much smaller than
    ``data.size * upsample_factor``.
Parameters
----------
data : array
The input data array (DFT of original data) to upsample.
upsampled_region_size : integer or tuple of integers, optional
The size of the region to be sampled. If one integer is provided, it
is duplicated up to the dimensionality of ``data``.
upsample_factor : integer, optional
The upsampling factor. Defaults to 1.
axis_offsets : tuple of integers, optional
The offsets of the region to be sampled. Defaults to None (uses
image center)
Returns
-------
output : ndarray
The upsampled DFT of the specified region.
"""
# if people pass in an integer, expand it to a list of equal-sized sections
if not hasattr(upsampled_region_size, "__iter__"):
upsampled_region_size = [upsampled_region_size, ] * data.ndim
else:
if len(upsampled_region_size) != data.ndim:
raise ValueError("shape of upsampled region sizes must be equal "
"to input data's number of dimensions.")
if axis_offsets is None:
axis_offsets = [0, ] * data.ndim
else:
if len(axis_offsets) != data.ndim:
raise ValueError("number of axis offsets must be equal to input "
"data's number of dimensions.")
im2pi = 1j * 2 * np.pi
dim_properties = list(zip(data.shape, upsampled_region_size, axis_offsets))
for (n_items, ups_size, ax_offset) in dim_properties[::-1]:
kernel = ((np.arange(ups_size) - ax_offset)[:, None]
* fft.fftfreq(n_items, upsample_factor))
kernel = np.exp(-im2pi * kernel)
# Equivalent to:
# data[i, j, k] = kernel[i, :] @ data[j, k].T
data = np.tensordot(kernel, data, axes=(1, -1))
return data
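
# Consistency sketch (illustration only, 1-D case): with ``upsample_factor=1``
# and the full region, the conjugated upsampled DFT reduces to a plain inverse
# FFT, up to normalization by the signal length.
#
#   >>> x = np.random.rand(8)
#   >>> X = np.fft.fft(x)
#   >>> np.allclose(_upsampled_dft(X.conj(), 8, 1, (0,)).conj() / 8, x)
#   True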
def _compute_phasediff(cross_correlation_max):
"""
Compute global phase difference between the two images (should be
zero if images are non-negative).
Parameters
----------
cross_correlation_max : complex
The complex value of the cross correlation at its maximum point.
"""
return np.arctan2(cross_correlation_max.imag, cross_correlation_max.real)
def _compute_error(cross_correlation_max, src_amp, target_amp):
"""
Compute RMS error metric between ``src_image`` and ``target_image``.
Parameters
----------
cross_correlation_max : complex
The complex value of the cross correlation at its maximum point.
src_amp : float
The normalized average image intensity of the source image
target_amp : float
The normalized average image intensity of the target image
"""
error = 1.0 - cross_correlation_max * cross_correlation_max.conj() /\
(src_amp * target_amp)
return np.sqrt(np.abs(error))
def phase_cross_correlation(reference_image, moving_image, *,
upsample_factor=1, space="real",
return_error=True, reference_mask=None,
moving_mask=None, overlap_ratio=0.3):
"""Efficient subpixel image translation registration by cross-correlation.
This code gives the same precision as the FFT upsampled cross-correlation
in a fraction of the computation time and with reduced memory requirements.
It obtains an initial estimate of the cross-correlation peak by an FFT and
then refines the shift estimation by upsampling the DFT only in a small
neighborhood of that estimate by means of a matrix-multiply DFT.
Parameters
----------
reference_image : array
Reference image.
moving_image : array
Image to register. Must be same dimensionality as
``reference_image``.
upsample_factor : int, optional
Upsampling factor. Images will be registered to within
``1 / upsample_factor`` of a pixel. For example
``upsample_factor == 20`` means the images will be registered
within 1/20th of a pixel. Default is 1 (no upsampling).
        Not used if ``reference_mask`` or ``moving_mask`` is not None.
    space : string, one of "real" or "fourier", optional
        Defines how the algorithm interprets input data. "real" means
        data will be FFT'd to compute the correlation, while "fourier"
        means the data will bypass the FFT step. Case insensitive. Not
        used if ``reference_mask`` or ``moving_mask`` is not None.
    return_error : bool, optional
        If True, return the error and global phase difference in addition
        to the shifts; otherwise only the shifts are returned. Has no
        effect if ``reference_mask`` or ``moving_mask`` is not None: in
        that case only the shifts are returned.
reference_mask : ndarray
Boolean mask for ``reference_image``. The mask should evaluate
to ``True`` (or 1) on valid pixels. ``reference_mask`` should
have the same shape as ``reference_image``.
moving_mask : ndarray or None, optional
Boolean mask for ``moving_image``. The mask should evaluate to ``True``
(or 1) on valid pixels. ``moving_mask`` should have the same shape
as ``moving_image``. If ``None``, ``reference_mask`` will be used.
overlap_ratio : float, optional
Minimum allowed overlap ratio between images. The correlation for
translations corresponding with an overlap ratio lower than this
threshold will be ignored. A lower `overlap_ratio` leads to smaller
maximum translation, while a higher `overlap_ratio` leads to greater
robustness against spurious matches due to small overlap between
        masked images. Used only if one of ``reference_mask`` or
        ``moving_mask`` is not None.
Returns
-------
shifts : ndarray
Shift vector (in pixels) required to register ``moving_image``
with ``reference_image``. Axis ordering is consistent with
        numpy (e.g. Z, Y, X).
error : float
Translation invariant normalized RMS error between
``reference_image`` and ``moving_image``.
phasediff : float
Global phase difference between the two images (should be
zero if images are non-negative).
References
----------
.. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms,"
Optics Letters 33, 156-158 (2008). :DOI:`10.1364/OL.33.000156`
    .. [2] James R. Fienup, "Invariant error metrics for image
       reconstruction," Applied Optics 36, 8352-8357 (1997).
       :DOI:`10.1364/AO.36.008352`
.. [3] Dirk Padfield. Masked Object Registration in the Fourier Domain.
IEEE Transactions on Image Processing, vol. 21(5),
pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`
.. [4] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and
Pattern Recognition, pp. 2918-2925 (2010).
:DOI:`10.1109/CVPR.2010.5540032`
"""
if (reference_mask is not None) or (moving_mask is not None):
return _masked_phase_cross_correlation(reference_image, moving_image,
reference_mask, moving_mask,
overlap_ratio)
# images must be the same shape
if reference_image.shape != moving_image.shape:
raise ValueError("images must be same shape")
# assume complex data is already in Fourier space
if space.lower() == 'fourier':
src_freq = reference_image
target_freq = moving_image
# real data needs to be fft'd.
elif space.lower() == 'real':
src_freq = fft.fftn(reference_image)
target_freq = fft.fftn(moving_image)
else:
        raise ValueError('space argument must be "real" or "fourier"')
# Whole-pixel shift - Compute cross-correlation by an IFFT
shape = src_freq.shape
image_product = src_freq * target_freq.conj()
cross_correlation = fft.ifftn(image_product)
# Locate maximum
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
cross_correlation.shape)
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
shifts = np.array(maxima, dtype=np.float64)
shifts[shifts > midpoints] -= np.array(shape)[shifts > midpoints]
if upsample_factor == 1:
if return_error:
src_amp = np.sum(np.abs(src_freq) ** 2) / src_freq.size
target_amp = np.sum(np.abs(target_freq) ** 2) / target_freq.size
CCmax = cross_correlation[maxima]
# If upsampling > 1, then refine estimate with matrix multiply DFT
else:
# Initial shift estimate in upsampled grid
shifts = np.round(shifts * upsample_factor) / upsample_factor
upsampled_region_size = np.ceil(upsample_factor * 1.5)
# Center of output array at dftshift + 1
dftshift = np.fix(upsampled_region_size / 2.0)
upsample_factor = np.array(upsample_factor, dtype=np.float64)
normalization = (src_freq.size * upsample_factor ** 2)
# Matrix multiply DFT around the current shift estimate
sample_region_offset = dftshift - shifts*upsample_factor
cross_correlation = _upsampled_dft(image_product.conj(),
upsampled_region_size,
upsample_factor,
sample_region_offset).conj()
cross_correlation /= normalization
# Locate maximum and map back to original pixel grid
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
cross_correlation.shape)
CCmax = cross_correlation[maxima]
maxima = np.array(maxima, dtype=np.float64) - dftshift
shifts = shifts + maxima / upsample_factor
if return_error:
src_amp = _upsampled_dft(src_freq * src_freq.conj(),
1, upsample_factor)[0, 0]
src_amp /= normalization
target_amp = _upsampled_dft(target_freq * target_freq.conj(),
1, upsample_factor)[0, 0]
target_amp /= normalization
    # If the image has only one row or column, the shift along that
    # dimension has no effect: set it to zero.
for dim in range(src_freq.ndim):
if shape[dim] == 1:
shifts[dim] = 0
if return_error:
return shifts, _compute_error(CCmax, src_amp, target_amp),\
_compute_phasediff(CCmax)
else:
return shifts
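
# A hedged subpixel self-check sketch (illustration only; it assumes
# ``scipy.ndimage.fourier_shift``, and SciPy is already a dependency here):
# apply a known fractional shift in the Fourier domain and recover it with
# ``upsample_factor``.
#
#   >>> from scipy.ndimage import fourier_shift
#   >>> rng = np.random.RandomState(0)
#   >>> image = rng.rand(128, 128)
#   >>> moving = fft.ifftn(fourier_shift(fft.fftn(image), (-2.4, 1.32))).real
#   >>> shift, error, phasediff = phase_cross_correlation(
#   ...     image, moving, upsample_factor=100)
#   >>> shift  # approximately [2.4, -1.32]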