Fixed database typo and removed unnecessary class identifier.
This commit is contained in:
parent
00ad49a143
commit
45fb349a7d
5098 changed files with 952558 additions and 85 deletions
28
venv/Lib/site-packages/skimage/restoration/__init__.py
Normal file
@@ -0,0 +1,28 @@
"""Image restoration module.
|
||||
|
||||
"""
|
||||
|
||||
from .deconvolution import wiener, unsupervised_wiener, richardson_lucy
|
||||
from .unwrap import unwrap_phase
|
||||
from ._denoise import (denoise_tv_chambolle, denoise_tv_bregman,
|
||||
denoise_bilateral, denoise_wavelet, estimate_sigma)
|
||||
from ._cycle_spin import cycle_spin
|
||||
from .non_local_means import denoise_nl_means
|
||||
from .inpaint import inpaint_biharmonic
|
||||
from .j_invariant import calibrate_denoiser
|
||||
|
||||
|
||||
__all__ = ['wiener',
|
||||
'unsupervised_wiener',
|
||||
'richardson_lucy',
|
||||
'unwrap_phase',
|
||||
'denoise_tv_bregman',
|
||||
'denoise_tv_chambolle',
|
||||
'denoise_bilateral',
|
||||
'denoise_wavelet',
|
||||
'denoise_nl_means',
|
||||
'estimate_sigma',
|
||||
'inpaint_biharmonic',
|
||||
'cycle_spin',
|
||||
'calibrate_denoiser',
|
||||
]
|
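A minimal usage sketch of the public API re-exported above (the sample image, noise level and parameter values are illustrative, not part of this commit):

import numpy as np
from skimage import data, img_as_float
from skimage.restoration import estimate_sigma, denoise_tv_chambolle

# make a noisy test image
img = img_as_float(data.camera())
noisy = img + 0.1 * np.random.standard_normal(img.shape)

# estimate the noise level, then denoise with total-variation regularization
sigma_est = estimate_sigma(noisy, multichannel=False)
denoised = denoise_tv_chambolle(noisy, weight=0.1, multichannel=False)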
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
145
venv/Lib/site-packages/skimage/restoration/_cycle_spin.py
Normal file
@@ -0,0 +1,145 @@
from itertools import product
|
||||
import numpy as np
|
||||
from .._shared.utils import warn
|
||||
|
||||
try:
|
||||
import dask
|
||||
dask_available = True
|
||||
except ImportError:
|
||||
dask_available = False
|
||||
|
||||
|
||||
def _generate_shifts(ndim, multichannel, max_shifts, shift_steps=1):
|
||||
"""Returns all combinations of shifts in n dimensions over the specified
|
||||
max_shifts and step sizes.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> s = list(_generate_shifts(2, False, max_shifts=(1, 2), shift_steps=1))
|
||||
>>> print(s)
|
||||
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]
|
||||
"""
|
||||
mc = int(multichannel)
|
||||
if np.isscalar(max_shifts):
|
||||
max_shifts = (max_shifts, ) * (ndim - mc) + (0, ) * mc
|
||||
elif multichannel and len(max_shifts) == ndim - 1:
|
||||
max_shifts = tuple(max_shifts) + (0, )
|
||||
elif len(max_shifts) != ndim:
|
||||
raise ValueError("max_shifts should have length ndim")
|
||||
|
||||
if np.isscalar(shift_steps):
|
||||
shift_steps = (shift_steps, ) * (ndim - mc) + (1, ) * mc
|
||||
elif multichannel and len(shift_steps) == ndim - 1:
|
||||
shift_steps = tuple(shift_steps) + (1, )
|
||||
elif len(shift_steps) != ndim:
|
||||
raise ValueError("max_shifts should have length ndim")
|
||||
|
||||
if any(s < 1 for s in shift_steps):
|
||||
raise ValueError("shift_steps must all be >= 1")
|
||||
|
||||
if multichannel and max_shifts[-1] != 0:
|
||||
raise ValueError(
|
||||
"Multichannel cycle spinning should not have shifts along the "
|
||||
"last axis.")
|
||||
|
||||
return product(*[range(0, s + 1, t) for
|
||||
s, t in zip(max_shifts, shift_steps)])
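# Illustrative note (not in the original file): with ``multichannel=True`` the
# channel axis is pinned to a zero shift, e.g. for a 3-D RGB-like array
#     list(_generate_shifts(3, True, max_shifts=1))
# yields [(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0)].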
|
||||
|
||||
|
||||
def cycle_spin(x, func, max_shifts, shift_steps=1, num_workers=None,
|
||||
multichannel=False, func_kw={}):
|
||||
"""Cycle spinning (repeatedly apply func to shifted versions of x).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
x : array-like
|
||||
Data for input to ``func``.
|
||||
func : function
|
||||
A function to apply to circularly shifted versions of ``x``. Should
|
||||
take ``x`` as its first argument. Any additional arguments can be
|
||||
supplied via ``func_kw``.
|
||||
max_shifts : int or tuple
|
||||
If an integer, shifts in ``range(0, max_shifts+1)`` will be used along
|
||||
each axis of ``x``. If a tuple, ``range(0, max_shifts[i]+1)`` will be
|
||||
along axis i.
|
||||
shift_steps : int or tuple, optional
|
||||
The step sizes for the shifts applied along axis ``i`` are given by
``range(0, max_shifts[i]+1, shift_steps[i])``. If an integer is
provided, the same step size is used for all axes.
|
||||
num_workers : int or None, optional
|
||||
The number of parallel threads to use during cycle spinning. If set to
|
||||
``None``, the full set of available cores is used.
|
||||
multichannel : bool, optional
|
||||
Whether to treat the final axis as channels (no cycle shifts are
|
||||
performed over the channels axis).
|
||||
func_kw : dict, optional
|
||||
Additional keyword arguments to supply to ``func``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
avg_y : np.ndarray
|
||||
The output of ``func(x, **func_kw)`` averaged over all combinations of
|
||||
the specified axis shifts.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Cycle spinning was proposed as a way to approach shift-invariance via
|
||||
performing several circular shifts of a shift-variant transform [1]_.
|
||||
|
||||
For an n-level discrete wavelet transform, one may wish to perform all
|
||||
shifts up to ``max_shifts = 2**n - 1``. In practice, much of the benefit
|
||||
can often be realized with only a small number of shifts per axis.
|
||||
|
||||
For transforms such as the blockwise discrete cosine transform, one may
|
||||
wish to evaluate shifts up to the block size used by the transform.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] R.R. Coifman and D.L. Donoho. "Translation-Invariant De-Noising".
|
||||
Wavelets and Statistics, Lecture Notes in Statistics, vol.103.
|
||||
Springer, New York, 1995, pp.125-150.
|
||||
:DOI:`10.1007/978-1-4612-2544-7_9`
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import skimage.data
|
||||
>>> from skimage import img_as_float
|
||||
>>> from skimage.restoration import denoise_wavelet, cycle_spin
|
||||
>>> img = img_as_float(skimage.data.camera())
|
||||
>>> sigma = 0.1
|
||||
>>> img = img + sigma * np.random.standard_normal(img.shape)
|
||||
>>> denoised = cycle_spin(img, func=denoise_wavelet,
|
||||
... max_shifts=3) # doctest: +SKIP
|
||||
|
||||
"""
|
||||
x = np.asanyarray(x)
|
||||
all_shifts = _generate_shifts(x.ndim, multichannel, max_shifts,
|
||||
shift_steps)
|
||||
all_shifts = list(all_shifts)
|
||||
roll_axes = tuple(range(x.ndim))
|
||||
|
||||
def _run_one_shift(shift):
|
||||
# shift, apply function, inverse shift
|
||||
xs = np.roll(x, shift, axis=roll_axes)
|
||||
tmp = func(xs, **func_kw)
|
||||
return np.roll(tmp, tuple(-s for s in shift), axis=roll_axes)
|
||||
|
||||
if not dask_available and (num_workers is None or num_workers > 1):
|
||||
num_workers = 1
|
||||
warn('The optional dask dependency is not installed. '
|
||||
'The number of workers is set to 1. To silence '
|
||||
'this warning, install dask or explicitly set `num_workers=1` '
|
||||
'when calling the `cycle_spin` function')
|
||||
# compute a running average across the cycle shifts
|
||||
if num_workers == 1:
|
||||
# serial processing
|
||||
mean = _run_one_shift(all_shifts[0])
|
||||
for shift in all_shifts[1:]:
|
||||
mean += _run_one_shift(shift)
|
||||
mean /= len(all_shifts)
|
||||
else:
|
||||
# multithreaded via dask
|
||||
futures = [dask.delayed(_run_one_shift)(s) for s in all_shifts]
|
||||
mean = sum(futures) / len(futures)
|
||||
mean = mean.compute(num_workers=num_workers)
|
||||
return mean
|
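For reference, the serial branch of ``cycle_spin`` above is equivalent to the sketch below, which spells out the shift / transform / unshift / average structure (a toy illustration using a median filter as the shift-variant operation; all names here are illustrative):

import numpy as np
from itertools import product
from scipy.ndimage import median_filter

def manual_cycle_spin(x, func, max_shift):
    # average func over all circular shifts up to max_shift along every axis
    axes = tuple(range(x.ndim))
    shifts = list(product(*(range(max_shift + 1) for _ in axes)))
    acc = np.zeros_like(np.asarray(x, dtype=float))
    for shift in shifts:
        shifted = np.roll(x, shift, axis=axes)
        restored = func(shifted)
        acc += np.roll(restored, tuple(-s for s in shift), axis=axes)
    return acc / len(shifts)

noisy = np.random.rand(32, 32)
smoothed = manual_cycle_spin(noisy, lambda a: median_filter(a, size=3),
                             max_shift=2)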
935
venv/Lib/site-packages/skimage/restoration/_denoise.py
Normal file
@@ -0,0 +1,935 @@
import scipy.stats
|
||||
import numpy as np
|
||||
from math import ceil
|
||||
from .. import img_as_float
|
||||
from ._denoise_cy import _denoise_bilateral, _denoise_tv_bregman
|
||||
from .._shared.utils import warn
|
||||
import pywt
|
||||
import skimage.color as color
|
||||
from skimage.color.colorconv import ycbcr_from_rgb
|
||||
import numbers
|
||||
|
||||
|
||||
def _gaussian_weight(array, sigma_squared, *, dtype=float):
|
||||
"""Helping function. Define a Gaussian weighting from array and
|
||||
sigma_square.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
array : ndarray
|
||||
Input array.
|
||||
sigma_squared : float
|
||||
The squared standard deviation used in the filter.
|
||||
dtype : data type object, optional (default : float)
|
||||
The type and size of the data to be returned.
|
||||
|
||||
Returns
|
||||
-------
|
||||
gaussian : ndarray
|
||||
The input array filtered by the Gaussian.
|
||||
"""
|
||||
return np.exp(-0.5 * (array ** 2 / sigma_squared), dtype=dtype)
|
||||
|
||||
|
||||
def _compute_color_lut(bins, sigma, max_value, *, dtype=float):
|
||||
"""Helping function. Define a lookup table containing Gaussian filter
|
||||
values using the color distance sigma.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
bins : int
|
||||
Number of discrete values for Gaussian weights of color filtering.
|
||||
A larger value results in improved accuracy.
|
||||
sigma : float
|
||||
Standard deviation for grayvalue/color distance (radiometric
|
||||
similarity). A larger value results in averaging of pixels with larger
|
||||
radiometric differences. Note, that the image will be converted using
|
||||
the `img_as_float` function and thus the standard deviation is in
|
||||
respect to the range ``[0, 1]``. If the value is ``None`` the standard
|
||||
deviation of the ``image`` will be used.
|
||||
max_value : float
|
||||
Maximum value of the input image.
|
||||
dtype : data type object, optional (default : float)
|
||||
The type and size of the data to be returned.
|
||||
|
||||
Returns
|
||||
-------
|
||||
color_lut : ndarray
|
||||
Lookup table for the color distance sigma.
|
||||
"""
|
||||
values = np.linspace(0, max_value, bins, endpoint=False)
|
||||
return _gaussian_weight(values, sigma**2, dtype=dtype)
|
||||
|
||||
|
||||
def _compute_spatial_lut(win_size, sigma, *, dtype=float):
|
||||
"""Helping function. Define a lookup table containing Gaussian filter
|
||||
values using the spatial sigma.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
win_size : int
|
||||
Window size for filtering.
|
||||
If win_size is not specified, it is calculated as
|
||||
``max(5, 2 * ceil(3 * sigma_spatial) + 1)``.
|
||||
sigma : float
|
||||
Standard deviation for range distance. A larger value results in
|
||||
averaging of pixels with larger spatial differences.
|
||||
dtype : data type object
|
||||
The type and size of the data to be returned.
|
||||
|
||||
Returns
|
||||
-------
|
||||
spatial_lut : ndarray
|
||||
Lookup table for the spatial sigma.
|
||||
"""
|
||||
grid_points = np.arange(-win_size // 2, win_size // 2 + 1)
|
||||
rr, cc = np.meshgrid(grid_points, grid_points, indexing='ij')
|
||||
distances = np.hypot(rr, cc)
|
||||
return _gaussian_weight(distances, sigma**2, dtype=dtype).ravel()
|
||||
|
||||
|
||||
def denoise_bilateral(image, win_size=None, sigma_color=None, sigma_spatial=1,
|
||||
bins=10000, mode='constant', cval=0, multichannel=False):
|
||||
"""Denoise image using bilateral filter.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray, shape (M, N[, 3])
|
||||
Input image, 2D grayscale or RGB.
|
||||
win_size : int
|
||||
Window size for filtering.
|
||||
If win_size is not specified, it is calculated as
|
||||
``max(5, 2 * ceil(3 * sigma_spatial) + 1)``.
|
||||
sigma_color : float
|
||||
Standard deviation for grayvalue/color distance (radiometric
|
||||
similarity). A larger value results in averaging of pixels with larger
|
||||
radiometric differences. Note, that the image will be converted using
|
||||
the `img_as_float` function and thus the standard deviation is in
|
||||
respect to the range ``[0, 1]``. If the value is ``None`` the standard
|
||||
deviation of the ``image`` will be used.
|
||||
sigma_spatial : float
|
||||
Standard deviation for range distance. A larger value results in
|
||||
averaging of pixels with larger spatial differences.
|
||||
bins : int
|
||||
Number of discrete values for Gaussian weights of color filtering.
|
||||
A larger value results in improved accuracy.
|
||||
mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}
|
||||
How to handle values outside the image borders. See
|
||||
`numpy.pad` for detail.
|
||||
cval : int or float
|
||||
Used in conjunction with mode 'constant', the value outside
|
||||
the image boundaries.
|
||||
multichannel : bool
|
||||
Whether the last axis of the image is to be interpreted as multiple
|
||||
channels or another spatial dimension.
|
||||
|
||||
Returns
|
||||
-------
|
||||
denoised : ndarray
|
||||
Denoised image.
|
||||
|
||||
Notes
|
||||
-----
|
||||
This is an edge-preserving, denoising filter. It averages pixels based on
|
||||
their spatial closeness and radiometric similarity [1]_.
|
||||
|
||||
Spatial closeness is measured by the Gaussian function of the Euclidean
|
||||
distance between two pixels and a certain standard deviation
|
||||
(`sigma_spatial`).
|
||||
|
||||
Radiometric similarity is measured by the Gaussian function of the
|
||||
Euclidean distance between two color values and a certain standard
|
||||
deviation (`sigma_color`).
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] C. Tomasi and R. Manduchi. "Bilateral Filtering for Gray and Color
|
||||
Images." IEEE International Conference on Computer Vision (1998)
|
||||
839-846. :DOI:`10.1109/ICCV.1998.710815`
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage import data, img_as_float
|
||||
>>> astro = img_as_float(data.astronaut())
|
||||
>>> astro = astro[220:300, 220:320]
|
||||
>>> noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape)
|
||||
>>> noisy = np.clip(noisy, 0, 1)
|
||||
>>> denoised = denoise_bilateral(noisy, sigma_color=0.05, sigma_spatial=15,
|
||||
... multichannel=True)
|
||||
"""
|
||||
if multichannel:
|
||||
if image.ndim != 3:
|
||||
if image.ndim == 2:
|
||||
raise ValueError("Use ``multichannel=False`` for 2D grayscale "
|
||||
"images. The last axis of the input image "
|
||||
"must be multiple color channels not another "
|
||||
"spatial dimension.")
|
||||
else:
|
||||
raise ValueError("Bilateral filter is only implemented for "
|
||||
"2D grayscale images (image.ndim == 2) and "
|
||||
"2D multichannel (image.ndim == 3) images, "
|
||||
"but the input image has {0} dimensions. "
|
||||
"".format(image.ndim))
|
||||
elif image.shape[2] not in (3, 4):
|
||||
if image.shape[2] > 4:
|
||||
msg = ("The last axis of the input image is interpreted as "
|
||||
"channels. Input image with shape {0} has {1} channels "
|
||||
"in last axis. ``denoise_bilateral`` is implemented "
|
||||
"for 2D grayscale and color images only")
|
||||
warn(msg.format(image.shape, image.shape[2]))
|
||||
else:
|
||||
msg = "Input image must be grayscale, RGB, or RGBA; " \
|
||||
"but has shape {0}."
|
||||
warn(msg.format(image.shape))
|
||||
else:
|
||||
if image.ndim > 2:
|
||||
raise ValueError("Bilateral filter is not implemented for "
|
||||
"grayscale images of 3 or more dimensions, "
|
||||
"but input image has {0} dimension. Use "
|
||||
"``multichannel=True`` for 2-D RGB "
|
||||
"images.".format(image.shape))
|
||||
|
||||
if win_size is None:
|
||||
win_size = max(5, 2 * int(ceil(3 * sigma_spatial)) + 1)
|
||||
|
||||
min_value = image.min()
|
||||
max_value = image.max()
|
||||
|
||||
if min_value == max_value:
|
||||
return image
|
||||
|
||||
# if image.max() is 0, then dist_scale can have an unverified value
|
||||
# and color_lut[<int>(dist * dist_scale)] may cause a segmentation fault
|
||||
# so we verify we have a positive image and that the max is not 0.0.
|
||||
if min_value < 0.0:
|
||||
raise ValueError("Image must contain only positive values")
|
||||
|
||||
if max_value == 0.0:
|
||||
raise ValueError("The maximum value found in the image was 0.")
|
||||
|
||||
image = np.atleast_3d(img_as_float(image))
|
||||
image = np.ascontiguousarray(image)
|
||||
|
||||
sigma_color = sigma_color or image.std()
|
||||
|
||||
color_lut = _compute_color_lut(bins, sigma_color, max_value,
|
||||
dtype=image.dtype)
|
||||
|
||||
range_lut = _compute_spatial_lut(win_size, sigma_spatial, dtype=image.dtype)
|
||||
|
||||
out = np.empty(image.shape, dtype=image.dtype)
|
||||
|
||||
dims = image.shape[2]
|
||||
|
||||
# There are a number of arrays needed in the Cython function.
|
||||
# It's easier to allocate them outside of Cython so that all
|
||||
# arrays are in the same type, then just copy the empty array
|
||||
# where needed within Cython.
|
||||
empty_dims = np.empty(dims, dtype=image.dtype)
|
||||
|
||||
return _denoise_bilateral(image, image.max(), win_size, sigma_color,
|
||||
sigma_spatial, bins, mode, cval, color_lut,
|
||||
range_lut, empty_dims, out)
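# --- Illustrative sketch (not part of the original module) -------------------
# A direct, unoptimized form of the bilateral weighting described in the
# docstring above: each neighbor is weighted by a spatial Gaussian times a
# range (radiometric) Gaussian.  The Cython kernel called above uses lookup
# tables instead; this helper name is hypothetical.
def _naive_bilateral_pixel(patch, sigma_color, sigma_spatial):
    """Bilateral-filter the center pixel of an odd-sized 2-D ``patch``."""
    r = patch.shape[0] // 2
    rr, cc = np.meshgrid(np.arange(-r, r + 1), np.arange(-r, r + 1),
                         indexing='ij')
    spatial_w = np.exp(-0.5 * (rr ** 2 + cc ** 2) / sigma_spatial ** 2)
    range_w = np.exp(-0.5 * (patch - patch[r, r]) ** 2 / sigma_color ** 2)
    weights = spatial_w * range_w
    return np.sum(weights * patch) / np.sum(weights)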
|
||||
|
||||
|
||||
def denoise_tv_bregman(image, weight, max_iter=100, eps=1e-3, isotropic=True,
|
||||
*, multichannel=False):
|
||||
"""Perform total-variation denoising using split-Bregman optimization.
|
||||
|
||||
Total-variation denoising (also known as total-variation regularization)
|
||||
tries to find an image with less total-variation under the constraint
|
||||
of being similar to the input image, which is controlled by the
|
||||
regularization parameter ([1]_, [2]_, [3]_, [4]_).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input data to be denoised (converted using `img_as_float`).
|
||||
weight : float
|
||||
Denoising weight. The smaller the `weight`, the more denoising (at
|
||||
the expense of less similarity to the `input`). The regularization
|
||||
parameter `lambda` is chosen as `2 * weight`.
|
||||
eps : float, optional
|
||||
Relative difference of the value of the cost function that determines
|
||||
the stop criterion. The algorithm stops when::
|
||||
|
||||
SUM((u(n) - u(n-1))**2) < eps
|
||||
|
||||
max_iter : int, optional
|
||||
Maximal number of iterations used for the optimization.
|
||||
isotropic : boolean, optional
|
||||
Switch between isotropic and anisotropic TV denoising.
|
||||
multichannel : bool, optional
|
||||
Apply total-variation denoising separately for each channel. This
|
||||
option should be true for color images, otherwise the denoising is
|
||||
also applied in the channels dimension.
|
||||
|
||||
Returns
|
||||
-------
|
||||
u : ndarray
|
||||
Denoised image.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] https://en.wikipedia.org/wiki/Total_variation_denoising
|
||||
.. [2] Tom Goldstein and Stanley Osher, "The Split Bregman Method For L1
|
||||
Regularized Problems",
|
||||
ftp://ftp.math.ucla.edu/pub/camreport/cam08-29.pdf
|
||||
.. [3] Pascal Getreuer, "Rudin–Osher–Fatemi Total Variation Denoising
|
||||
using Split Bregman" in Image Processing On Line on 2012–05–19,
|
||||
https://www.ipol.im/pub/art/2012/g-tvd/article_lr.pdf
|
||||
.. [4] https://web.math.ucsb.edu/~cgarcia/UGProjects/BregmanAlgorithms_JacquelineBush.pdf
|
||||
|
||||
"""
|
||||
image = np.atleast_3d(img_as_float(image))
|
||||
|
||||
rows = image.shape[0]
|
||||
cols = image.shape[1]
|
||||
dims = image.shape[2]
|
||||
|
||||
shape_ext = (rows + 2, cols + 2, dims)
|
||||
|
||||
out = np.zeros(shape_ext, image.dtype)
|
||||
|
||||
if multichannel:
|
||||
channel_out = np.zeros(shape_ext[:2] + (1,), dtype=out.dtype)
|
||||
for c in range(image.shape[-1]):
|
||||
# the algorithm below expects 3 dimensions to always be present.
|
||||
# slicing the array in this fashion preserves the channel dimension for us
|
||||
channel_in = np.ascontiguousarray(image[..., c:c+1])
|
||||
|
||||
_denoise_tv_bregman(channel_in, image.dtype.type(weight),
|
||||
max_iter, eps, isotropic, channel_out)
|
||||
|
||||
out[..., c] = channel_out[..., 0]
|
||||
|
||||
else:
|
||||
image = np.ascontiguousarray(image)
|
||||
|
||||
_denoise_tv_bregman(image, image.dtype.type(weight), max_iter, eps,
|
||||
isotropic, out)
|
||||
|
||||
return np.squeeze(out[1:-1, 1:-1])
|
||||
|
||||
|
||||
def _denoise_tv_chambolle_nd(image, weight=0.1, eps=2.e-4, n_iter_max=200):
|
||||
"""Perform total-variation denoising on n-dimensional images.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
n-D input data to be denoised.
|
||||
weight : float, optional
|
||||
Denoising weight. The greater `weight`, the more denoising (at
|
||||
the expense of fidelity to `input`).
|
||||
eps : float, optional
|
||||
Relative difference of the value of the cost function that determines
|
||||
the stop criterion. The algorithm stops when:
|
||||
|
||||
(E_(n-1) - E_n) < eps * E_0
|
||||
|
||||
n_iter_max : int, optional
|
||||
Maximal number of iterations used for the optimization.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray
|
||||
Denoised array of floats.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Rudin, Osher and Fatemi algorithm.
|
||||
"""
|
||||
|
||||
ndim = image.ndim
|
||||
p = np.zeros((image.ndim, ) + image.shape, dtype=image.dtype)
|
||||
g = np.zeros_like(p)
|
||||
d = np.zeros_like(image)
|
||||
i = 0
|
||||
while i < n_iter_max:
|
||||
if i > 0:
|
||||
# d will be the (negative) divergence of p
|
||||
d = -p.sum(0)
|
||||
slices_d = [slice(None), ] * ndim
|
||||
slices_p = [slice(None), ] * (ndim + 1)
|
||||
for ax in range(ndim):
|
||||
slices_d[ax] = slice(1, None)
|
||||
slices_p[ax+1] = slice(0, -1)
|
||||
slices_p[0] = ax
|
||||
d[tuple(slices_d)] += p[tuple(slices_p)]
|
||||
slices_d[ax] = slice(None)
|
||||
slices_p[ax+1] = slice(None)
|
||||
out = image + d
|
||||
else:
|
||||
out = image
|
||||
E = (d ** 2).sum()
|
||||
|
||||
# g stores the gradients of out along each axis
|
||||
# e.g. g[0] is the first order finite difference along axis 0
|
||||
slices_g = [slice(None), ] * (ndim + 1)
|
||||
for ax in range(ndim):
|
||||
slices_g[ax+1] = slice(0, -1)
|
||||
slices_g[0] = ax
|
||||
g[tuple(slices_g)] = np.diff(out, axis=ax)
|
||||
slices_g[ax+1] = slice(None)
|
||||
|
||||
norm = np.sqrt((g ** 2).sum(axis=0))[np.newaxis, ...]
|
||||
E += weight * norm.sum()
|
||||
tau = 1. / (2.*ndim)
|
||||
norm *= tau / weight
|
||||
norm += 1.
|
||||
p -= tau * g
|
||||
p /= norm
|
||||
E /= float(image.size)
|
||||
if i == 0:
|
||||
E_init = E
|
||||
E_previous = E
|
||||
else:
|
||||
if np.abs(E_previous - E) < eps * E_init:
|
||||
break
|
||||
else:
|
||||
E_previous = E
|
||||
i += 1
|
||||
return out
|
||||
|
||||
|
||||
def denoise_tv_chambolle(image, weight=0.1, eps=2.e-4, n_iter_max=200,
|
||||
multichannel=False):
|
||||
"""Perform total-variation denoising on n-dimensional images.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray of ints, uints or floats
|
||||
Input data to be denoised. `image` can be of any numeric type,
|
||||
but it is cast into an ndarray of floats for the computation
|
||||
of the denoised image.
|
||||
weight : float, optional
|
||||
Denoising weight. The greater `weight`, the more denoising (at
|
||||
the expense of fidelity to `input`).
|
||||
eps : float, optional
|
||||
Relative difference of the value of the cost function that
|
||||
determines the stop criterion. The algorithm stops when:
|
||||
|
||||
(E_(n-1) - E_n) < eps * E_0
|
||||
|
||||
n_iter_max : int, optional
|
||||
Maximal number of iterations used for the optimization.
|
||||
multichannel : bool, optional
|
||||
Apply total-variation denoising separately for each channel. This
|
||||
option should be true for color images, otherwise the denoising is
|
||||
also applied in the channels dimension.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray
|
||||
Denoised image.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Make sure to set the multichannel parameter appropriately for color images.
|
||||
|
||||
The principle of total variation denoising is explained in
|
||||
https://en.wikipedia.org/wiki/Total_variation_denoising
|
||||
|
||||
The principle of total variation denoising is to minimize the
|
||||
total variation of the image, which can be roughly described as
|
||||
the integral of the norm of the image gradient. Total variation
|
||||
denoising tends to produce "cartoon-like" images, that is,
|
||||
piecewise-constant images.
|
||||
|
||||
This code is an implementation of the algorithm of Rudin, Fatemi and Osher
|
||||
that was proposed by Chambolle in [1]_.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] A. Chambolle, An algorithm for total variation minimization and
|
||||
applications, Journal of Mathematical Imaging and Vision,
|
||||
Springer, 2004, 20, 89-97.
|
||||
|
||||
Examples
|
||||
--------
|
||||
2D example on astronaut image:
|
||||
|
||||
>>> from skimage import color, data
|
||||
>>> img = color.rgb2gray(data.astronaut())[:50, :50]
|
||||
>>> img += 0.5 * img.std() * np.random.randn(*img.shape)
|
||||
>>> denoised_img = denoise_tv_chambolle(img, weight=60)
|
||||
|
||||
3D example on synthetic data:
|
||||
|
||||
>>> x, y, z = np.ogrid[0:20, 0:20, 0:20]
|
||||
>>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
|
||||
>>> mask = mask.astype(np.float)
|
||||
>>> mask += 0.2*np.random.randn(*mask.shape)
|
||||
>>> res = denoise_tv_chambolle(mask, weight=100)
|
||||
|
||||
"""
|
||||
|
||||
im_type = image.dtype
|
||||
if not im_type.kind == 'f':
|
||||
image = img_as_float(image)
|
||||
|
||||
if multichannel:
|
||||
out = np.zeros_like(image)
|
||||
for c in range(image.shape[-1]):
|
||||
out[..., c] = _denoise_tv_chambolle_nd(image[..., c], weight, eps,
|
||||
n_iter_max)
|
||||
else:
|
||||
out = _denoise_tv_chambolle_nd(image, weight, eps, n_iter_max)
|
||||
return out
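# --- Illustrative sketch (not part of the original module) -------------------
# The quantity minimized above is, roughly, the integral of the gradient norm.
# A discrete isotropic total variation of a 2-D image can be written as:
def _total_variation_2d(image):
    """Sum of gradient magnitudes over the interior of a 2-D image."""
    dy = np.diff(image, axis=0)[:, :-1]
    dx = np.diff(image, axis=1)[:-1, :]
    return np.sum(np.sqrt(dx ** 2 + dy ** 2))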
|
||||
|
||||
|
||||
def _bayes_thresh(details, var):
|
||||
"""BayesShrink threshold for a zero-mean details coeff array."""
|
||||
# Equivalent to: dvar = np.var(details) for 0-mean details array
|
||||
dvar = np.mean(details*details)
|
||||
eps = np.finfo(details.dtype).eps
|
||||
thresh = var / np.sqrt(max(dvar - var, eps))
|
||||
return thresh
|
||||
|
||||
|
||||
def _universal_thresh(img, sigma):
|
||||
""" Universal threshold used by the VisuShrink method """
|
||||
return sigma*np.sqrt(2*np.log(img.size))
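# --- Illustrative numeric check (not part of the original module) ------------
# For a 512 x 512 image (262144 pixels) with an estimated sigma of 0.1, the
# VisuShrink universal threshold above is
#     0.1 * np.sqrt(2 * np.log(262144))  ~= 0.50
# whereas the BayesShrink threshold adapts to each subband:
#     sigma**2 / sqrt(max(var(detail) - sigma**2, eps))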
|
||||
|
||||
|
||||
def _sigma_est_dwt(detail_coeffs, distribution='Gaussian'):
|
||||
"""Calculate the robust median estimator of the noise standard deviation.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
detail_coeffs : ndarray
|
||||
The detail coefficients corresponding to the discrete wavelet
|
||||
transform of an image.
|
||||
distribution : str
|
||||
The underlying noise distribution.
|
||||
|
||||
Returns
|
||||
-------
|
||||
sigma : float
|
||||
The estimated noise standard deviation (see section 4.2 of [1]_).
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation
|
||||
by wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
|
||||
:DOI:`10.1093/biomet/81.3.425`
|
||||
"""
|
||||
# Consider regions with detail coefficients exactly zero to be masked out
|
||||
detail_coeffs = detail_coeffs[np.nonzero(detail_coeffs)]
|
||||
|
||||
if distribution.lower() == 'gaussian':
|
||||
# 75th quantile of the underlying, symmetric noise distribution
|
||||
denom = scipy.stats.norm.ppf(0.75)
|
||||
sigma = np.median(np.abs(detail_coeffs)) / denom
|
||||
else:
|
||||
raise ValueError("Only Gaussian noise estimation is currently "
|
||||
"supported")
|
||||
return sigma
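# --- Illustrative sanity check (not part of the original module) -------------
# The estimator above is median(|detail|) / Phi^{-1}(0.75), i.e. roughly
# median(|detail|) / 0.6745.  For purely Gaussian samples it recovers the true
# standard deviation; this demo helper is hypothetical.
def _demo_robust_sigma(n=100000, true_sigma=2.0, seed=0):
    rng = np.random.RandomState(seed)
    d = rng.standard_normal(n) * true_sigma
    return np.median(np.abs(d)) / scipy.stats.norm.ppf(0.75)  # ~= true_sigma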
|
||||
|
||||
|
||||
def _wavelet_threshold(image, wavelet, method=None, threshold=None,
|
||||
sigma=None, mode='soft', wavelet_levels=None):
|
||||
"""Perform wavelet thresholding.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray (2d or 3d) of ints, uints or floats
|
||||
Input data to be denoised. `image` can be of any numeric type,
|
||||
but it is cast into an ndarray of floats for the computation
|
||||
of the denoised image.
|
||||
wavelet : string
|
||||
The type of wavelet to perform. Can be any of the options
|
||||
pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
|
||||
db3, db4, haar}``.
|
||||
method : {'BayesShrink', 'VisuShrink'}, optional
|
||||
Thresholding method to be used. The currently supported methods are
|
||||
"BayesShrink" [1]_ and "VisuShrink" [2]_. If it is set to None, a
|
||||
user-specified ``threshold`` must be supplied instead.
|
||||
threshold : float, optional
|
||||
The thresholding value to apply during wavelet coefficient
|
||||
thresholding. The default value (None) uses the selected ``method`` to
|
||||
estimate appropriate threshold(s) for noise removal.
|
||||
sigma : float, optional
|
||||
The standard deviation of the noise. The noise is estimated when sigma
|
||||
is None (the default) by the method in [2]_.
|
||||
mode : {'soft', 'hard'}, optional
|
||||
An optional argument to choose the type of denoising performed. It is
noted that choosing soft thresholding given additive noise finds the
best approximation of the original image.
|
||||
wavelet_levels : int or None, optional
|
||||
The number of wavelet decomposition levels to use. The default is
|
||||
three less than the maximum number of possible decomposition levels
|
||||
(see Notes below).
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray
|
||||
Denoised image.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
|
||||
thresholding for image denoising and compression." Image Processing,
|
||||
IEEE Transactions on 9.9 (2000): 1532-1546.
|
||||
:DOI:`10.1109/83.862633`
|
||||
.. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation
|
||||
by wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
|
||||
:DOI:`10.1093/biomet/81.3.425`
|
||||
"""
|
||||
wavelet = pywt.Wavelet(wavelet)
|
||||
if not wavelet.orthogonal:
|
||||
warn(("Wavelet thresholding was designed for use with orthogonal "
|
||||
"wavelets. For nonorthogonal wavelets such as {}, results are "
|
||||
"likely to be suboptimal.").format(wavelet.name))
|
||||
|
||||
# original_extent is used to workaround PyWavelets issue #80
|
||||
# odd-sized input results in an image with 1 extra sample after waverecn
|
||||
original_extent = tuple(slice(s) for s in image.shape)
|
||||
|
||||
# Determine the number of wavelet decomposition levels
|
||||
if wavelet_levels is None:
|
||||
# Determine the maximum number of possible levels for image
|
||||
dlen = wavelet.dec_len
|
||||
wavelet_levels = pywt.dwtn_max_level(image.shape, wavelet)
|
||||
|
||||
# Skip coarsest wavelet scales (see Notes in docstring).
|
||||
wavelet_levels = max(wavelet_levels - 3, 1)
|
||||
|
||||
coeffs = pywt.wavedecn(image, wavelet=wavelet, level=wavelet_levels)
|
||||
# Detail coefficients at each decomposition level
|
||||
dcoeffs = coeffs[1:]
|
||||
|
||||
if sigma is None:
|
||||
# Estimate the noise via the method in [2]_
|
||||
detail_coeffs = dcoeffs[-1]['d' * image.ndim]
|
||||
sigma = _sigma_est_dwt(detail_coeffs, distribution='Gaussian')
|
||||
|
||||
if method is not None and threshold is not None:
|
||||
warn(("Thresholding method {} selected. The user-specified threshold "
|
||||
"will be ignored.").format(method))
|
||||
|
||||
if threshold is None:
|
||||
var = sigma**2
|
||||
if method is None:
|
||||
raise ValueError(
|
||||
"If method is None, a threshold must be provided.")
|
||||
elif method == "BayesShrink":
|
||||
# The BayesShrink thresholds from [1]_ in docstring
|
||||
threshold = [{key: _bayes_thresh(level[key], var) for key in level}
|
||||
for level in dcoeffs]
|
||||
elif method == "VisuShrink":
|
||||
# The VisuShrink thresholds from [2]_ in docstring
|
||||
threshold = _universal_thresh(image, sigma)
|
||||
else:
|
||||
raise ValueError("Unrecognized method: {}".format(method))
|
||||
|
||||
if np.isscalar(threshold):
|
||||
# A single threshold for all coefficient arrays
|
||||
denoised_detail = [{key: pywt.threshold(level[key],
|
||||
value=threshold,
|
||||
mode=mode) for key in level}
|
||||
for level in dcoeffs]
|
||||
else:
|
||||
# Dict of unique threshold coefficients for each detail coeff. array
|
||||
denoised_detail = [{key: pywt.threshold(level[key],
|
||||
value=thresh[key],
|
||||
mode=mode) for key in level}
|
||||
for thresh, level in zip(threshold, dcoeffs)]
|
||||
denoised_coeffs = [coeffs[0]] + denoised_detail
|
||||
return pywt.waverecn(denoised_coeffs, wavelet)[original_extent]
|
||||
|
||||
|
||||
def _scale_sigma_and_image_consistently(image, sigma, multichannel,
|
||||
rescale_sigma):
|
||||
"""If the ``image`` is rescaled, also rescale ``sigma`` consistently.
|
||||
|
||||
Images that are not floating point will be rescaled via ``img_as_float``.
|
||||
"""
|
||||
if multichannel:
|
||||
if isinstance(sigma, numbers.Number) or sigma is None:
|
||||
sigma = [sigma] * image.shape[-1]
|
||||
elif len(sigma) != image.shape[-1]:
|
||||
raise ValueError(
|
||||
"When multichannel is True, sigma must be a scalar or have "
|
||||
"length equal to the number of channels")
|
||||
if image.dtype.kind != 'f':
|
||||
if rescale_sigma:
|
||||
range_pre = image.max() - image.min()
|
||||
image = img_as_float(image)
|
||||
if rescale_sigma:
|
||||
range_post = image.max() - image.min()
|
||||
# apply the same magnitude scaling to sigma
|
||||
scale_factor = range_post / range_pre
|
||||
if multichannel:
|
||||
sigma = [s * scale_factor if s is not None else s
|
||||
for s in sigma]
|
||||
elif sigma is not None:
|
||||
sigma *= scale_factor
|
||||
return image, sigma
|
||||
|
||||
|
||||
def _rescale_sigma_rgb2ycbcr(sigmas):
|
||||
"""Convert user-provided noise standard deviations to YCbCr space.
|
||||
|
||||
Notes
|
||||
-----
|
||||
If R, G, B are linearly independent random variables and a1, a2, a3 are
|
||||
scalars, then random variable C:
|
||||
C = a1 * R + a2 * G + a3 * B
|
||||
has variance, var_C, given by:
|
||||
var_C = a1**2 * var_R + a2**2 * var_G + a3**2 * var_B
|
||||
"""
|
||||
if sigmas[0] is None:
|
||||
return sigmas
|
||||
sigmas = np.asarray(sigmas)
|
||||
rgv_variances = sigmas * sigmas
|
||||
for i in range(3):
|
||||
scalars = ycbcr_from_rgb[i, :]
|
||||
var_channel = np.sum(scalars * scalars * rgv_variances)
|
||||
sigmas[i] = np.sqrt(var_channel)
|
||||
return sigmas
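# --- Illustrative sketch (not part of the original module) -------------------
# The loop above applies the usual variance-propagation rule for a linear
# combination of uncorrelated channels stated in the Notes.  The same rule for
# an arbitrary 3 x 3 mixing matrix (hypothetical helper, matrix assumed to be
# an ndarray):
def _propagate_sigmas(sigmas, mixing_matrix):
    variances = np.asarray(sigmas, dtype=float) ** 2
    return np.sqrt(mixing_matrix ** 2 @ variances)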
|
||||
|
||||
|
||||
def denoise_wavelet(image, sigma=None, wavelet='db1', mode='soft',
|
||||
wavelet_levels=None, multichannel=False,
|
||||
convert2ycbcr=False, method='BayesShrink',
|
||||
rescale_sigma=None):
|
||||
"""Perform wavelet denoising on an image.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray ([M[, N[, ...P]][, C]) of ints, uints or floats
|
||||
Input data to be denoised. `image` can be of any numeric type,
|
||||
but it is cast into an ndarray of floats for the computation
|
||||
of the denoised image.
|
||||
sigma : float or list, optional
|
||||
The noise standard deviation used when computing the wavelet detail
|
||||
coefficient threshold(s). When None (default), the noise standard
|
||||
deviation is estimated via the method in [2]_.
|
||||
wavelet : string, optional
|
||||
The type of wavelet to perform and can be any of the options
|
||||
``pywt.wavelist`` outputs. The default is `'db1'`. For example,
|
||||
``wavelet`` can be any of ``{'db2', 'haar', 'sym9'}`` and many more.
|
||||
mode : {'soft', 'hard'}, optional
|
||||
An optional argument to choose the type of denoising performed. It is
noted that choosing soft thresholding given additive noise finds the
best approximation of the original image.
|
||||
wavelet_levels : int or None, optional
|
||||
The number of wavelet decomposition levels to use. The default is
|
||||
three less than the maximum number of possible decomposition levels.
|
||||
multichannel : bool, optional
|
||||
Apply wavelet denoising separately for each channel (where channels
|
||||
correspond to the final axis of the array).
|
||||
convert2ycbcr : bool, optional
|
||||
If True and multichannel True, do the wavelet denoising in the YCbCr
|
||||
colorspace instead of the RGB color space. This typically results in
|
||||
better performance for RGB images.
|
||||
method : {'BayesShrink', 'VisuShrink'}, optional
|
||||
Thresholding method to be used. The currently supported methods are
|
||||
"BayesShrink" [1]_ and "VisuShrink" [2]_. Defaults to "BayesShrink".
|
||||
rescale_sigma : bool or None, optional
|
||||
If False, no rescaling of the user-provided ``sigma`` will be
|
||||
performed. The default of ``None`` rescales sigma appropriately if the
|
||||
image is rescaled internally. A ``DeprecationWarning`` is raised to
|
||||
warn the user about this new behaviour. This warning can be avoided
|
||||
by setting ``rescale_sigma=True``.
|
||||
|
||||
.. versionadded:: 0.16
|
||||
``rescale_sigma`` was introduced in 0.16
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray
|
||||
Denoised image.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The wavelet domain is a sparse representation of the image, and can be
|
||||
thought of similarly to the frequency domain of the Fourier transform.
|
||||
Sparse representations have most values zero or near-zero and truly random
|
||||
noise is (usually) represented by many small values in the wavelet domain.
|
||||
Setting all values below some threshold to 0 reduces the noise in the
|
||||
image, but larger thresholds also decrease the detail present in the image.
|
||||
|
||||
If the input is 3D, this function performs wavelet denoising on each color
|
||||
plane separately.
|
||||
|
||||
.. versionchanged:: 0.16
|
||||
For floating point inputs, the original input range is maintained and
|
||||
there is no clipping applied to the output. Other input types will be
|
||||
converted to a floating point value in the range [-1, 1] or [0, 1]
|
||||
depending on the input image range. Unless ``rescale_sigma = False``,
|
||||
any internal rescaling applied to the ``image`` will also be applied
|
||||
to ``sigma`` to maintain the same relative amplitude.
|
||||
|
||||
Many wavelet coefficient thresholding approaches have been proposed. By
|
||||
default, ``denoise_wavelet`` applies BayesShrink, which is an adaptive
|
||||
thresholding method that computes separate thresholds for each wavelet
|
||||
sub-band as described in [1]_.
|
||||
|
||||
If ``method == "VisuShrink"``, a single "universal threshold" is applied to
|
||||
all wavelet detail coefficients as described in [2]_. This threshold
|
||||
is designed to remove all Gaussian noise at a given ``sigma`` with high
|
||||
probability, but tends to produce images that appear overly smooth.
|
||||
|
||||
Although any of the wavelets from ``PyWavelets`` can be selected, the
|
||||
thresholding methods assume an orthogonal wavelet transform and may not
|
||||
choose the threshold appropriately for biorthogonal wavelets. Orthogonal
|
||||
wavelets are desirable because white noise in the input remains white noise
|
||||
in the subbands. Biorthogonal wavelets lead to colored noise in the
|
||||
subbands. Additionally, the orthogonal wavelets in PyWavelets are
|
||||
orthonormal so that noise variance in the subbands remains identical to the
|
||||
noise variance of the input. Example orthogonal wavelets are the Daubechies
|
||||
(e.g. 'db2') or symmlet (e.g. 'sym2') families.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Chang, S. Grace, Bin Yu, and Martin Vetterli. "Adaptive wavelet
|
||||
thresholding for image denoising and compression." Image Processing,
|
||||
IEEE Transactions on 9.9 (2000): 1532-1546.
|
||||
:DOI:`10.1109/83.862633`
|
||||
.. [2] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation
|
||||
by wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
|
||||
:DOI:`10.1093/biomet/81.3.425`
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage import color, data
|
||||
>>> img = img_as_float(data.astronaut())
|
||||
>>> img = color.rgb2gray(img)
|
||||
>>> img += 0.1 * np.random.randn(*img.shape)
|
||||
>>> img = np.clip(img, 0, 1)
|
||||
>>> denoised_img = denoise_wavelet(img, sigma=0.1, rescale_sigma=True)
|
||||
|
||||
"""
|
||||
if method not in ["BayesShrink", "VisuShrink"]:
|
||||
raise ValueError(
|
||||
('Invalid method: {}. The currently supported methods are '
|
||||
'"BayesShrink" and "VisuShrink"').format(method))
|
||||
|
||||
# floating-point inputs are not rescaled, so don't clip their output.
|
||||
clip_output = image.dtype.kind != 'f'
|
||||
|
||||
if convert2ycbcr and not multichannel:
|
||||
raise ValueError("convert2ycbcr requires multichannel == True")
|
||||
|
||||
if rescale_sigma is None:
|
||||
msg = (
|
||||
"As of scikit-image 0.16, automated rescaling of sigma to match "
|
||||
"any internal rescaling of the image is performed. Setting "
|
||||
"rescale_sigma to False, will disable this new behaviour. To "
|
||||
"avoid this warning the user should explicitly set rescale_sigma "
|
||||
"to True or False."
|
||||
)
|
||||
warn(msg, FutureWarning, stacklevel=2)
|
||||
rescale_sigma = True
|
||||
image, sigma = _scale_sigma_and_image_consistently(image,
|
||||
sigma,
|
||||
multichannel,
|
||||
rescale_sigma)
|
||||
if multichannel:
|
||||
if convert2ycbcr:
|
||||
out = color.rgb2ycbcr(image)
|
||||
# convert user-supplied sigmas to the new colorspace as well
|
||||
if rescale_sigma:
|
||||
sigma = _rescale_sigma_rgb2ycbcr(sigma)
|
||||
for i in range(3):
|
||||
# renormalizing this color channel to live in [0, 1]
|
||||
_min, _max = out[..., i].min(), out[..., i].max()
|
||||
scale_factor = _max - _min
|
||||
if scale_factor == 0:
|
||||
# skip any channel containing only zeros!
|
||||
continue
|
||||
channel = out[..., i] - _min
|
||||
channel /= scale_factor
|
||||
sigma_channel = sigma[i]
|
||||
if sigma_channel is not None:
|
||||
sigma_channel /= scale_factor
|
||||
out[..., i] = denoise_wavelet(channel,
|
||||
wavelet=wavelet,
|
||||
method=method,
|
||||
sigma=sigma_channel,
|
||||
mode=mode,
|
||||
wavelet_levels=wavelet_levels,
|
||||
rescale_sigma=rescale_sigma)
|
||||
out[..., i] = out[..., i] * scale_factor
|
||||
out[..., i] += _min
|
||||
out = color.ycbcr2rgb(out)
|
||||
else:
|
||||
out = np.empty_like(image)
|
||||
for c in range(image.shape[-1]):
|
||||
out[..., c] = _wavelet_threshold(image[..., c],
|
||||
wavelet=wavelet,
|
||||
method=method,
|
||||
sigma=sigma[c], mode=mode,
|
||||
wavelet_levels=wavelet_levels)
|
||||
else:
|
||||
out = _wavelet_threshold(image, wavelet=wavelet, method=method,
|
||||
sigma=sigma, mode=mode,
|
||||
wavelet_levels=wavelet_levels)
|
||||
|
||||
if clip_output:
|
||||
clip_range = (-1, 1) if image.min() < 0 else (0, 1)
|
||||
out = np.clip(out, *clip_range, out=out)
|
||||
return out
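# --- Illustrative sketch (not part of the original module) -------------------
# The decompose -> shrink detail coefficients -> reconstruct loop that the
# docstring describes, written directly against PyWavelets for a single level
# and a fixed threshold (the implementation above additionally handles levels,
# sigma estimation and the thresholding strategies):
def _one_level_soft_denoise(image, threshold, wavelet='db1'):
    coeffs = pywt.dwtn(image, wavelet)
    shrunk = {key: (val if key == 'a' * image.ndim
                    else pywt.threshold(val, value=threshold, mode='soft'))
              for key, val in coeffs.items()}
    return pywt.idwtn(shrunk, wavelet)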
|
||||
|
||||
|
||||
def estimate_sigma(image, average_sigmas=False, multichannel=False):
|
||||
"""
|
||||
Robust wavelet-based estimator of the (Gaussian) noise standard deviation.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Image for which to estimate the noise standard deviation.
|
||||
average_sigmas : bool, optional
|
||||
If true, average the channel estimates of `sigma`. Otherwise return
|
||||
a list of sigmas corresponding to each channel.
|
||||
multichannel : bool
|
||||
Estimate sigma separately for each channel.
|
||||
|
||||
Returns
|
||||
-------
|
||||
sigma : float or list
|
||||
Estimated noise standard deviation(s). If `multichannel` is True and
|
||||
`average_sigmas` is False, a separate noise estimate for each channel
|
||||
is returned. Otherwise, the average of the individual channel
|
||||
estimates is returned.
|
||||
|
||||
Notes
|
||||
-----
|
||||
This function assumes the noise follows a Gaussian distribution. The
|
||||
estimation algorithm is based on the median absolute deviation of the
|
||||
wavelet detail coefficients as described in section 4.2 of [1]_.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] D. L. Donoho and I. M. Johnstone. "Ideal spatial adaptation
|
||||
by wavelet shrinkage." Biometrika 81.3 (1994): 425-455.
|
||||
:DOI:`10.1093/biomet/81.3.425`
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import skimage.data
|
||||
>>> from skimage import img_as_float
|
||||
>>> img = img_as_float(skimage.data.camera())
|
||||
>>> sigma = 0.1
|
||||
>>> img = img + sigma * np.random.standard_normal(img.shape)
|
||||
>>> sigma_hat = estimate_sigma(img, multichannel=False)
|
||||
"""
|
||||
if multichannel:
|
||||
nchannels = image.shape[-1]
|
||||
sigmas = [estimate_sigma(
|
||||
image[..., c], multichannel=False) for c in range(nchannels)]
|
||||
if average_sigmas:
|
||||
sigmas = np.mean(sigmas)
|
||||
return sigmas
|
||||
elif image.shape[-1] <= 4:
|
||||
msg = ("image is size {0} on the last axis, but multichannel is "
|
||||
"False. If this is a color image, please set multichannel "
|
||||
"to True for proper noise estimation.")
|
||||
warn(msg.format(image.shape[-1]))
|
||||
coeffs = pywt.dwtn(image, wavelet='db2')
|
||||
detail_coeffs = coeffs['d' * image.ndim]
|
||||
return _sigma_est_dwt(detail_coeffs, distribution='Gaussian')
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
377
venv/Lib/site-packages/skimage/restoration/deconvolution.py
Normal file
@@ -0,0 +1,377 @@
"""Implementations restoration functions"""
|
||||
|
||||
|
||||
import numpy as np
|
||||
import numpy.random as npr
|
||||
from scipy.signal import convolve
|
||||
|
||||
from . import uft
|
||||
|
||||
__keywords__ = "restoration, image, deconvolution"
|
||||
|
||||
|
||||
def wiener(image, psf, balance, reg=None, is_real=True, clip=True):
|
||||
r"""Wiener-Hunt deconvolution
|
||||
|
||||
Return the deconvolution with a Wiener-Hunt approach (i.e. with
|
||||
Fourier diagonalisation).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : (M, N) ndarray
|
||||
Input degraded image
|
||||
psf : ndarray
|
||||
Point Spread Function. This is assumed to be the impulse
|
||||
response (input image space) if the data-type is real, or the
|
||||
transfer function (Fourier space) if the data-type is
|
||||
complex. There are no constraints on the shape of the impulse
|
||||
response. The transfer function must be of shape `(M, N)` if
|
||||
`is_real is True`, `(M, N // 2 + 1)` otherwise (see
|
||||
`np.fft.rfftn`).
|
||||
balance : float
|
||||
The regularisation parameter value that tunes the balance
|
||||
between the data adequacy that improves frequency restoration
and the prior adequacy that reduces frequency restoration (to
avoid noise artifacts).
|
||||
reg : ndarray, optional
|
||||
The regularisation operator. The Laplacian by default. It can
|
||||
be an impulse response or a transfer function, as for the
|
||||
psf. Shape constraint is the same as for the `psf` parameter.
|
||||
is_real : boolean, optional
|
||||
True by default. Specify if ``psf`` and ``reg`` are provided
|
||||
with hermitian hypothesis, that is only half of the frequency
|
||||
plane is provided (due to the redundancy of Fourier transform
|
||||
of real signal). It applies only if ``psf`` and/or ``reg`` are
|
||||
provided as transfer function. For the hermitian property see
|
||||
``uft`` module or ``np.fft.rfftn``.
|
||||
clip : boolean, optional
|
||||
True by default. If True, pixel values of the result above 1 or
|
||||
under -1 are thresholded for skimage pipeline compatibility.
|
||||
|
||||
Returns
|
||||
-------
|
||||
im_deconv : (M, N) ndarray
|
||||
The deconvolved image.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage import color, data, restoration
|
||||
>>> img = color.rgb2gray(data.astronaut())
|
||||
>>> from scipy.signal import convolve2d
|
||||
>>> psf = np.ones((5, 5)) / 25
|
||||
>>> img = convolve2d(img, psf, 'same')
|
||||
>>> img += 0.1 * img.std() * np.random.standard_normal(img.shape)
|
||||
>>> deconvolved_img = restoration.wiener(img, psf, 1100)
|
||||
|
||||
Notes
|
||||
-----
|
||||
This function applies the Wiener filter to a noisy and degraded
|
||||
image by an impulse response (or PSF). If the data model is
|
||||
|
||||
.. math:: y = Hx + n
|
||||
|
||||
where :math:`n` is noise, :math:`H` the PSF and :math:`x` the
|
||||
unknown original image, the Wiener filter is
|
||||
|
||||
.. math::
|
||||
\hat x = F^\dagger (|\Lambda_H|^2 + \lambda |\Lambda_D|^2)^{-1}
\Lambda_H^\dagger F y
|
||||
|
||||
where :math:`F` and :math:`F^\dagger` are the Fourier and inverse
|
||||
Fourier transforms respectively, :math:`\Lambda_H` the transfer
|
||||
function (or the Fourier transform of the PSF, see [Hunt] below)
|
||||
and :math:`\Lambda_D` the filter to penalize the restored image
|
||||
frequencies (Laplacian by default, that is penalization of high
|
||||
frequency). The parameter :math:`\lambda` tunes the balance
|
||||
between the data (that tends to increase high frequency, even
|
||||
those coming from noise), and the regularization.
|
||||
|
||||
These methods are then specific to a prior model. Consequently,
|
||||
the application or the true image nature must correspond to the
prior model. By default, the prior model (Laplacian) introduces
image smoothness or pixel correlation. It can also be interpreted
as high-frequency penalization to compensate for the instability of
|
||||
the solution with respect to the data (sometimes called noise
|
||||
amplification or "explosive" solution).
|
||||
|
||||
Finally, the use of Fourier space implies a circulant property of
|
||||
:math:`H`, see [Hunt].
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] François Orieux, Jean-François Giovannelli, and Thomas
|
||||
Rodet, "Bayesian estimation of regularization and point
|
||||
spread function parameters for Wiener-Hunt deconvolution",
|
||||
J. Opt. Soc. Am. A 27, 1593-1607 (2010)
|
||||
|
||||
https://www.osapublishing.org/josaa/abstract.cfm?URI=josaa-27-7-1593
|
||||
|
||||
http://research.orieux.fr/files/papers/OGR-JOSA10.pdf
|
||||
|
||||
.. [2] B. R. Hunt "A matrix theory proof of the discrete
|
||||
convolution theorem", IEEE Trans. on Audio and
|
||||
Electroacoustics, vol. au-19, no. 4, pp. 285-288, dec. 1971
|
||||
"""
|
||||
if reg is None:
|
||||
reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
|
||||
if not np.iscomplexobj(reg):
|
||||
reg = uft.ir2tf(reg, image.shape, is_real=is_real)
|
||||
|
||||
if psf.shape != reg.shape:
|
||||
trans_func = uft.ir2tf(psf, image.shape, is_real=is_real)
|
||||
else:
|
||||
trans_func = psf
|
||||
|
||||
wiener_filter = np.conj(trans_func) / (np.abs(trans_func) ** 2 +
|
||||
balance * np.abs(reg) ** 2)
|
||||
if is_real:
|
||||
deconv = uft.uirfft2(wiener_filter * uft.urfft2(image),
|
||||
shape=image.shape)
|
||||
else:
|
||||
deconv = uft.uifft2(wiener_filter * uft.ufft2(image))
|
||||
|
||||
if clip:
|
||||
deconv[deconv > 1] = 1
|
||||
deconv[deconv < -1] = -1
|
||||
|
||||
return deconv
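# --- Illustrative sketch (not part of the original module) -------------------
# The same filter written against plain ``np.fft`` (no unitary transforms, PSF
# assumed centered at the origin and zero-padded to the image shape, and a
# simple |frequency|^2 ramp standing in for the Laplacian regularizer).  The
# implementation above goes through the ``uft`` helpers instead; this function
# name is hypothetical.
def _wiener_fft_sketch(image, psf_padded, balance):
    h = np.fft.fft2(psf_padded)
    fy = np.fft.fftfreq(image.shape[0])[:, None]
    fx = np.fft.fftfreq(image.shape[1])[None, :]
    penalty = fy ** 2 + fx ** 2
    wiener_filter = np.conj(h) / (np.abs(h) ** 2 + balance * penalty)
    return np.real(np.fft.ifft2(wiener_filter * np.fft.fft2(image)))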
|
||||
|
||||
|
||||
def unsupervised_wiener(image, psf, reg=None, user_params=None, is_real=True,
|
||||
clip=True):
|
||||
"""Unsupervised Wiener-Hunt deconvolution.
|
||||
|
||||
Return the deconvolution with a Wiener-Hunt approach, where the
|
||||
hyperparameters are automatically estimated. The algorithm is a
|
||||
stochastic iterative process (Gibbs sampler) described in the
|
||||
reference below. See also ``wiener`` function.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : (M, N) ndarray
|
||||
The input degraded image.
|
||||
psf : ndarray
|
||||
The impulse response (input image's space) or the transfer
|
||||
function (Fourier space). Both are accepted. The transfer
|
||||
function is automatically recognized as being complex
|
||||
(``np.iscomplexobj(psf)``).
|
||||
reg : ndarray, optional
|
||||
The regularisation operator. The Laplacian by default. It can
|
||||
be an impulse response or a transfer function, as for the psf.
|
||||
user_params : dict, optional
|
||||
Dictionary of parameters for the Gibbs sampler. See below.
|
||||
clip : boolean, optional
|
||||
True by default. If true, pixel values of the result above 1 or
|
||||
under -1 are thresholded for skimage pipeline compatibility.
|
||||
|
||||
Returns
|
||||
-------
|
||||
x_postmean : (M, N) ndarray
|
||||
The deconvolved image (the posterior mean).
|
||||
chains : dict
|
||||
The keys ``noise`` and ``prior`` contain the chain list of
|
||||
noise and prior precision respectively.
|
||||
|
||||
Other parameters
|
||||
----------------
|
||||
The keys of ``user_params`` are:
|
||||
|
||||
threshold : float
|
||||
The stopping criterion: the norm of the difference between two
successive approximated solutions (empirical mean of object
|
||||
samples, see Notes section). 1e-4 by default.
|
||||
burnin : int
|
||||
The number of samples to ignore before starting computation of the
|
||||
mean. 15 by default.
|
||||
min_iter : int
|
||||
The minimum number of iterations. 30 by default.
|
||||
max_iter : int
|
||||
The maximum number of iterations if ``threshold`` is not
|
||||
satisfied. 200 by default.
|
||||
callback : callable (None by default)
|
||||
A user provided callable to which is passed, if the function
|
||||
exists, the current image sample for whatever purpose. The user
|
||||
can store the sample, or compute other moments than the
|
||||
mean. It has no influence on the algorithm execution and is
|
||||
only for inspection.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage import color, data, restoration
|
||||
>>> img = color.rgb2gray(data.astronaut())
|
||||
>>> from scipy.signal import convolve2d
|
||||
>>> psf = np.ones((5, 5)) / 25
|
||||
>>> img = convolve2d(img, psf, 'same')
|
||||
>>> img += 0.1 * img.std() * np.random.standard_normal(img.shape)
|
||||
>>> deconvolved_img = restoration.unsupervised_wiener(img, psf)
|
||||
|
||||
Notes
|
||||
-----
|
||||
The estimated image is designed as the posterior mean of a
probability law (from a Bayesian analysis). The mean is defined as
a sum over all the possible images weighted by their respective
probability. Given the size of the problem, the exact sum is not
tractable. This algorithm uses MCMC to draw images under the
posterior law. The practical idea is to only draw highly probable
images since they have the biggest contribution to the mean.
Conversely, the less probable images are drawn less often since
their contribution is low. Finally, the empirical mean of these
samples gives an estimate of the mean, which would be exact with
an infinite sample set.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] François Orieux, Jean-François Giovannelli, and Thomas
|
||||
Rodet, "Bayesian estimation of regularization and point
|
||||
spread function parameters for Wiener-Hunt deconvolution",
|
||||
J. Opt. Soc. Am. A 27, 1593-1607 (2010)
|
||||
|
||||
https://www.osapublishing.org/josaa/abstract.cfm?URI=josaa-27-7-1593
|
||||
|
||||
http://research.orieux.fr/files/papers/OGR-JOSA10.pdf
|
||||
"""
|
||||
params = {'threshold': 1e-4, 'max_iter': 200,
|
||||
'min_iter': 30, 'burnin': 15, 'callback': None}
|
||||
params.update(user_params or {})
|
||||
|
||||
if reg is None:
|
||||
reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
|
||||
if not np.iscomplexobj(reg):
|
||||
reg = uft.ir2tf(reg, image.shape, is_real=is_real)
|
||||
|
||||
if psf.shape != reg.shape:
|
||||
trans_fct = uft.ir2tf(psf, image.shape, is_real=is_real)
|
||||
else:
|
||||
trans_fct = psf
|
||||
|
||||
# The mean of the object
|
||||
x_postmean = np.zeros(trans_fct.shape)
|
||||
# The previous computed mean in the iterative loop
|
||||
prev_x_postmean = np.zeros(trans_fct.shape)
|
||||
|
||||
# Difference between two successive mean
|
||||
delta = np.NAN
|
||||
|
||||
# Initial state of the chain
|
||||
gn_chain, gx_chain = [1], [1]
|
||||
|
||||
# The correlation of the object in Fourier space (if size is big,
|
||||
# this can reduce computation time in the loop)
|
||||
areg2 = np.abs(reg) ** 2
|
||||
atf2 = np.abs(trans_fct) ** 2
|
||||
|
||||
# The Fourier transform may change the image.size attribute, so we
|
||||
# store it.
|
||||
if is_real:
|
||||
data_spectrum = uft.urfft2(image.astype(np.float))
|
||||
else:
|
||||
data_spectrum = uft.ufft2(image.astype(np.float))
|
||||
|
||||
# Gibbs sampling
|
||||
for iteration in range(params['max_iter']):
|
||||
# Sample of Eq. 27 p(circX^k | gn^k-1, gx^k-1, y).
|
||||
|
||||
# weighting (correlation in direct space)
|
||||
precision = gn_chain[-1] * atf2 + gx_chain[-1] * areg2 # Eq. 29
|
||||
excursion = np.sqrt(0.5) / np.sqrt(precision) * (
|
||||
np.random.standard_normal(data_spectrum.shape) +
|
||||
1j * np.random.standard_normal(data_spectrum.shape))
|
||||
|
||||
# mean Eq. 30 (RLS for fixed gn, gamma0 and gamma1 ...)
|
||||
wiener_filter = gn_chain[-1] * np.conj(trans_fct) / precision
|
||||
|
||||
# sample of X in Fourier space
|
||||
x_sample = wiener_filter * data_spectrum + excursion
|
||||
if params['callback']:
|
||||
params['callback'](x_sample)
|
||||
|
||||
# sample of Eq. 31 p(gn | x^k, gx^k, y)
|
||||
gn_chain.append(npr.gamma(image.size / 2,
|
||||
2 / uft.image_quad_norm(data_spectrum -
|
||||
x_sample *
|
||||
trans_fct)))
|
||||
|
||||
# sample of Eq. 31 p(gx | x^k, gn^k-1, y)
|
||||
gx_chain.append(npr.gamma((image.size - 1) / 2,
|
||||
2 / uft.image_quad_norm(x_sample * reg)))
|
||||
|
||||
# current empirical average
|
||||
if iteration > params['burnin']:
|
||||
x_postmean = prev_x_postmean + x_sample
|
||||
|
||||
if iteration > (params['burnin'] + 1):
|
||||
current = x_postmean / (iteration - params['burnin'])
|
||||
previous = prev_x_postmean / (iteration - params['burnin'] - 1)
|
||||
|
||||
delta = np.sum(np.abs(current - previous)) / \
|
||||
np.sum(np.abs(x_postmean)) / (iteration - params['burnin'])
|
||||
|
||||
prev_x_postmean = x_postmean
|
||||
|
||||
# stop of the algorithm
|
||||
if (iteration > params['min_iter']) and (delta < params['threshold']):
|
||||
break
|
||||
|
||||
# Empirical average \approx POSTMEAN Eq. 44
|
||||
x_postmean = x_postmean / (iteration - params['burnin'])
|
||||
if is_real:
|
||||
x_postmean = uft.uirfft2(x_postmean, shape=image.shape)
|
||||
else:
|
||||
x_postmean = uft.uifft2(x_postmean)
|
||||
|
||||
if clip:
|
||||
x_postmean[x_postmean > 1] = 1
|
||||
x_postmean[x_postmean < -1] = -1
|
||||
|
||||
return (x_postmean, {'noise': gn_chain, 'prior': gx_chain})
|
||||
|
||||
|
||||
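
# --- Illustrative sketch, not part of the original module ---------------
# The Notes above describe how the posterior mean is approximated by
# averaging MCMC samples. A minimal usage sketch, reusing the ``img`` and
# ``psf`` arrays built in the docstring example; the second return value
# exposes the sampled gamma chains, which can be inspected to check that
# the sampler has stabilised before trusting the posterior mean:
#
#   deconvolved, chains = restoration.unsupervised_wiener(
#       img, psf, user_params={'max_iter': 300, 'burnin': 20})
#   noise_prec = chains['noise']   # sampled noise precisions (gn_chain)
#   prior_prec = chains['prior']   # sampled prior precisions (gx_chain)
#   # a roughly flat tail in both lists suggests the chain has converged
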
def richardson_lucy(image, psf, iterations=50, clip=True):
    """Richardson-Lucy deconvolution.

    Parameters
    ----------
    image : ndarray
        Input degraded image (can be N dimensional).
    psf : ndarray
        The point spread function.
    iterations : int, optional
        Number of iterations. This parameter plays the role of
        regularisation.
    clip : boolean, optional
        True by default. If true, pixel values of the result above 1 or
        under -1 are thresholded for skimage pipeline compatibility.

    Returns
    -------
    im_deconv : ndarray
        The deconvolved image.

    Examples
    --------
    >>> from skimage import color, data, restoration
    >>> camera = color.rgb2gray(data.camera())
    >>> from scipy.signal import convolve2d
    >>> psf = np.ones((5, 5)) / 25
    >>> camera = convolve2d(camera, psf, 'same')
    >>> camera += 0.1 * camera.std() * np.random.standard_normal(camera.shape)
    >>> deconvolved = restoration.richardson_lucy(camera, psf, 5)

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution
    """
    image = image.astype(np.float)
    psf = psf.astype(np.float)
    im_deconv = np.full(image.shape, 0.5)
    psf_mirror = psf[::-1, ::-1]

    for _ in range(iterations):
        relative_blur = image / convolve(im_deconv, psf, mode='same')
        im_deconv *= convolve(relative_blur, psf_mirror, mode='same')

    if clip:
        im_deconv[im_deconv > 1] = 1
        im_deconv[im_deconv < -1] = -1

    return im_deconv

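
# --- Illustrative sketch, not part of the original module ---------------
# The loop above is the classic multiplicative Richardson-Lucy update,
#     u_{t+1} = u_t * ((d / (u_t * P)) * P_mirror),
# with d the observed image, P the PSF and * a same-size convolution.
# A minimal 1-D check, assuming ``convolve`` is scipy.signal.convolve as
# imported at the top of this module:
#
#   import numpy as np
#   from scipy.signal import convolve
#   signal = np.zeros(32); signal[10] = 1.0; signal[20] = 0.5
#   kernel = np.ones(5) / 5.0
#   blurred = convolve(signal, kernel, mode='same')
#   estimate = np.full_like(blurred, 0.5)
#   for _ in range(30):
#       ratio = blurred / convolve(estimate, kernel, mode='same')
#       estimate *= convolve(ratio, kernel[::-1], mode='same')
#   # ``estimate`` re-sharpens toward the two original spikes
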
152
venv/Lib/site-packages/skimage/restoration/inpaint.py
Normal file
@ -0,0 +1,152 @@
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve
import scipy.ndimage as ndi
from scipy.ndimage.filters import laplace
import skimage
from ..measure import label


def _get_neighborhood(nd_idx, radius, nd_shape):
    bounds_lo = (nd_idx - radius).clip(min=0)
    bounds_hi = (nd_idx + radius + 1).clip(max=nd_shape)
    return bounds_lo, bounds_hi


def _inpaint_biharmonic_single_channel(mask, out, limits):
    # Initialize sparse matrices
    matrix_unknown = sparse.lil_matrix((np.sum(mask), out.size))
    matrix_known = sparse.lil_matrix((np.sum(mask), out.size))

    # Find indexes of masked points in flatten array
    mask_i = np.ravel_multi_index(np.where(mask), mask.shape)

    # Find masked points and prepare them to be easily enumerate over
    mask_pts = np.array(np.where(mask)).T

    # Iterate over masked points
    for mask_pt_n, mask_pt_idx in enumerate(mask_pts):
        # Get bounded neighborhood of selected radius
        b_lo, b_hi = _get_neighborhood(mask_pt_idx, 2, out.shape)

        # Create biharmonic coefficients ndarray
        neigh_coef = np.zeros(b_hi - b_lo)
        neigh_coef[tuple(mask_pt_idx - b_lo)] = 1
        neigh_coef = laplace(laplace(neigh_coef))

        # Iterate over masked point's neighborhood
        it_inner = np.nditer(neigh_coef, flags=['multi_index'])
        for coef in it_inner:
            if coef == 0:
                continue
            tmp_pt_idx = np.add(b_lo, it_inner.multi_index)
            tmp_pt_i = np.ravel_multi_index(tmp_pt_idx, mask.shape)

            if mask[tuple(tmp_pt_idx)]:
                matrix_unknown[mask_pt_n, tmp_pt_i] = coef
            else:
                matrix_known[mask_pt_n, tmp_pt_i] = coef

    # Prepare diagonal matrix
    flat_diag_image = sparse.dia_matrix((out.flatten(), np.array([0])),
                                        shape=(out.size, out.size))

    # Calculate right hand side as a sum of known matrix's columns
    matrix_known = matrix_known.tocsr()
    rhs = -(matrix_known * flat_diag_image).sum(axis=1)

    # Solve linear system for masked points
    matrix_unknown = matrix_unknown[:, mask_i]
    matrix_unknown = sparse.csr_matrix(matrix_unknown)
    result = spsolve(matrix_unknown, rhs)

    # Handle enormous values
    result = np.clip(result, *limits)

    result = result.ravel()

    # Substitute masked points with inpainted versions
    for mask_pt_n, mask_pt_idx in enumerate(mask_pts):
        out[tuple(mask_pt_idx)] = result[mask_pt_n]

    return out


def inpaint_biharmonic(image, mask, multichannel=False):
    """Inpaint masked points in image with biharmonic equations.

    Parameters
    ----------
    image : (M[, N[, ..., P]][, C]) ndarray
        Input image.
    mask : (M[, N[, ..., P]]) ndarray
        Array of pixels to be inpainted. Have to be the same shape as one
        of the 'image' channels. Unknown pixels have to be represented with 1,
        known pixels - with 0.
    multichannel : boolean, optional
        If True, the last `image` dimension is considered as a color channel,
        otherwise as spatial.

    Returns
    -------
    out : (M[, N[, ..., P]][, C]) ndarray
        Input image with masked pixels inpainted.

    References
    ----------
    .. [1] N.S.Hoang, S.B.Damelin, "On surface completion and image inpainting
           by biharmonic functions: numerical aspects",
           :arXiv:`1707.06567`
    .. [2] C. K. Chui and H. N. Mhaskar, MRA Contextual-Recovery Extension of
           Smooth Functions on Manifolds, Appl. and Comp. Harmonic Anal.,
           28 (2010), 104-113,
           :DOI:`10.1016/j.acha.2009.04.004`

    Examples
    --------
    >>> img = np.tile(np.square(np.linspace(0, 1, 5)), (5, 1))
    >>> mask = np.zeros_like(img)
    >>> mask[2, 2:] = 1
    >>> mask[1, 3:] = 1
    >>> mask[0, 4:] = 1
    >>> out = inpaint_biharmonic(img, mask)
    """

    if image.ndim < 1:
        raise ValueError('Input array has to be at least 1D')

    img_baseshape = image.shape[:-1] if multichannel else image.shape
    if img_baseshape != mask.shape:
        raise ValueError('Input arrays have to be the same shape')

    if np.ma.isMaskedArray(image):
        raise TypeError('Masked arrays are not supported')

    image = skimage.img_as_float(image)
    mask = mask.astype(np.bool)

    # Split inpainting mask into independent regions
    kernel = ndi.morphology.generate_binary_structure(mask.ndim, 1)
    mask_dilated = ndi.morphology.binary_dilation(mask, structure=kernel)
    mask_labeled, num_labels = label(mask_dilated, return_num=True)
    mask_labeled *= mask

    if not multichannel:
        image = image[..., np.newaxis]

    out = np.copy(image)

    for idx_channel in range(image.shape[-1]):
        known_points = image[..., idx_channel][~mask]
        limits = (np.min(known_points), np.max(known_points))

        for idx_region in range(1, num_labels+1):
            mask_region = mask_labeled == idx_region
            _inpaint_biharmonic_single_channel(mask_region,
                                               out[..., idx_channel], limits)

    if not multichannel:
        out = out[..., 0]

    return out
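
# --- Illustrative sketch, not part of the original module ---------------
# The solver above is applied channel by channel, so for an RGB image the
# same 2-D mask is reused for every channel. A hedged sketch on a small
# synthetic image (the array names are made up for illustration):
#
#   import numpy as np
#   rgb = np.random.rand(32, 32, 3)
#   defect = np.zeros(rgb.shape[:2], dtype=bool)
#   defect[10:14, 8:20] = True           # pixels to reconstruct
#   rgb[defect] = 0                      # simulate missing data
#   restored = inpaint_biharmonic(rgb, defect, multichannel=True)
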
317
venv/Lib/site-packages/skimage/restoration/j_invariant.py
Normal file
@ -0,0 +1,317 @@
import itertools
import functools

import numpy as np
from scipy import ndimage as ndi

from ..metrics import mean_squared_error
from ..util import img_as_float


def _interpolate_image(image, *, multichannel=False):
    """Replacing each pixel in ``image`` with the average of its neighbors.

    Parameters
    ----------
    image : ndarray
        Input data to be interpolated.
    multichannel : bool, optional
        Whether the last axis of the image is to be interpreted as multiple
        channels or another spatial dimension.

    Returns
    -------
    interp : ndarray
        Interpolated version of `image`.
    """
    spatialdims = image.ndim if not multichannel else image.ndim - 1
    conv_filter = ndi.generate_binary_structure(spatialdims, 1).astype(image.dtype)
    conv_filter.ravel()[conv_filter.size // 2] = 0
    conv_filter /= conv_filter.sum()

    if multichannel:
        interp = np.zeros_like(image)
        for i in range(image.shape[-1]):
            interp[..., i] = ndi.convolve(image[..., i], conv_filter,
                                          mode='mirror')
    else:
        interp = ndi.convolve(image, conv_filter, mode='mirror')
    return interp


def _generate_grid_slice(shape, *, offset, stride=3):
    """Generate slices of uniformly-spaced points in an array.

    Parameters
    ----------
    shape : tuple of int
        Shape of the mask.
    offset : int
        The offset of the grid of ones. Iterating over ``offset`` will cover
        the entire array. It should be between 0 and ``stride ** ndim``, not
        inclusive, where ``ndim = len(shape)``.
    stride : int, optional
        The spacing between ones, used in each dimension.

    Returns
    -------
    mask : ndarray
        The mask.

    Examples
    --------
    >>> shape = (4, 4)
    >>> array = np.zeros(shape, dtype=int)
    >>> grid_slice = _generate_grid_slice(shape, offset=0, stride=2)
    >>> array[grid_slice] = 1
    >>> print(array)
    [[1 0 1 0]
     [0 0 0 0]
     [1 0 1 0]
     [0 0 0 0]]

    Changing the offset moves the location of the 1s:

    >>> array = np.zeros(shape, dtype=int)
    >>> grid_slice = _generate_grid_slice(shape, offset=3, stride=2)
    >>> array[grid_slice] = 1
    >>> print(array)
    [[0 0 0 0]
     [0 1 0 1]
     [0 0 0 0]
     [0 1 0 1]]
    """
    phases = np.unravel_index(offset, (stride,) * len(shape))
    mask = tuple(slice(p, None, stride) for p in phases)

    return mask


def _invariant_denoise(image, denoise_function, *, stride=4,
                       masks=None, denoiser_kwargs=None):
    """Apply a J-invariant version of `denoise_function`.

    Parameters
    ----------
    image : ndarray
        Input data to be denoised (converted using `img_as_float`).
    denoise_function : function
        Original denoising function.
    stride : int, optional
        Stride used in masking procedure that converts `denoise_function`
        to J-invariance.
    masks : list of ndarray, optional
        Set of masks to use for computing J-invariant output. If `None`,
        a full set of masks covering the image will be used.
    denoiser_kwargs:
        Keyword arguments passed to `denoise_function`.

    Returns
    -------
    output : ndarray
        Denoised image, of same shape as `image`.
    """
    image = img_as_float(image)
    if denoiser_kwargs is None:
        denoiser_kwargs = {}

    if 'multichannel' in denoiser_kwargs:
        multichannel = denoiser_kwargs['multichannel']
    else:
        multichannel = False
    interp = _interpolate_image(image, multichannel=multichannel)
    output = np.zeros_like(image)

    if masks is None:
        spatialdims = image.ndim if not multichannel else image.ndim - 1
        n_masks = stride ** spatialdims
        masks = (_generate_grid_slice(image.shape[:spatialdims],
                                      offset=idx, stride=stride)
                 for idx in range(n_masks))

    for mask in masks:
        input_image = image.copy()
        input_image[mask] = interp[mask]
        output[mask] = denoise_function(input_image, **denoiser_kwargs)[mask]
    return output


def _product_from_dict(dictionary):
    """Utility function to convert parameter ranges to parameter combinations.

    Converts a dict of lists into a list of dicts whose values consist of the
    cartesian product of the values in the original dict.

    Parameters
    ----------
    dictionary : dict of lists
        Dictionary of lists to be multiplied.

    Yields
    ------
    selections : dicts of values
        Dicts containing individual combinations of the values in the input
        dict.
    """
    keys = dictionary.keys()
    for element in itertools.product(*dictionary.values()):
        yield dict(zip(keys, element))


def calibrate_denoiser(image, denoise_function, denoise_parameters, *,
                       stride=4, approximate_loss=True,
                       extra_output=False):
    """Calibrate a denoising function and return optimal J-invariant version.

    The returned function is partially evaluated with optimal parameter values
    set for denoising the input image.

    Parameters
    ----------
    image : ndarray
        Input data to be denoised (converted using `img_as_float`).
    denoise_function : function
        Denoising function to be calibrated.
    denoise_parameters : dict of list
        Ranges of parameters for `denoise_function` to be calibrated over.
    stride : int, optional
        Stride used in masking procedure that converts `denoise_function`
        to J-invariance.
    approximate_loss : bool, optional
        Whether to approximate the self-supervised loss used to evaluate the
        denoiser by only computing it on one masked version of the image.
        If False, the runtime will be a factor of `stride**image.ndim` longer.
    extra_output : bool, optional
        If True, return parameters and losses in addition to the calibrated
        denoising function

    Returns
    -------
    best_denoise_function : function
        The optimal J-invariant version of `denoise_function`.

    If `extra_output` is True, the following tuple is also returned:

    (parameters_tested, losses) : tuple (list of dict, list of int)
        List of parameters tested for `denoise_function`, as a dictionary of
        kwargs
        Self-supervised loss for each set of parameters in `parameters_tested`.

    Notes
    -----

    The calibration procedure uses a self-supervised mean-square-error loss
    to evaluate the performance of J-invariant versions of `denoise_function`.
    The minimizer of the self-supervised loss is also the minimizer of the
    ground-truth loss (i.e., the true MSE error) [1]. The returned function
    can be used on the original noisy image, or other images with similar
    characteristics.

    Increasing the stride increases the performance of `best_denoise_function`
    at the expense of increasing its runtime. It has no effect on the runtime
    of the calibration.

    References
    ----------
    .. [1] J. Batson & L. Royer. Noise2Self: Blind Denoising by Self-Supervision,
           International Conference on Machine Learning, p. 524-533 (2019).

    Examples
    --------

    >>> from skimage import color, data
    >>> from skimage.restoration import denoise_wavelet
    >>> import numpy as np
    >>> img = color.rgb2gray(data.astronaut()[:50, :50])
    >>> noisy = img + 0.5 * img.std() * np.random.randn(*img.shape)
    >>> parameters = {'sigma': np.arange(0.1, 0.4, 0.02)}
    >>> denoising_function = calibrate_denoiser(noisy, denoise_wavelet,
    ...                                         denoise_parameters=parameters)
    >>> denoised_img = denoising_function(img)

    """
    parameters_tested, losses = _calibrate_denoiser_search(
        image, denoise_function,
        denoise_parameters=denoise_parameters,
        stride=stride,
        approximate_loss=approximate_loss
    )

    idx = np.argmin(losses)
    best_parameters = parameters_tested[idx]

    best_denoise_function = functools.partial(
        _invariant_denoise,
        denoise_function=denoise_function,
        stride=stride,
        denoiser_kwargs=best_parameters,
    )

    if extra_output:
        return best_denoise_function, (parameters_tested, losses)
    else:
        return best_denoise_function


def _calibrate_denoiser_search(image, denoise_function, denoise_parameters, *,
                               stride=4, approximate_loss=True):
    """Return a parameter search history with losses for a denoise function.

    Parameters
    ----------
    image : ndarray
        Input data to be denoised (converted using `img_as_float`).
    denoise_function : function
        Denoising function to be calibrated.
    denoise_parameters : dict of list
        Ranges of parameters for `denoise_function` to be calibrated over.
    stride : int, optional
        Stride used in masking procedure that converts `denoise_function`
        to J-invariance.
    approximate_loss : bool, optional
        Whether to approximate the self-supervised loss used to evaluate the
        denoiser by only computing it on one masked version of the image.
        If False, the runtime will be a factor of `stride**image.ndim` longer.

    Returns
    -------
    parameters_tested : list of dict
        List of parameters tested for `denoise_function`, as a dictionary of
        kwargs.
    losses : list of int
        Self-supervised loss for each set of parameters in `parameters_tested`.
    """
    image = img_as_float(image)
    parameters_tested = list(_product_from_dict(denoise_parameters))
    losses = []

    for denoiser_kwargs in parameters_tested:
        if 'multichannel' in denoiser_kwargs:
            multichannel = denoiser_kwargs['multichannel']
        else:
            multichannel = False
        if not approximate_loss:
            denoised = _invariant_denoise(
                image, denoise_function,
                stride=stride,
                denoiser_kwargs=denoiser_kwargs
            )
            loss = mean_squared_error(image, denoised)
        else:
            spatialdims = image.ndim if not multichannel else image.ndim - 1
            n_masks = stride ** spatialdims
            mask = _generate_grid_slice(image.shape[:spatialdims],
                                        offset=n_masks // 2, stride=stride)

            masked_denoised = _invariant_denoise(
                image, denoise_function,
                masks=[mask],
                denoiser_kwargs=denoiser_kwargs
            )

            loss = mean_squared_error(image[mask], masked_denoised[mask])

        losses.append(loss)

    return parameters_tested, losses
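
# --- Illustrative sketch, not part of the original module ---------------
# With ``extra_output=True`` the calibration also exposes the parameter grid
# and its self-supervised losses, which helps verify that the selected
# optimum is not at the edge of the searched range. A sketch reusing the
# ``noisy``, ``denoise_wavelet`` and ``parameters`` names from the
# ``calibrate_denoiser`` docstring example:
#
#   best_fn, (tested, losses) = calibrate_denoiser(
#       noisy, denoise_wavelet, denoise_parameters=parameters,
#       extra_output=True)
#   for kwargs, loss in zip(tested, losses):
#       print(kwargs, loss)   # the smallest loss marks the chosen kwargs
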
164
venv/Lib/site-packages/skimage/restoration/non_local_means.py
Normal file
@ -0,0 +1,164 @@
import numpy as np
from warnings import warn
from .._shared.utils import convert_to_float
from ._nl_means_denoising import (
    _nl_means_denoising_2d,
    _nl_means_denoising_3d,
    _fast_nl_means_denoising_2d,
    _fast_nl_means_denoising_3d)


def denoise_nl_means(image, patch_size=7, patch_distance=11, h=0.1,
                     multichannel=False, fast_mode=True, sigma=0., *,
                     preserve_range=None):
    """Perform non-local means denoising on 2-D or 3-D grayscale images, and
    2-D RGB images.

    Parameters
    ----------
    image : 2D or 3D ndarray
        Input image to be denoised, which can be 2D or 3D, and grayscale
        or RGB (for 2D images only, see ``multichannel`` parameter).
    patch_size : int, optional
        Size of patches used for denoising.
    patch_distance : int, optional
        Maximal distance in pixels where to search patches used for denoising.
    h : float, optional
        Cut-off distance (in gray levels). The higher h, the more permissive
        one is in accepting patches. A higher h results in a smoother image,
        at the expense of blurring features. For a Gaussian noise of standard
        deviation sigma, a rule of thumb is to choose the value of h to be
        sigma or slightly less.
    multichannel : bool, optional
        Whether the last axis of the image is to be interpreted as multiple
        channels or another spatial dimension.
    fast_mode : bool, optional
        If True (default value), a fast version of the non-local means
        algorithm is used. If False, the original version of non-local means is
        used. See the Notes section for more details about the algorithms.
    sigma : float, optional
        The standard deviation of the (Gaussian) noise. If provided, a more
        robust computation of patch weights is computed that takes the expected
        noise variance into account (see Notes below).
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of `img_as_float`.
        Also see https://scikit-image.org/docs/dev/user_guide/data_types.html

    Returns
    -------
    result : ndarray
        Denoised image, of same shape as `image`.

    Notes
    -----

    The non-local means algorithm is well suited for denoising images with
    specific textures. The principle of the algorithm is to average the value
    of a given pixel with values of other pixels in a limited neighbourhood,
    provided that the *patches* centered on the other pixels are similar enough
    to the patch centered on the pixel of interest.

    In the original version of the algorithm [1]_, corresponding to
    ``fast_mode=False``, the computational complexity is::

        image.size * patch_size ** image.ndim * patch_distance ** image.ndim

    Hence, changing the size of patches or their maximal distance has a
    strong effect on computing times, especially for 3-D images.

    However, the default behavior corresponds to ``fast_mode=True``, for which
    another version of non-local means [2]_ is used, corresponding to a
    complexity of::

        image.size * patch_distance ** image.ndim

    The computing time depends only weakly on the patch size, thanks to
    the computation of the integral of patches distances for a given
    shift, that reduces the number of operations [1]_. Therefore, this
    algorithm executes faster than the classic algorithm
    (``fast_mode=False``), at the expense of using twice as much memory.
    This implementation has been proven to be more efficient compared to
    other alternatives, see e.g. [3]_.

    Compared to the classic algorithm, all pixels of a patch contribute
    to the distance to another patch with the same weight, no matter
    their distance to the center of the patch. This coarser computation
    of the distance can result in a slightly poorer denoising
    performance. Moreover, for small images (images with a linear size
    that is only a few times the patch size), the classic algorithm can
    be faster due to boundary effects.

    The image is padded using the `reflect` mode of `skimage.util.pad`
    before denoising.

    If the noise standard deviation, `sigma`, is provided a more robust
    computation of patch weights is used. Subtracting the known noise variance
    from the computed patch distances improves the estimates of patch
    similarity, giving a moderate improvement to denoising performance [4]_.
    It was also mentioned as an option for the fast variant of the algorithm in
    [3]_.

    When `sigma` is provided, a smaller `h` should typically be used to
    avoid oversmoothing. The optimal value for `h` depends on the image
    content and noise level, but a reasonable starting point is
    ``h = 0.8 * sigma`` when `fast_mode` is `True`, or ``h = 0.6 * sigma`` when
    `fast_mode` is `False`.

    References
    ----------
    .. [1] A. Buades, B. Coll, & J-M. Morel. A non-local algorithm for image
           denoising. In CVPR 2005, Vol. 2, pp. 60-65, IEEE.
           :DOI:`10.1109/CVPR.2005.38`

    .. [2] J. Darbon, A. Cunha, T.F. Chan, S. Osher, and G.J. Jensen, Fast
           nonlocal filtering applied to electron cryomicroscopy, in 5th IEEE
           International Symposium on Biomedical Imaging: From Nano to Macro,
           2008, pp. 1331-1334.
           :DOI:`10.1109/ISBI.2008.4541250`

    .. [3] Jacques Froment. Parameter-Free Fast Pixelwise Non-Local Means
           Denoising. Image Processing On Line, 2014, vol. 4, pp. 300-326.
           :DOI:`10.5201/ipol.2014.120`

    .. [4] A. Buades, B. Coll, & J-M. Morel. Non-Local Means Denoising.
           Image Processing On Line, 2011, vol. 1, pp. 208-212.
           :DOI:`10.5201/ipol.2011.bcm_nlm`

    Examples
    --------
    >>> a = np.zeros((40, 40))
    >>> a[10:-10, 10:-10] = 1.
    >>> a += 0.3 * np.random.randn(*a.shape)
    >>> denoised_a = denoise_nl_means(a, 7, 5, 0.1)

    """
    if image.ndim == 2:
        image = image[..., np.newaxis]
        multichannel = True
    if image.ndim != 3:
        raise NotImplementedError("Non-local means denoising is only \
        implemented for 2D grayscale and RGB images or 3-D grayscale images.")

    if preserve_range is None and np.issubdtype(image.dtype, np.integer):
        warn('Image dtype is not float. By default denoise_nl_means will '
             'assume you want to preserve the range of your image '
             '(preserve_range=True). In scikit-image 0.19 this behavior will '
             'change to preserve_range=False. To avoid this warning, '
             'explicitly specify the preserve_range parameter.',
             stacklevel=2)
        preserve_range = True

    image = convert_to_float(image, preserve_range)

    kwargs = dict(s=patch_size, d=patch_distance, h=h, var=sigma * sigma)
    if multichannel:  # 2-D images
        if fast_mode:
            return _fast_nl_means_denoising_2d(image, **kwargs)
        else:
            return _nl_means_denoising_2d(image, **kwargs)
    else:  # 3-D grayscale
        if fast_mode:
            return _fast_nl_means_denoising_3d(image, **kwargs)
        else:
            return _nl_means_denoising_3d(image, **kwargs)
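
# --- Illustrative sketch, not part of the original module ---------------
# The Notes above suggest pairing ``sigma`` with a reduced ``h`` (roughly
# ``h = 0.8 * sigma`` in fast mode). A hedged sketch built on the synthetic
# image from the docstring example, using ``estimate_sigma`` from this
# subpackage to guess the noise level:
#
#   import numpy as np
#   from skimage.restoration import denoise_nl_means, estimate_sigma
#   a = np.zeros((40, 40))
#   a[10:-10, 10:-10] = 1.
#   a += 0.3 * np.random.randn(*a.shape)
#   sigma_est = estimate_sigma(a, multichannel=False)
#   denoised = denoise_nl_means(a, patch_size=7, patch_distance=11,
#                               h=0.8 * sigma_est, sigma=sigma_est,
#                               fast_mode=True)
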
47
venv/Lib/site-packages/skimage/restoration/setup.py
Normal file
@ -0,0 +1,47 @@
#!/usr/bin/env python

import os

from skimage._build import cython

base_path = os.path.abspath(os.path.dirname(__file__))


def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs

    config = Configuration('restoration', parent_package, top_path)

    cython(['_unwrap_1d.pyx',
            '_unwrap_2d.pyx',
            '_unwrap_3d.pyx',
            '_denoise_cy.pyx',
            '_nl_means_denoising.pyx'], working_path=base_path)

    config.add_extension('_unwrap_1d', sources=['_unwrap_1d.c'],
                         include_dirs=[get_numpy_include_dirs()])
    unwrap_sources_2d = ['_unwrap_2d.c', 'unwrap_2d_ljmu.c']
    config.add_extension('_unwrap_2d', sources=unwrap_sources_2d,
                         include_dirs=[get_numpy_include_dirs()])
    unwrap_sources_3d = ['_unwrap_3d.c', 'unwrap_3d_ljmu.c']
    config.add_extension('_unwrap_3d', sources=unwrap_sources_3d,
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('_denoise_cy', sources=['_denoise_cy.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('_nl_means_denoising',
                         sources=['_nl_means_denoising.c'],
                         include_dirs=[get_numpy_include_dirs(),
                                       '../_shared'])

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(maintainer='scikit-image Developers',
          author='scikit-image Developers',
          maintainer_email='scikit-image@python.org',
          description='Restoration',
          url='https://github.com/scikit-image/scikit-image',
          license='SciPy License (BSD Style)',
          **(configuration(top_path='').todict())
          )
@ -0,0 +1,9 @@
from ..._shared.testing import setup_test, teardown_test


def setup():
    setup_test()


def teardown():
    teardown_test()
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
886
venv/Lib/site-packages/skimage/restoration/tests/test_denoise.py
Normal file
@ -0,0 +1,886 @@
import itertools
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from skimage import restoration, data, color, img_as_float
|
||||
from skimage.metrics import structural_similarity
|
||||
from skimage.metrics import peak_signal_noise_ratio
|
||||
from skimage.restoration._denoise import _wavelet_threshold
|
||||
import pywt
|
||||
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import (assert_equal, assert_almost_equal,
|
||||
assert_warns, assert_)
|
||||
from skimage._shared._warnings import expected_warnings
|
||||
from distutils.version import LooseVersion as Version
|
||||
|
||||
|
||||
try:
|
||||
import dask
|
||||
except ImportError:
|
||||
DASK_NOT_INSTALLED_WARNING = 'The optional dask dependency is not installed'
|
||||
else:
|
||||
DASK_NOT_INSTALLED_WARNING = None
|
||||
|
||||
|
||||
np.random.seed(1234)
|
||||
|
||||
|
||||
astro = img_as_float(data.astronaut()[:128, :128])
|
||||
astro_gray = color.rgb2gray(astro)
|
||||
checkerboard_gray = img_as_float(data.checkerboard())
|
||||
checkerboard = color.gray2rgb(checkerboard_gray)
|
||||
# versions with one odd-sized dimension
|
||||
astro_gray_odd = astro_gray[:, :-1]
|
||||
astro_odd = astro[:, :-1]
|
||||
|
||||
|
||||
def test_denoise_tv_chambolle_2d():
|
||||
# astronaut image
|
||||
img = astro_gray.copy()
|
||||
# add noise to astronaut
|
||||
img += 0.5 * img.std() * np.random.rand(*img.shape)
|
||||
# clip noise so that it does not exceed allowed range for float images.
|
||||
img = np.clip(img, 0, 1)
|
||||
# denoise
|
||||
denoised_astro = restoration.denoise_tv_chambolle(img, weight=0.1)
|
||||
# which dtype?
|
||||
assert_(denoised_astro.dtype in [np.float, np.float32, np.float64])
|
||||
from scipy import ndimage as ndi
|
||||
grad = ndi.morphological_gradient(img, size=((3, 3)))
|
||||
grad_denoised = ndi.morphological_gradient(denoised_astro, size=((3, 3)))
|
||||
# test if the total variation has decreased
|
||||
assert_(grad_denoised.dtype == np.float)
|
||||
assert_(np.sqrt((grad_denoised**2).sum()) < np.sqrt((grad**2).sum()))
|
||||
|
||||
|
||||
def test_denoise_tv_chambolle_multichannel():
|
||||
denoised0 = restoration.denoise_tv_chambolle(astro[..., 0], weight=0.1)
|
||||
denoised = restoration.denoise_tv_chambolle(astro, weight=0.1,
|
||||
multichannel=True)
|
||||
assert_equal(denoised[..., 0], denoised0)
|
||||
|
||||
# tile astronaut subset to generate 3D+channels data
|
||||
astro3 = np.tile(astro[:64, :64, np.newaxis, :], [1, 1, 2, 1])
|
||||
# modify along tiled dimension to give non-zero gradient on 3rd axis
|
||||
astro3[:, :, 0, :] = 2*astro3[:, :, 0, :]
|
||||
denoised0 = restoration.denoise_tv_chambolle(astro3[..., 0], weight=0.1)
|
||||
denoised = restoration.denoise_tv_chambolle(astro3, weight=0.1,
|
||||
multichannel=True)
|
||||
assert_equal(denoised[..., 0], denoised0)
|
||||
|
||||
|
||||
def test_denoise_tv_chambolle_float_result_range():
|
||||
# astronaut image
|
||||
img = astro_gray
|
||||
int_astro = np.multiply(img, 255).astype(np.uint8)
|
||||
assert_(np.max(int_astro) > 1)
|
||||
denoised_int_astro = restoration.denoise_tv_chambolle(int_astro,
|
||||
weight=0.1)
|
||||
# test if the value range of output float data is within [0.0:1.0]
|
||||
assert_(denoised_int_astro.dtype == np.float)
|
||||
assert_(np.max(denoised_int_astro) <= 1.0)
|
||||
assert_(np.min(denoised_int_astro) >= 0.0)
|
||||
|
||||
|
||||
def test_denoise_tv_chambolle_3d():
|
||||
"""Apply the TV denoising algorithm on a 3D image representing a sphere."""
|
||||
x, y, z = np.ogrid[0:40, 0:40, 0:40]
|
||||
mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
|
||||
mask = 100 * mask.astype(np.float)
|
||||
mask += 60
|
||||
mask += 20 * np.random.rand(*mask.shape)
|
||||
mask[mask < 0] = 0
|
||||
mask[mask > 255] = 255
|
||||
res = restoration.denoise_tv_chambolle(mask.astype(np.uint8), weight=0.1)
|
||||
assert_(res.dtype == np.float)
|
||||
assert_(res.std() * 255 < mask.std())
|
||||
|
||||
|
||||
def test_denoise_tv_chambolle_1d():
|
||||
"""Apply the TV denoising algorithm on a 1D sinusoid."""
|
||||
x = 125 + 100*np.sin(np.linspace(0, 8*np.pi, 1000))
|
||||
x += 20 * np.random.rand(x.size)
|
||||
x = np.clip(x, 0, 255)
|
||||
res = restoration.denoise_tv_chambolle(x.astype(np.uint8), weight=0.1)
|
||||
assert_(res.dtype == np.float)
|
||||
assert_(res.std() * 255 < x.std())
|
||||
|
||||
|
||||
def test_denoise_tv_chambolle_4d():
|
||||
""" TV denoising for a 4D input."""
|
||||
im = 255 * np.random.rand(8, 8, 8, 8)
|
||||
res = restoration.denoise_tv_chambolle(im.astype(np.uint8), weight=0.1)
|
||||
assert_(res.dtype == np.float)
|
||||
assert_(res.std() * 255 < im.std())
|
||||
|
||||
|
||||
def test_denoise_tv_chambolle_weighting():
|
||||
# make sure a specified weight gives consistent results regardless of
|
||||
# the number of input image dimensions
|
||||
rstate = np.random.RandomState(1234)
|
||||
img2d = astro_gray.copy()
|
||||
img2d += 0.15 * rstate.standard_normal(img2d.shape)
|
||||
img2d = np.clip(img2d, 0, 1)
|
||||
|
||||
# generate 4D image by tiling
|
||||
img4d = np.tile(img2d[..., None, None], (1, 1, 2, 2))
|
||||
|
||||
w = 0.2
|
||||
denoised_2d = restoration.denoise_tv_chambolle(img2d, weight=w)
|
||||
denoised_4d = restoration.denoise_tv_chambolle(img4d, weight=w)
|
||||
assert_(structural_similarity(denoised_2d,
|
||||
denoised_4d[:, :, 0, 0]) > 0.99)
|
||||
|
||||
|
||||
def test_denoise_tv_bregman_2d():
|
||||
img = checkerboard_gray.copy()
|
||||
# add some random noise
|
||||
img += 0.5 * img.std() * np.random.rand(*img.shape)
|
||||
img = np.clip(img, 0, 1)
|
||||
|
||||
out1 = restoration.denoise_tv_bregman(img, weight=10)
|
||||
out2 = restoration.denoise_tv_bregman(img, weight=5)
|
||||
|
||||
# make sure noise is reduced in the checkerboard cells
|
||||
assert_(img[30:45, 5:15].std() > out1[30:45, 5:15].std())
|
||||
assert_(out1[30:45, 5:15].std() > out2[30:45, 5:15].std())
|
||||
|
||||
|
||||
def test_denoise_tv_bregman_float_result_range():
|
||||
# astronaut image
|
||||
img = astro_gray.copy()
|
||||
int_astro = np.multiply(img, 255).astype(np.uint8)
|
||||
assert_(np.max(int_astro) > 1)
|
||||
denoised_int_astro = restoration.denoise_tv_bregman(int_astro, weight=60.0)
|
||||
# test if the value range of output float data is within [0.0:1.0]
|
||||
assert_(denoised_int_astro.dtype == np.float)
|
||||
assert_(np.max(denoised_int_astro) <= 1.0)
|
||||
assert_(np.min(denoised_int_astro) >= 0.0)
|
||||
|
||||
|
||||
def test_denoise_tv_bregman_3d():
|
||||
img = checkerboard.copy()
|
||||
# add some random noise
|
||||
img += 0.5 * img.std() * np.random.rand(*img.shape)
|
||||
img = np.clip(img, 0, 1)
|
||||
|
||||
out1 = restoration.denoise_tv_bregman(img, weight=10)
|
||||
out2 = restoration.denoise_tv_bregman(img, weight=5)
|
||||
|
||||
# make sure noise is reduced in the checkerboard cells
|
||||
assert_(img[30:45, 5:15].std() > out1[30:45, 5:15].std())
|
||||
assert_(out1[30:45, 5:15].std() > out2[30:45, 5:15].std())
|
||||
|
||||
|
||||
def test_denoise_tv_bregman_3d_multichannel():
|
||||
img_astro = astro.copy()
|
||||
denoised0 = restoration.denoise_tv_bregman(img_astro[..., 0], weight=60.0)
|
||||
denoised = restoration.denoise_tv_bregman(img_astro, weight=60.0,
|
||||
multichannel=True)
|
||||
|
||||
assert_equal(denoised0, denoised[..., 0])
|
||||
|
||||
|
||||
def test_denoise_tv_bregman_multichannel():
|
||||
img = checkerboard_gray.copy()[:50, :50]
|
||||
# add some random noise
|
||||
img += 0.5 * img.std() * np.random.rand(*img.shape)
|
||||
img = np.clip(img, 0, 1)
|
||||
|
||||
out1 = restoration.denoise_tv_bregman(img, weight=60.0)
|
||||
out2 = restoration.denoise_tv_bregman(img, weight=60.0, multichannel=True)
|
||||
|
||||
assert_equal(out1, out2)
|
||||
|
||||
|
||||
def test_denoise_bilateral_2d():
|
||||
img = checkerboard_gray.copy()[:50, :50]
|
||||
# add some random noise
|
||||
img += 0.5 * img.std() * np.random.rand(*img.shape)
|
||||
img = np.clip(img, 0, 1)
|
||||
|
||||
out1 = restoration.denoise_bilateral(img, sigma_color=0.1,
|
||||
sigma_spatial=10, multichannel=False)
|
||||
out2 = restoration.denoise_bilateral(img, sigma_color=0.2,
|
||||
sigma_spatial=20, multichannel=False)
|
||||
|
||||
# make sure noise is reduced in the checkerboard cells
|
||||
assert_(img[30:45, 5:15].std() > out1[30:45, 5:15].std())
|
||||
assert_(out1[30:45, 5:15].std() > out2[30:45, 5:15].std())
|
||||
|
||||
|
||||
def test_denoise_bilateral_pad():
|
||||
"""This test checks if the bilateral filter is returning an image
|
||||
correctly padded."""
|
||||
img = img_as_float(data.chelsea())[100:200, 100:200]
|
||||
img_bil = restoration.denoise_bilateral(img, sigma_color=0.1,
|
||||
sigma_spatial=10,
|
||||
multichannel=True)
|
||||
condition_padding = np.count_nonzero(np.isclose(img_bil,
|
||||
0,
|
||||
atol=0.001))
|
||||
assert_equal(condition_padding, 0)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.float32, np.double])
|
||||
def test_denoise_bilateral_types(dtype):
|
||||
img = checkerboard_gray.copy()[:50, :50]
|
||||
# add some random noise
|
||||
img += 0.5 * img.std() * np.random.rand(*img.shape)
|
||||
img = np.clip(img, 0, 1).astype(dtype)
|
||||
|
||||
# check that we can process multiple float types
|
||||
out = restoration.denoise_bilateral(img, sigma_color=0.1,
|
||||
sigma_spatial=10, multichannel=False)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', [np.float32, np.double])
|
||||
def test_denoise_bregman_types(dtype):
|
||||
img = checkerboard_gray.copy()[:50, :50]
|
||||
# add some random noise
|
||||
img += 0.5 * img.std() * np.random.rand(*img.shape)
|
||||
img = np.clip(img, 0, 1).astype(dtype)
|
||||
|
||||
# check that we can process multiple float types
|
||||
out = restoration.denoise_tv_bregman(img, weight=5)
|
||||
|
||||
|
||||
def test_denoise_bilateral_zeros():
|
||||
img = np.zeros((10, 10))
|
||||
assert_equal(img, restoration.denoise_bilateral(img, multichannel=False))
|
||||
|
||||
|
||||
def test_denoise_bilateral_constant():
|
||||
img = np.ones((10, 10)) * 5
|
||||
assert_equal(img, restoration.denoise_bilateral(img, multichannel=False))
|
||||
|
||||
|
||||
def test_denoise_bilateral_color():
|
||||
img = checkerboard.copy()[:50, :50]
|
||||
# add some random noise
|
||||
img += 0.5 * img.std() * np.random.rand(*img.shape)
|
||||
img = np.clip(img, 0, 1)
|
||||
|
||||
out1 = restoration.denoise_bilateral(img, sigma_color=0.1,
|
||||
sigma_spatial=10, multichannel=True)
|
||||
out2 = restoration.denoise_bilateral(img, sigma_color=0.2,
|
||||
sigma_spatial=20, multichannel=True)
|
||||
|
||||
# make sure noise is reduced in the checkerboard cells
|
||||
assert_(img[30:45, 5:15].std() > out1[30:45, 5:15].std())
|
||||
assert_(out1[30:45, 5:15].std() > out2[30:45, 5:15].std())
|
||||
|
||||
|
||||
def test_denoise_bilateral_3d_grayscale():
|
||||
img = np.ones((50, 50, 3))
|
||||
with testing.raises(ValueError):
|
||||
restoration.denoise_bilateral(img, multichannel=False)
|
||||
|
||||
|
||||
def test_denoise_bilateral_3d_multichannel():
|
||||
img = np.ones((50, 50, 50))
|
||||
with expected_warnings(["grayscale"]):
|
||||
result = restoration.denoise_bilateral(img, multichannel=True)
|
||||
|
||||
assert_equal(result, img)
|
||||
|
||||
|
||||
def test_denoise_bilateral_multidimensional():
|
||||
img = np.ones((10, 10, 10, 10))
|
||||
with testing.raises(ValueError):
|
||||
restoration.denoise_bilateral(img, multichannel=False)
|
||||
with testing.raises(ValueError):
|
||||
restoration.denoise_bilateral(img, multichannel=True)
|
||||
|
||||
|
||||
def test_denoise_bilateral_nan():
|
||||
img = np.full((50, 50), np.NaN)
|
||||
# This is in fact an optional warning for our test suite.
|
||||
# Python 3.5 will not trigger a warning.
|
||||
with expected_warnings([r'invalid|\A\Z']):
|
||||
out = restoration.denoise_bilateral(img, multichannel=False)
|
||||
assert_equal(img, out)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('fast_mode', [False, True])
|
||||
def test_denoise_nl_means_2d(fast_mode):
|
||||
img = np.zeros((40, 40))
|
||||
img[10:-10, 10:-10] = 1.
|
||||
sigma = 0.3
|
||||
img += sigma * np.random.randn(*img.shape)
|
||||
img_f32 = img.astype('float32')
|
||||
for s in [sigma, 0]:
|
||||
denoised = restoration.denoise_nl_means(img, 7, 5, 0.2,
|
||||
fast_mode=fast_mode,
|
||||
multichannel=False,
|
||||
sigma=s)
|
||||
# make sure noise is reduced
|
||||
assert_(img.std() > denoised.std())
|
||||
|
||||
denoised_f32 = restoration.denoise_nl_means(img_f32, 7, 5, 0.2,
|
||||
fast_mode=fast_mode,
|
||||
multichannel=False,
|
||||
sigma=s)
|
||||
# make sure noise is reduced
|
||||
assert_(img.std() > denoised_f32.std())
|
||||
|
||||
# Check single precision result
|
||||
assert np.allclose(denoised_f32, denoised, atol=1e-2)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('fast_mode', [False, True])
|
||||
@pytest.mark.parametrize('n_channels', [2, 3, 6])
|
||||
@pytest.mark.parametrize('dtype', ['float64', 'float32'])
|
||||
def test_denoise_nl_means_2d_multichannel(fast_mode, n_channels, dtype):
|
||||
# reduce image size because nl means is slow
|
||||
img = np.copy(astro[:50, :50])
|
||||
img = np.concatenate((img, ) * 2, ) # 6 channels
|
||||
img = img.astype(dtype)
|
||||
|
||||
# add some random noise
|
||||
sigma = 0.1
|
||||
imgn = img + sigma * np.random.standard_normal(img.shape)
|
||||
imgn = np.clip(imgn, 0, 1)
|
||||
imgn = imgn.astype(dtype)
|
||||
|
||||
for s in [sigma, 0]:
|
||||
psnr_noisy = peak_signal_noise_ratio(
|
||||
img[..., :n_channels], imgn[..., :n_channels])
|
||||
denoised = restoration.denoise_nl_means(imgn[..., :n_channels],
|
||||
3, 5, h=0.75 * sigma,
|
||||
fast_mode=fast_mode,
|
||||
multichannel=True,
|
||||
sigma=s)
|
||||
psnr_denoised = peak_signal_noise_ratio(
|
||||
denoised[..., :n_channels], img[..., :n_channels])
|
||||
|
||||
# make sure noise is reduced
|
||||
assert_(psnr_denoised > psnr_noisy)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('fast_mode', [False, True])
|
||||
@pytest.mark.parametrize('dtype', ['float64', 'float32'])
|
||||
def test_denoise_nl_means_3d(fast_mode, dtype):
|
||||
img = np.zeros((12, 12, 8), dtype=dtype)
|
||||
img[5:-5, 5:-5, 2:-2] = 1.
|
||||
sigma = 0.3
|
||||
imgn = img + sigma * np.random.randn(*img.shape)
|
||||
imgn = imgn.astype(dtype)
|
||||
psnr_noisy = peak_signal_noise_ratio(img, imgn)
|
||||
for s in [sigma, 0]:
|
||||
denoised = restoration.denoise_nl_means(imgn, 3, 4, h=0.75 * sigma,
|
||||
fast_mode=fast_mode,
|
||||
multichannel=False, sigma=s)
|
||||
# make sure noise is reduced
|
||||
assert_(peak_signal_noise_ratio(img, denoised) > psnr_noisy)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('fast_mode', [False, True])
|
||||
@pytest.mark.parametrize('dtype', ['float64', 'float32'])
|
||||
def test_denoise_nl_means_multichannel(fast_mode, dtype):
|
||||
# for true 3D data, 3D denoising is better than denoising as 2D+channels
|
||||
img = np.zeros((13, 10, 8), dtype=dtype)
|
||||
img[6, 4:6, 2:-2] = 1.
|
||||
sigma = 0.3
|
||||
imgn = img + sigma * np.random.randn(*img.shape)
|
||||
imgn = imgn.astype(dtype)
|
||||
denoised_wrong_multichannel = restoration.denoise_nl_means(
|
||||
imgn, 3, 4, 0.6 * sigma, fast_mode=fast_mode, multichannel=True)
|
||||
denoised_ok_multichannel = restoration.denoise_nl_means(
|
||||
imgn, 3, 4, 0.6 * sigma, fast_mode=fast_mode, multichannel=False)
|
||||
psnr_wrong = peak_signal_noise_ratio(img, denoised_wrong_multichannel)
|
||||
psnr_ok = peak_signal_noise_ratio(img, denoised_ok_multichannel)
|
||||
assert_(psnr_ok > psnr_wrong)
|
||||
|
||||
|
||||
def test_denoise_nl_means_wrong_dimension():
|
||||
img = np.zeros((5, 5, 5, 5))
|
||||
with testing.raises(NotImplementedError):
|
||||
restoration.denoise_nl_means(img, multichannel=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('fast_mode', [False, True])
|
||||
@pytest.mark.parametrize('dtype', ['float64', 'float32'])
|
||||
def test_no_denoising_for_small_h(fast_mode, dtype):
|
||||
img = np.zeros((40, 40))
|
||||
img[10:-10, 10:-10] = 1.
|
||||
img += 0.3*np.random.randn(*img.shape)
|
||||
img = img.astype(dtype)
|
||||
# very small h should result in no averaging with other patches
|
||||
denoised = restoration.denoise_nl_means(img, 7, 5, 0.01,
|
||||
fast_mode=fast_mode,
|
||||
multichannel=False)
|
||||
assert_(np.allclose(denoised, img))
|
||||
denoised = restoration.denoise_nl_means(img, 7, 5, 0.01,
|
||||
fast_mode=fast_mode,
|
||||
multichannel=False)
|
||||
assert_(np.allclose(denoised, img))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('fast_mode', [False, True])
|
||||
def test_denoise_nl_means_2d_dtype(fast_mode):
|
||||
img = np.zeros((40, 40), dtype=int)
|
||||
img_f32 = img.astype('float32')
|
||||
img_f64 = img.astype('float64')
|
||||
|
||||
with expected_warnings(['Image dtype is not float']):
|
||||
assert restoration.denoise_nl_means(
|
||||
img, fast_mode=fast_mode).dtype == 'float64'
|
||||
|
||||
assert restoration.denoise_nl_means(
|
||||
img_f32, fast_mode=fast_mode).dtype == img_f32.dtype
|
||||
|
||||
assert restoration.denoise_nl_means(
|
||||
img_f64, fast_mode=fast_mode).dtype == img_f64.dtype
|
||||
|
||||
|
||||
@pytest.mark.parametrize('fast_mode', [False, True])
|
||||
def test_denoise_nl_means_3d_dtype(fast_mode):
|
||||
img = np.zeros((12, 12, 8), dtype=int)
|
||||
img_f32 = img.astype('float32')
|
||||
img_f64 = img.astype('float64')
|
||||
|
||||
with expected_warnings(['Image dtype is not float']):
|
||||
assert restoration.denoise_nl_means(
|
||||
img, patch_distance=2, fast_mode=fast_mode).dtype == 'float64'
|
||||
|
||||
assert restoration.denoise_nl_means(
|
||||
img_f32, patch_distance=2, fast_mode=fast_mode).dtype == img_f32.dtype
|
||||
|
||||
assert restoration.denoise_nl_means(
|
||||
img_f64, patch_distance=2, fast_mode=fast_mode).dtype == img_f64.dtype
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'img, multichannel, convert2ycbcr',
|
||||
[(astro_gray, False, False),
|
||||
(astro_gray_odd, False, False),
|
||||
(astro_odd, True, False),
|
||||
(astro_odd, True, True)]
|
||||
)
|
||||
def test_wavelet_denoising(img, multichannel, convert2ycbcr):
|
||||
rstate = np.random.RandomState(1234)
|
||||
sigma = 0.1
|
||||
noisy = img + sigma * rstate.randn(*(img.shape))
|
||||
noisy = np.clip(noisy, 0, 1)
|
||||
|
||||
# Verify that SNR is improved when true sigma is used
|
||||
denoised = restoration.denoise_wavelet(noisy, sigma=sigma,
|
||||
multichannel=multichannel,
|
||||
convert2ycbcr=convert2ycbcr,
|
||||
rescale_sigma=True)
|
||||
psnr_noisy = peak_signal_noise_ratio(img, noisy)
|
||||
psnr_denoised = peak_signal_noise_ratio(img, denoised)
|
||||
assert_(psnr_denoised > psnr_noisy)
|
||||
|
||||
# Verify that SNR is improved with internally estimated sigma
|
||||
denoised = restoration.denoise_wavelet(noisy,
|
||||
multichannel=multichannel,
|
||||
convert2ycbcr=convert2ycbcr,
|
||||
rescale_sigma=True)
|
||||
psnr_noisy = peak_signal_noise_ratio(img, noisy)
|
||||
psnr_denoised = peak_signal_noise_ratio(img, denoised)
|
||||
assert_(psnr_denoised > psnr_noisy)
|
||||
|
||||
# SNR is improved less with 1 wavelet level than with the default.
|
||||
denoised_1 = restoration.denoise_wavelet(noisy,
|
||||
multichannel=multichannel,
|
||||
wavelet_levels=1,
|
||||
convert2ycbcr=convert2ycbcr,
|
||||
rescale_sigma=True)
|
||||
psnr_denoised_1 = peak_signal_noise_ratio(img, denoised_1)
|
||||
assert_(psnr_denoised > psnr_denoised_1)
|
||||
assert_(psnr_denoised_1 > psnr_noisy)
|
||||
|
||||
# Test changing noise_std (higher threshold, so less energy in signal)
|
||||
res1 = restoration.denoise_wavelet(noisy, sigma=2 * sigma,
|
||||
multichannel=multichannel,
|
||||
rescale_sigma=True)
|
||||
res2 = restoration.denoise_wavelet(noisy, sigma=sigma,
|
||||
multichannel=multichannel,
|
||||
rescale_sigma=True)
|
||||
assert_(np.sum(res1**2) <= np.sum(res2**2))
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'case, dtype, convert2ycbcr, estimate_sigma',
|
||||
itertools.product(
|
||||
['1d', '2d multichannel'],
|
||||
[np.float16, np.float32, np.float64, np.int16, np.uint8],
|
||||
[True, False],
|
||||
[True, False])
|
||||
)
|
||||
def test_wavelet_denoising_scaling(case, dtype, convert2ycbcr,
|
||||
estimate_sigma):
|
||||
"""Test cases for images without prescaling via img_as_float."""
|
||||
rstate = np.random.RandomState(1234)
|
||||
|
||||
if case == '1d':
|
||||
# 1D single-channel in range [0, 255]
|
||||
x = np.linspace(0, 255, 1024)
|
||||
elif case == '2d multichannel':
|
||||
# 2D multichannel in range [0, 255]
|
||||
x = data.astronaut()[:64, :64]
|
||||
x = x.astype(dtype)
|
||||
|
||||
# add noise and clip to original signal range
|
||||
sigma = 25.
|
||||
noisy = x + sigma * rstate.randn(*x.shape)
|
||||
noisy = np.clip(noisy, x.min(), x.max())
|
||||
noisy = noisy.astype(x.dtype)
|
||||
|
||||
multichannel = x.shape[-1] == 3
|
||||
|
||||
if estimate_sigma:
|
||||
sigma_est = restoration.estimate_sigma(noisy,
|
||||
multichannel=multichannel)
|
||||
else:
|
||||
sigma_est = None
|
||||
|
||||
if convert2ycbcr and not multichannel:
|
||||
# YCbCr requires multichannel == True
|
||||
with testing.raises(ValueError):
|
||||
denoised = restoration.denoise_wavelet(noisy,
|
||||
sigma=sigma_est,
|
||||
wavelet='sym4',
|
||||
multichannel=multichannel,
|
||||
convert2ycbcr=convert2ycbcr,
|
||||
rescale_sigma=True)
|
||||
return
|
||||
|
||||
denoised = restoration.denoise_wavelet(noisy, sigma=sigma_est,
|
||||
wavelet='sym4',
|
||||
multichannel=multichannel,
|
||||
convert2ycbcr=convert2ycbcr,
|
||||
rescale_sigma=True)
|
||||
|
||||
data_range = x.max() - x.min()
|
||||
psnr_noisy = peak_signal_noise_ratio(x, noisy, data_range=data_range)
|
||||
clipped = np.dtype(dtype).kind != 'f'
|
||||
if not clipped:
|
||||
psnr_denoised = peak_signal_noise_ratio(x, denoised,
|
||||
data_range=data_range)
|
||||
|
||||
# output's max value is not substantially smaller than x's
|
||||
assert_(denoised.max() > 0.9 * x.max())
|
||||
else:
|
||||
# have to compare to x_as_float in integer input cases
|
||||
x_as_float = img_as_float(x)
|
||||
f_data_range = x_as_float.max() - x_as_float.min()
|
||||
psnr_denoised = peak_signal_noise_ratio(x_as_float, denoised,
|
||||
data_range=f_data_range)
|
||||
|
||||
# output has been clipped to expected range
|
||||
assert_(denoised.max() <= 1.0)
|
||||
if np.dtype(dtype).kind == 'u':
|
||||
assert_(denoised.min() >= 0)
|
||||
else:
|
||||
assert_(denoised.min() >= -1)
|
||||
|
||||
assert_(psnr_denoised > psnr_noisy)
|
||||
|
||||
|
||||
def test_wavelet_threshold():
|
||||
rstate = np.random.RandomState(1234)
|
||||
|
||||
img = astro_gray
|
||||
sigma = 0.1
|
||||
noisy = img + sigma * rstate.randn(*(img.shape))
|
||||
noisy = np.clip(noisy, 0, 1)
|
||||
|
||||
# employ a single, user-specified threshold instead of BayesShrink sigmas
|
||||
denoised = _wavelet_threshold(noisy, wavelet='db1', method=None,
|
||||
threshold=sigma)
|
||||
psnr_noisy = peak_signal_noise_ratio(img, noisy)
|
||||
psnr_denoised = peak_signal_noise_ratio(img, denoised)
|
||||
assert_(psnr_denoised > psnr_noisy)
|
||||
|
||||
# either method or threshold must be defined
|
||||
with testing.raises(ValueError):
|
||||
_wavelet_threshold(noisy, wavelet='db1', method=None, threshold=None)
|
||||
|
||||
# warns if a threshold is provided in a case where it would be ignored
|
||||
with expected_warnings(["Thresholding method ",]):
|
||||
_wavelet_threshold(noisy, wavelet='db1', method='BayesShrink',
|
||||
threshold=sigma)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'rescale_sigma, method, ndim',
|
||||
itertools.product(
|
||||
[True, False],
|
||||
['VisuShrink', 'BayesShrink'],
|
||||
range(1, 5)
|
||||
)
|
||||
)
|
||||
def test_wavelet_denoising_nd(rescale_sigma, method, ndim):
|
||||
rstate = np.random.RandomState(1234)
|
||||
# Generate a very simple test image
|
||||
if ndim < 3:
|
||||
img = 0.2*np.ones((128, )*ndim)
|
||||
else:
|
||||
img = 0.2*np.ones((16, )*ndim)
|
||||
img[(slice(5, 13), ) * ndim] = 0.8
|
||||
|
||||
sigma = 0.1
|
||||
noisy = img + sigma * rstate.randn(*(img.shape))
|
||||
noisy = np.clip(noisy, 0, 1)
|
||||
|
||||
# Mark H. 2018.08:
|
||||
# The issue arises because when ndim in [1, 2]
|
||||
# ``waverecn`` calls ``_match_coeff_dims``
|
||||
# Which includes a numpy 1.15 deprecation.
|
||||
# for larger number of dimensions _match_coeff_dims isn't called
|
||||
# for some reason.
|
||||
# Verify that SNR is improved with internally estimated sigma
|
||||
denoised = restoration.denoise_wavelet(
|
||||
noisy, method=method,
|
||||
rescale_sigma=rescale_sigma)
|
||||
psnr_noisy = peak_signal_noise_ratio(img, noisy)
|
||||
psnr_denoised = peak_signal_noise_ratio(img, denoised)
|
||||
assert_(psnr_denoised > psnr_noisy)
|
||||
|
||||
|
||||
def test_wavelet_invalid_method():
|
||||
with testing.raises(ValueError):
|
||||
restoration.denoise_wavelet(np.ones(16), method='Unimplemented',
|
||||
rescale_sigma=True)
|
||||
|
||||
|
||||
def test_wavelet_rescale_sigma_deprecation():
|
||||
# Not specifying rescale_sigma results in a FutureWarning
|
||||
assert_warns(FutureWarning, restoration.denoise_wavelet, np.ones(16))
|
||||
|
||||
|
||||
@pytest.mark.parametrize('rescale_sigma', [True, False])
|
||||
def test_wavelet_denoising_levels(rescale_sigma):
|
||||
rstate = np.random.RandomState(1234)
|
||||
ndim = 2
|
||||
N = 256
|
||||
wavelet = 'db1'
|
||||
# Generate a very simple test image
|
||||
img = 0.2*np.ones((N, )*ndim)
|
||||
img[(slice(5, 13), ) * ndim] = 0.8
|
||||
|
||||
sigma = 0.1
|
||||
noisy = img + sigma * rstate.randn(*(img.shape))
|
||||
noisy = np.clip(noisy, 0, 1)
|
||||
|
||||
denoised = restoration.denoise_wavelet(noisy, wavelet=wavelet,
|
||||
rescale_sigma=rescale_sigma)
|
||||
denoised_1 = restoration.denoise_wavelet(noisy, wavelet=wavelet,
|
||||
wavelet_levels=1,
|
||||
rescale_sigma=rescale_sigma)
|
||||
psnr_noisy = peak_signal_noise_ratio(img, noisy)
|
||||
psnr_denoised = peak_signal_noise_ratio(img, denoised)
|
||||
psnr_denoised_1 = peak_signal_noise_ratio(img, denoised_1)
|
||||
|
||||
# multi-level case should outperform single level case
|
||||
assert_(psnr_denoised > psnr_denoised_1 > psnr_noisy)
|
||||
|
||||
# invalid number of wavelet levels results in a ValueError or UserWarning
|
||||
max_level = pywt.dwt_max_level(np.min(img.shape),
|
||||
pywt.Wavelet(wavelet).dec_len)
|
||||
# exceeding max_level raises a UserWarning in PyWavelets >= 1.0.0
|
||||
with expected_warnings([
|
||||
'all coefficients will experience boundary effects']):
|
||||
restoration.denoise_wavelet(
|
||||
noisy, wavelet=wavelet, wavelet_levels=max_level + 1,
|
||||
rescale_sigma=rescale_sigma)
|
||||
|
||||
with testing.raises(ValueError):
|
||||
restoration.denoise_wavelet(
|
||||
noisy,
|
||||
wavelet=wavelet, wavelet_levels=-1,
|
||||
rescale_sigma=rescale_sigma)
|
||||
|
||||
|
||||
def test_estimate_sigma_gray():
|
||||
rstate = np.random.RandomState(1234)
|
||||
# astronaut image
|
||||
img = astro_gray.copy()
|
||||
sigma = 0.1
|
||||
# add noise to astronaut
|
||||
img += sigma * rstate.standard_normal(img.shape)
|
||||
|
||||
sigma_est = restoration.estimate_sigma(img, multichannel=False)
|
||||
assert_almost_equal(sigma, sigma_est, decimal=2)
|
||||
|
||||
|
||||
def test_estimate_sigma_masked_image():
|
||||
# Verify computation on an image with a large, noise-free border.
|
||||
# (zero regions will be masked out by _sigma_est_dwt to avoid returning
|
||||
# sigma = 0)
|
||||
rstate = np.random.RandomState(1234)
|
||||
# uniform image
|
||||
img = np.zeros((128, 128))
|
||||
center_roi = (slice(32, 96), slice(32, 96))
|
||||
img[center_roi] = 0.8
|
||||
sigma = 0.1
|
||||
|
||||
img[center_roi] = sigma * rstate.standard_normal(img[center_roi].shape)
|
||||
|
||||
sigma_est = restoration.estimate_sigma(img, multichannel=False)
|
||||
assert_almost_equal(sigma, sigma_est, decimal=1)
|
||||
|
||||
|
||||
def test_estimate_sigma_color():
|
||||
rstate = np.random.RandomState(1234)
|
||||
# astronaut image
|
||||
img = astro.copy()
|
||||
sigma = 0.1
|
||||
# add noise to astronaut
|
||||
img += sigma * rstate.standard_normal(img.shape)
|
||||
|
||||
sigma_est = restoration.estimate_sigma(img, multichannel=True,
|
||||
average_sigmas=True)
|
||||
assert_almost_equal(sigma, sigma_est, decimal=2)
|
||||
|
||||
sigma_list = restoration.estimate_sigma(img, multichannel=True,
|
||||
average_sigmas=False)
|
||||
assert_equal(len(sigma_list), img.shape[-1])
|
||||
assert_almost_equal(sigma_list[0], sigma_est, decimal=2)
|
||||
|
||||
# default multichannel=False should raise a warning about last axis size
|
||||
assert_warns(UserWarning, restoration.estimate_sigma, img)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('rescale_sigma', [True, False])
|
||||
def test_wavelet_denoising_args(rescale_sigma):
|
||||
"""
|
||||
Some of the functions inside wavelet denoising throw an error the wrong
|
||||
arguments are passed. This protects against that and verifies that all
|
||||
arguments can be passed.
|
||||
"""
|
||||
    img = astro
    noisy = img.copy() + 0.1 * np.random.randn(*(img.shape))

    for convert2ycbcr in [True, False]:
        for multichannel in [True, False]:
            if convert2ycbcr and not multichannel:
                with testing.raises(ValueError):
                    restoration.denoise_wavelet(noisy,
                                                convert2ycbcr=convert2ycbcr,
                                                multichannel=multichannel,
                                                rescale_sigma=rescale_sigma)
                continue
            for sigma in [0.1, [0.1, 0.1, 0.1], None]:
                if (not multichannel and not convert2ycbcr) or \
                        (isinstance(sigma, list) and not multichannel):
                    continue
                restoration.denoise_wavelet(noisy, sigma=sigma,
                                            convert2ycbcr=convert2ycbcr,
                                            multichannel=multichannel,
                                            rescale_sigma=rescale_sigma)


@pytest.mark.parametrize('rescale_sigma', [True, False])
def test_denoise_wavelet_biorthogonal(rescale_sigma):
    """Biorthogonal wavelets should raise a warning during thresholding."""
    img = astro_gray
    assert_warns(UserWarning, restoration.denoise_wavelet, img,
                 wavelet='bior2.2', multichannel=False,
                 rescale_sigma=rescale_sigma)


@pytest.mark.parametrize('rescale_sigma', [True, False])
def test_cycle_spinning_multichannel(rescale_sigma):
    sigma = 0.1
    rstate = np.random.RandomState(1234)

    for multichannel in True, False:
        if multichannel:
            img = astro
            # can either omit or be 0 along the channels axis
            valid_shifts = [1, (0, 1), (1, 0), (1, 1), (1, 1, 0)]
            # can either omit or be 1 on channels axis.
            valid_steps = [1, 2, (1, 2), (1, 2, 1)]
            # too few or too many shifts or non-zero shift on channels
            invalid_shifts = [(1, 1, 2), (1, ), (1, 1, 0, 1)]
            # too few or too many shifts or any shifts <= 0
            invalid_steps = [(1, ), (1, 1, 1, 1), (0, 1), (-1, -1)]
        else:
            img = astro_gray
            valid_shifts = [1, (0, 1), (1, 0), (1, 1)]
            valid_steps = [1, 2, (1, 2)]
            invalid_shifts = [(1, 1, 2), (1, )]
            invalid_steps = [(1, ), (1, 1, 1), (0, 1), (-1, -1)]

        noisy = img.copy() + 0.1 * rstate.randn(*(img.shape))

        denoise_func = restoration.denoise_wavelet
        func_kw = dict(sigma=sigma, multichannel=multichannel,
                       rescale_sigma=rescale_sigma)

        # max_shifts=0 is equivalent to just calling denoise_func
        with expected_warnings([DASK_NOT_INSTALLED_WARNING]):
            dn_cc = restoration.cycle_spin(noisy, denoise_func, max_shifts=0,
                                           func_kw=func_kw,
                                           multichannel=multichannel)
        dn = denoise_func(noisy, **func_kw)
        assert_equal(dn, dn_cc)

        # denoising with cycle spinning will give better PSNR than without
        for max_shifts in valid_shifts:
            with expected_warnings([DASK_NOT_INSTALLED_WARNING]):
                dn_cc = restoration.cycle_spin(noisy, denoise_func,
                                               max_shifts=max_shifts,
                                               func_kw=func_kw,
                                               multichannel=multichannel)
            psnr = peak_signal_noise_ratio(img, dn)
            psnr_cc = peak_signal_noise_ratio(img, dn_cc)
            assert_(psnr_cc > psnr)

        for shift_steps in valid_steps:
            with expected_warnings([DASK_NOT_INSTALLED_WARNING]):
                dn_cc = restoration.cycle_spin(noisy, denoise_func,
                                               max_shifts=2,
                                               shift_steps=shift_steps,
                                               func_kw=func_kw,
                                               multichannel=multichannel)
            psnr = peak_signal_noise_ratio(img, dn)
            psnr_cc = peak_signal_noise_ratio(img, dn_cc)
            assert_(psnr_cc > psnr)

        for max_shifts in invalid_shifts:
            with testing.raises(ValueError):
                dn_cc = restoration.cycle_spin(noisy, denoise_func,
                                               max_shifts=max_shifts,
                                               func_kw=func_kw,
                                               multichannel=multichannel)
        for shift_steps in invalid_steps:
            with testing.raises(ValueError):
                dn_cc = restoration.cycle_spin(noisy, denoise_func,
                                               max_shifts=2,
                                               shift_steps=shift_steps,
                                               func_kw=func_kw,
                                               multichannel=multichannel)


def test_cycle_spinning_num_workers():
    img = astro_gray
    sigma = 0.1
    rstate = np.random.RandomState(1234)
    noisy = img.copy() + 0.1 * rstate.randn(*(img.shape))

    denoise_func = restoration.denoise_wavelet
    func_kw = dict(sigma=sigma, multichannel=True, rescale_sigma=True)

    # same results are expected whether using 1 worker or multiple workers
    dn_cc1 = restoration.cycle_spin(noisy, denoise_func, max_shifts=1,
                                    func_kw=func_kw, multichannel=False,
                                    num_workers=1)
    with expected_warnings([DASK_NOT_INSTALLED_WARNING,]):
        dn_cc2 = restoration.cycle_spin(noisy, denoise_func, max_shifts=1,
                                        func_kw=func_kw, multichannel=False,
                                        num_workers=4)
        dn_cc3 = restoration.cycle_spin(noisy, denoise_func, max_shifts=1,
                                        func_kw=func_kw, multichannel=False,
                                        num_workers=None)
    assert_almost_equal(dn_cc1, dn_cc2)
    assert_almost_equal(dn_cc1, dn_cc3)


if __name__ == "__main__":
    testing.run_module_suite()
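The cycle-spinning tests above exercise restoration.cycle_spin wrapped around denoise_wavelet. As a point of reference, a minimal standalone sketch of the same pattern might look as follows (the synthetic image, noise level, and shift grid are illustrative choices, not values taken from the test suite):

import numpy as np
from skimage import restoration
from skimage.metrics import peak_signal_noise_ratio

# Synthetic piecewise-constant image with additive Gaussian noise.
rng = np.random.RandomState(0)
img = 0.2 * np.ones((128, 128))
img[32:96, 32:96] = 0.8
noisy = np.clip(img + 0.1 * rng.randn(*img.shape), 0, 1)

# Shift-average the wavelet denoiser over a 3x3 grid of spatial shifts.
denoised = restoration.cycle_spin(
    noisy, restoration.denoise_wavelet, max_shifts=2,
    func_kw=dict(rescale_sigma=True), multichannel=False)

print(peak_signal_noise_ratio(img, noisy),
      peak_signal_noise_ratio(img, denoised))

Cycle spinning trades extra compute for translation invariance of the wavelet thresholding, which is why the tests expect its PSNR to beat a single denoise_wavelet call.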
65
venv/Lib/site-packages/skimage/restoration/tests/test_inpaint.py
Normal file

@ -0,0 +1,65 @@
import numpy as np
from skimage.restoration import inpaint

from skimage._shared import testing
from skimage._shared.testing import assert_allclose


def test_inpaint_biharmonic_2d():
    img = np.tile(np.square(np.linspace(0, 1, 5)), (5, 1))
    mask = np.zeros_like(img)
    mask[2, 2:] = 1
    mask[1, 3:] = 1
    mask[0, 4:] = 1
    img[np.where(mask)] = 0
    out = inpaint.inpaint_biharmonic(img, mask)
    ref = np.array(
        [[0., 0.0625, 0.25000000, 0.5625000, 0.73925058],
         [0., 0.0625, 0.25000000, 0.5478048, 0.76557821],
         [0., 0.0625, 0.25842878, 0.5623079, 0.85927796],
         [0., 0.0625, 0.25000000, 0.5625000, 1.00000000],
         [0., 0.0625, 0.25000000, 0.5625000, 1.00000000]]
    )
    assert_allclose(ref, out)


def test_inpaint_biharmonic_3d():
    img = np.tile(np.square(np.linspace(0, 1, 5)), (5, 1))
    img = np.dstack((img, img.T))
    mask = np.zeros_like(img)
    mask[2, 2:, :] = 1
    mask[1, 3:, :] = 1
    mask[0, 4:, :] = 1
    img[np.where(mask)] = 0
    out = inpaint.inpaint_biharmonic(img, mask)
    ref = np.dstack((
        np.array(
            [[0.0000, 0.0625, 0.25000000, 0.56250000, 0.53752796],
             [0.0000, 0.0625, 0.25000000, 0.44443780, 0.53762210],
             [0.0000, 0.0625, 0.23693666, 0.46621112, 0.68615592],
             [0.0000, 0.0625, 0.25000000, 0.56250000, 1.00000000],
             [0.0000, 0.0625, 0.25000000, 0.56250000, 1.00000000]]),
        np.array(
            [[0.0000, 0.0000, 0.00000000, 0.00000000, 0.19621902],
             [0.0625, 0.0625, 0.06250000, 0.17470756, 0.30140091],
             [0.2500, 0.2500, 0.27241289, 0.35155440, 0.43068654],
             [0.5625, 0.5625, 0.56250000, 0.56250000, 0.56250000],
             [1.0000, 1.0000, 1.00000000, 1.00000000, 1.00000000]])
    ))
    assert_allclose(ref, out)


def test_invalid_input():
    img, mask = np.zeros([]), np.zeros([])
    with testing.raises(ValueError):
        inpaint.inpaint_biharmonic(img, mask)

    img, mask = np.zeros((2, 2)), np.zeros((4, 1))
    with testing.raises(ValueError):
        inpaint.inpaint_biharmonic(img, mask)

    img = np.ma.array(np.zeros((2, 2)), mask=[[0, 0], [0, 0]])
    mask = np.zeros((2, 2))
    with testing.raises(TypeError):
        inpaint.inpaint_biharmonic(img, mask)
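For orientation, a small sketch of the call pattern these inpainting tests exercise: damage a few pixels of a smooth image, mark them in a mask, and let inpaint_biharmonic fill them back in (the ramp image and mask placement are illustrative only):

import numpy as np
from skimage.restoration import inpaint

# Smooth 2D ramp with a handful of pixels knocked out.
truth = np.tile(np.square(np.linspace(0, 1, 5)), (5, 1))
img = truth.copy()
mask = np.zeros_like(img, dtype=bool)
mask[2, 2:] = True
img[mask] = 0            # "damaged" pixels

# Biharmonic inpainting reconstructs the masked region from its surroundings.
filled = inpaint.inpaint_biharmonic(img, mask)
print(np.abs(filled - truth).max())   # small residual on this smooth image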
89
venv/Lib/site-packages/skimage/restoration/tests/test_j_invariant.py
Normal file

@ -0,0 +1,89 @@
import functools
import numpy as np

from skimage._shared.testing import assert_
from skimage.data import binary_blobs
from skimage.data import camera, chelsea
from skimage.metrics import mean_squared_error as mse
from skimage.restoration import (calibrate_denoiser,
                                 denoise_wavelet)
from skimage.restoration.j_invariant import _invariant_denoise
from skimage.util import img_as_float, random_noise

test_img = img_as_float(camera())
test_img_color = img_as_float(chelsea())
test_img_3d = img_as_float(binary_blobs(64, n_dim=3)) / 2
noisy_img = random_noise(test_img, mode='gaussian', var=0.01)
noisy_img_color = random_noise(test_img_color, mode='gaussian', var=0.01)
noisy_img_3d = random_noise(test_img_3d, mode='gaussian', var=0.1)

_denoise_wavelet = functools.partial(denoise_wavelet, rescale_sigma=True)


def test_invariant_denoise():
    denoised_img = _invariant_denoise(noisy_img, _denoise_wavelet)

    denoised_mse = mse(denoised_img, test_img)
    original_mse = mse(noisy_img, test_img)
    assert_(denoised_mse < original_mse)


def test_invariant_denoise_color():
    denoised_img_color = _invariant_denoise(
        noisy_img_color, _denoise_wavelet,
        denoiser_kwargs=dict(multichannel=True))

    denoised_mse = mse(denoised_img_color, test_img_color)
    original_mse = mse(noisy_img_color, test_img_color)
    assert_(denoised_mse < original_mse)


def test_invariant_denoise_3d():
    denoised_img_3d = _invariant_denoise(noisy_img_3d, _denoise_wavelet)

    denoised_mse = mse(denoised_img_3d, test_img_3d)
    original_mse = mse(noisy_img_3d, test_img_3d)
    assert_(denoised_mse < original_mse)


def test_calibrate_denoiser_extra_output():
    parameter_ranges = {'sigma': np.linspace(0.1, 1, 5) / 2}
    _, (parameters_tested, losses) = calibrate_denoiser(
        noisy_img,
        _denoise_wavelet,
        denoise_parameters=parameter_ranges,
        extra_output=True
    )

    all_denoised = [_invariant_denoise(noisy_img, _denoise_wavelet,
                                       denoiser_kwargs=denoiser_kwargs)
                    for denoiser_kwargs in parameters_tested]

    ground_truth_losses = [mse(img, test_img) for img in all_denoised]
    assert_(np.argmin(losses) == np.argmin(ground_truth_losses))


def test_calibrate_denoiser():
    parameter_ranges = {'sigma': np.linspace(0.1, 1, 5) / 2}

    denoiser = calibrate_denoiser(noisy_img, _denoise_wavelet,
                                  denoise_parameters=parameter_ranges)

    denoised_mse = mse(denoiser(noisy_img), test_img)
    original_mse = mse(noisy_img, test_img)
    assert_(denoised_mse < original_mse)


def test_input_image_not_modified():
    input_image = noisy_img.copy()

    parameter_ranges = {'sigma': np.random.random(5) / 2}
    calibrate_denoiser(input_image, _denoise_wavelet,
                       denoise_parameters=parameter_ranges)

    assert_(np.all(noisy_img == input_image))


if __name__ == '__main__':
    from numpy import testing

    testing.run_module_suite()
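A compact sketch of the calibration workflow covered above: calibrate_denoiser searches a user-supplied parameter grid with a J-invariant self-supervision loss and returns the best-performing denoiser as a callable (the sigma grid below is an arbitrary example):

import functools
import numpy as np
from skimage.data import camera
from skimage.restoration import calibrate_denoiser, denoise_wavelet
from skimage.util import img_as_float, random_noise

image = img_as_float(camera())
noisy = random_noise(image, mode='gaussian', var=0.01)

# Search a small grid of sigma values; no clean reference image is needed.
denoiser = functools.partial(denoise_wavelet, rescale_sigma=True)
best_denoise = calibrate_denoiser(
    noisy, denoiser,
    denoise_parameters={'sigma': np.linspace(0.05, 0.3, 6)})

# The returned callable applies the selected parameters to new images.
denoised = best_denoise(noisy)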
92
venv/Lib/site-packages/skimage/restoration/tests/test_restoration.py
Normal file

@ -0,0 +1,92 @@
import numpy as np
from scipy.signal import convolve2d
from scipy import ndimage as ndi
from skimage._shared.testing import fetch

import skimage
from skimage.data import camera
from skimage import restoration
from skimage.restoration import uft

test_img = skimage.img_as_float(camera())


def test_wiener():
    psf = np.ones((5, 5)) / 25
    data = convolve2d(test_img, psf, 'same')
    np.random.seed(0)
    data += 0.1 * data.std() * np.random.standard_normal(data.shape)
    deconvolved = restoration.wiener(data, psf, 0.05)

    path = fetch('restoration/tests/camera_wiener.npy')
    np.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3)

    _, laplacian = uft.laplacian(2, data.shape)
    otf = uft.ir2tf(psf, data.shape, is_real=False)
    deconvolved = restoration.wiener(data, otf, 0.05,
                                     reg=laplacian,
                                     is_real=False)
    np.testing.assert_allclose(np.real(deconvolved),
                               np.load(path),
                               rtol=1e-3)


def test_unsupervised_wiener():
    psf = np.ones((5, 5)) / 25
    data = convolve2d(test_img, psf, 'same')
    np.random.seed(0)
    data += 0.1 * data.std() * np.random.standard_normal(data.shape)
    deconvolved, _ = restoration.unsupervised_wiener(data, psf)

    path = fetch('restoration/tests/camera_unsup.npy')
    np.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3)

    _, laplacian = uft.laplacian(2, data.shape)
    otf = uft.ir2tf(psf, data.shape, is_real=False)
    np.random.seed(0)
    deconvolved = restoration.unsupervised_wiener(
        data, otf, reg=laplacian, is_real=False,
        user_params={"callback": lambda x: None})[0]
    path = fetch('restoration/tests/camera_unsup2.npy')
    np.testing.assert_allclose(np.real(deconvolved),
                               np.load(path),
                               rtol=1e-3)


def test_image_shape():
    """Test that shape of output image in deconvolution is same as input.

    This addresses issue #1172.
    """
    point = np.zeros((5, 5), np.float)
    point[2, 2] = 1.
    psf = ndi.gaussian_filter(point, sigma=1.)
    # image shape: (45, 45), as reported in #1172
    image = skimage.img_as_float(camera()[110:155, 225:270])  # just the face
    image_conv = ndi.convolve(image, psf)
    deconv_sup = restoration.wiener(image_conv, psf, 1)
    deconv_un = restoration.unsupervised_wiener(image_conv, psf)[0]
    # test the shape
    np.testing.assert_equal(image.shape, deconv_sup.shape)
    np.testing.assert_equal(image.shape, deconv_un.shape)
    # test the reconstruction error
    sup_relative_error = np.abs(deconv_sup - image) / image
    un_relative_error = np.abs(deconv_un - image) / image
    np.testing.assert_array_less(np.median(sup_relative_error), 0.1)
    np.testing.assert_array_less(np.median(un_relative_error), 0.1)


def test_richardson_lucy():
    psf = np.ones((5, 5)) / 25
    data = convolve2d(test_img, psf, 'same')
    np.random.seed(0)
    data += 0.1 * data.std() * np.random.standard_normal(data.shape)
    deconvolved = restoration.richardson_lucy(data, psf, 5)

    path = fetch('restoration/tests/camera_rl.npy')
    np.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3)


if __name__ == '__main__':
    from numpy import testing
    testing.run_module_suite()
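For reference, a minimal deconvolution sketch along the lines of these tests, covering the three public routines they exercise (the box PSF, noise level, and iteration count are arbitrary example values):

import numpy as np
from scipy.signal import convolve2d
from skimage import img_as_float, restoration
from skimage.data import camera

image = img_as_float(camera())
psf = np.ones((5, 5)) / 25                      # uniform blur kernel

# Blur, then add a little Gaussian noise.
rng = np.random.RandomState(0)
blurred = convolve2d(image, psf, 'same')
blurred += 0.1 * blurred.std() * rng.standard_normal(blurred.shape)

# Regularized Wiener filter with a user-chosen balance parameter.
wiener_est = restoration.wiener(blurred, psf, balance=0.05)

# Self-tuned variant: the regularization weight is estimated from the data.
unsup_est, _ = restoration.unsupervised_wiener(blurred, psf)

# Iterative Richardson-Lucy deconvolution.
rl_est = restoration.richardson_lucy(blurred, psf, iterations=5)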
219
venv/Lib/site-packages/skimage/restoration/tests/test_unwrap.py
Normal file
219
venv/Lib/site-packages/skimage/restoration/tests/test_unwrap.py
Normal file
@ -0,0 +1,219 @@
import numpy as np
from skimage.restoration import unwrap_phase
import sys

import warnings
from skimage._shared import testing
from skimage._shared.testing import (assert_array_almost_equal_nulp,
                                     assert_almost_equal, assert_array_equal,
                                     assert_, skipif)
from skimage._shared._warnings import expected_warnings


def assert_phase_almost_equal(a, b, *args, **kwargs):
    """An assert_almost_equal insensitive to phase shifts of n*2*pi."""
    shift = 2 * np.pi * np.round((b.mean() - a.mean()) / (2 * np.pi))
    with expected_warnings([r'invalid value encountered|\A\Z',
                            r'divide by zero encountered|\A\Z']):
        print('assert_phase_allclose, abs', np.max(np.abs(a - (b - shift))))
        print('assert_phase_allclose, rel',
              np.max(np.abs((a - (b - shift)) / a)))
    if np.ma.isMaskedArray(a):
        assert_(np.ma.isMaskedArray(b))
        assert_array_equal(a.mask, b.mask)
        assert_(a.fill_value == b.fill_value)
        au = np.asarray(a)
        bu = np.asarray(b)
        with expected_warnings([r'invalid value encountered|\A\Z',
                                r'divide by zero encountered|\A\Z']):
            print('assert_phase_allclose, no mask, abs',
                  np.max(np.abs(au - (bu - shift))))
            print('assert_phase_allclose, no mask, rel',
                  np.max(np.abs((au - (bu - shift)) / au)))
    assert_array_almost_equal_nulp(a + shift, b, *args, **kwargs)


def check_unwrap(image, mask=None):
    image_wrapped = np.angle(np.exp(1j * image))
    if mask is not None:
        print('Testing a masked image')
        image = np.ma.array(image, mask=mask, fill_value=0.5)
        image_wrapped = np.ma.array(image_wrapped, mask=mask, fill_value=0.5)
    image_unwrapped = unwrap_phase(image_wrapped, seed=0)
    assert_phase_almost_equal(image_unwrapped, image)


def test_unwrap_1d():
    image = np.linspace(0, 10 * np.pi, 100)
    check_unwrap(image)
    # Masked arrays are not allowed in 1D
    with testing.raises(ValueError):
        check_unwrap(image, True)
    # wrap_around is not allowed in 1D
    with testing.raises(ValueError):
        unwrap_phase(image, True, seed=0)


@testing.parametrize("check_with_mask", (False, True))
def test_unwrap_2d(check_with_mask):
    mask = None
    x, y = np.ogrid[:8, :16]
    image = 2 * np.pi * (x * 0.2 + y * 0.1)
    if check_with_mask:
        mask = np.zeros(image.shape, dtype=np.bool)
        mask[4:6, 4:8] = True
    check_unwrap(image, mask)


@testing.parametrize("check_with_mask", (False, True))
def test_unwrap_3d(check_with_mask):
    mask = None
    x, y, z = np.ogrid[:8, :12, :16]
    image = 2 * np.pi * (x * 0.2 + y * 0.1 + z * 0.05)
    if check_with_mask:
        mask = np.zeros(image.shape, dtype=np.bool)
        mask[4:6, 4:6, 1:3] = True
    check_unwrap(image, mask)


def check_wrap_around(ndim, axis):
    # create a ramp, but with the last pixel along axis equalling the first
    elements = 100
    ramp = np.linspace(0, 12 * np.pi, elements)
    ramp[-1] = ramp[0]
    image = ramp.reshape(tuple([elements if n == axis else 1
                                for n in range(ndim)]))
    image_wrapped = np.angle(np.exp(1j * image))

    index_first = tuple([0] * ndim)
    index_last = tuple([-1 if n == axis else 0 for n in range(ndim)])
    # unwrap the image without wrap around
    # We do not want warnings about length 1 dimensions
    with expected_warnings([r'Image has a length 1 dimension|\A\Z']):
        image_unwrap_no_wrap_around = unwrap_phase(image_wrapped, seed=0)
    print('endpoints without wrap_around:',
          image_unwrap_no_wrap_around[index_first],
          image_unwrap_no_wrap_around[index_last])
    # without wrap around, the endpoints of the image should differ
    assert_(abs(image_unwrap_no_wrap_around[index_first] -
                image_unwrap_no_wrap_around[index_last]) > np.pi)
    # unwrap the image with wrap around
    wrap_around = [n == axis for n in range(ndim)]
    # We do not want warnings about length 1 dimensions
    with expected_warnings([r'Image has a length 1 dimension.|\A\Z']):
        image_unwrap_wrap_around = unwrap_phase(image_wrapped, wrap_around,
                                                seed=0)
    print('endpoints with wrap_around:',
          image_unwrap_wrap_around[index_first],
          image_unwrap_wrap_around[index_last])
    # with wrap around, the endpoints of the image should be equal
    assert_almost_equal(image_unwrap_wrap_around[index_first],
                        image_unwrap_wrap_around[index_last])


dim_axis = [(ndim, axis) for ndim in (2, 3) for axis in range(ndim)]


@skipif(sys.version_info[:2] == (3, 4),
        reason="Doesn't work with python 3.4. See issue #3079")
@testing.parametrize("ndim, axis", dim_axis)
def test_wrap_around(ndim, axis):
    check_wrap_around(ndim, axis)


def test_mask():
    length = 100
    ramps = [np.linspace(0, 4 * np.pi, length),
             np.linspace(0, 8 * np.pi, length),
             np.linspace(0, 6 * np.pi, length)]
    image = np.vstack(ramps)
    mask_1d = np.ones((length,), dtype=np.bool)
    mask_1d[0] = mask_1d[-1] = False
    for i in range(len(ramps)):
        # mask all ramps but the i'th one
        mask = np.zeros(image.shape, dtype=np.bool)
        mask |= mask_1d.reshape(1, -1)
        mask[i, :] = False  # unmask i'th ramp
        image_wrapped = np.ma.array(np.angle(np.exp(1j * image)), mask=mask)
        image_unwrapped = unwrap_phase(image_wrapped)
        image_unwrapped -= image_unwrapped[0, 0]  # remove phase shift
        # The end of the unwrapped array should have value equal to the
        # endpoint of the unmasked ramp
        assert_array_almost_equal_nulp(image_unwrapped[:, -1], image[i, -1])
        assert_(np.ma.isMaskedArray(image_unwrapped))

        # Same tests, but forcing use of the 3D unwrapper by reshaping
        with expected_warnings(['length 1 dimension']):
            shape = (1,) + image_wrapped.shape
            image_wrapped_3d = image_wrapped.reshape(shape)
            image_unwrapped_3d = unwrap_phase(image_wrapped_3d)
            # remove phase shift
            image_unwrapped_3d -= image_unwrapped_3d[0, 0, 0]
        assert_array_almost_equal_nulp(image_unwrapped_3d[:, :, -1],
                                       image[i, -1])


def test_invalid_input():
    with testing.raises(ValueError):
        unwrap_phase(np.zeros([]))
    with testing.raises(ValueError):
        unwrap_phase(np.zeros((1, 1, 1, 1)))
    with testing.raises(ValueError):
        unwrap_phase(np.zeros((1, 1)), 3 * [False])
    with testing.raises(ValueError):
        unwrap_phase(np.zeros((1, 1)), 'False')


def test_unwrap_3d_middle_wrap_around():
    # Segmentation fault in 3D unwrap phase with middle dimension connected
    # GitHub issue #1171
    image = np.zeros((20, 30, 40), dtype=np.float32)
    unwrap = unwrap_phase(image, wrap_around=[False, True, False])
    assert_(np.all(unwrap == 0))


def test_unwrap_2d_compressed_mask():
    # ValueError when image is masked array with a compressed mask (no masked
    # elements). GitHub issue #1346
    image = np.ma.zeros((10, 10))
    unwrap = unwrap_phase(image)
    assert_(np.all(unwrap == 0))


def test_unwrap_2d_all_masked():
    # Segmentation fault when image is a masked array with all elements masked
    # GitHub issue #1347
    # all elements masked
    image = np.ma.zeros((10, 10))
    image[:] = np.ma.masked
    unwrap = unwrap_phase(image)
    assert_(np.ma.isMaskedArray(unwrap))
    assert_(np.all(unwrap.mask))

    # 1 unmasked element, still zero edges
    image = np.ma.zeros((10, 10))
    image[:] = np.ma.masked
    image[0, 0] = 0
    unwrap = unwrap_phase(image)
    assert_(np.ma.isMaskedArray(unwrap))
    assert_(np.sum(unwrap.mask) == 99)  # all but one masked
    assert_(unwrap[0, 0] == 0)


def test_unwrap_3d_all_masked():
    # all elements masked
    image = np.ma.zeros((10, 10, 10))
    image[:] = np.ma.masked
    unwrap = unwrap_phase(image)
    assert_(np.ma.isMaskedArray(unwrap))
    assert_(np.all(unwrap.mask))

    # 1 unmasked element, still zero edges
    image = np.ma.zeros((10, 10, 10))
    image[:] = np.ma.masked
    image[0, 0, 0] = 0
    unwrap = unwrap_phase(image)
    assert_(np.ma.isMaskedArray(unwrap))
    assert_(np.sum(unwrap.mask) == 999)  # all but one masked
    assert_(unwrap[0, 0, 0] == 0)
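A short sketch of the round trip these tests rely on: wrap a smooth phase surface into [-pi, pi) and recover it with unwrap_phase, following the Gaussian-bump example from the function's docstring:

import numpy as np
from skimage.restoration import unwrap_phase

# Smooth phase surface spanning several multiples of 2*pi.
y, x = np.ogrid[-1:1:128j, -1:1:128j]
image = 12 * np.pi * np.exp(-(x ** 2 + y ** 2))

# Wrapping folds the values into [-pi, pi); unwrapping undoes it up to a
# constant multiple of 2*pi.
wrapped = np.angle(np.exp(1j * image))
unwrapped = unwrap_phase(wrapped)
print(np.std(unwrapped - image))   # ~0 apart from a constant offset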
449
venv/Lib/site-packages/skimage/restoration/uft.py
Normal file
449
venv/Lib/site-packages/skimage/restoration/uft.py
Normal file
@ -0,0 +1,449 @@
r"""Functions for the unitary Fourier transform (uft) and utilities.

This module implements the unitary Fourier transform, also known as
the ortho-normal transform. It is especially useful for convolution
[1], as it respects the Parseval equality. The value of the null
frequency is equal to

.. math:: \frac{1}{\sqrt{n}} \sum_i x_i

so the Fourier transform has the same energy as the original image
(see ``image_quad_norm`` function). The transform is applied from the
last axis for performance (assuming a C-order array input).

References
----------
.. [1] B. R. Hunt "A matrix theory proof of the discrete convolution
       theorem", IEEE Trans. on Audio and Electroacoustics,
       vol. au-19, no. 4, pp. 285-288, dec. 1971

"""


import numpy as np
from .._shared.fft import fftmodule as fft

__keywords__ = "fft, Fourier Transform, orthonormal, unitary"


def ufftn(inarray, dim=None):
    """N-dimensional unitary Fourier transform.

    Parameters
    ----------
    inarray : ndarray
        The array to transform.
    dim : int, optional
        The last axis along which to compute the transform. All
        axes by default.

    Returns
    -------
    outarray : ndarray (same shape as inarray)
        The unitary N-D Fourier transform of ``inarray``.

    Examples
    --------
    >>> input = np.ones((3, 3, 3))
    >>> output = ufftn(input)
    >>> np.allclose(np.sum(input) / np.sqrt(input.size), output[0, 0, 0])
    True
    >>> output.shape
    (3, 3, 3)
    """
    if dim is None:
        dim = inarray.ndim
    outarray = fft.fftn(inarray, axes=range(-dim, 0), norm='ortho')
    return outarray


def uifftn(inarray, dim=None):
    """N-dimensional unitary inverse Fourier transform.

    Parameters
    ----------
    inarray : ndarray
        The array to transform.
    dim : int, optional
        The last axis along which to compute the transform. All
        axes by default.

    Returns
    -------
    outarray : ndarray (same shape as inarray)
        The unitary inverse N-D Fourier transform of ``inarray``.

    Examples
    --------
    >>> input = np.ones((3, 3, 3))
    >>> output = uifftn(input)
    >>> np.allclose(np.sum(input) / np.sqrt(input.size), output[0, 0, 0])
    True
    >>> output.shape
    (3, 3, 3)
    """
    if dim is None:
        dim = inarray.ndim
    outarray = fft.ifftn(inarray, axes=range(-dim, 0), norm='ortho')
    return outarray


def urfftn(inarray, dim=None):
    """N-dimensional real unitary Fourier transform.

    This transform considers the Hermitian property of the transform on
    real-valued input.

    Parameters
    ----------
    inarray : ndarray, shape (M, N, ..., P)
        The array to transform.
    dim : int, optional
        The last axis along which to compute the transform. All
        axes by default.

    Returns
    -------
    outarray : ndarray, shape (M, N, ..., P / 2 + 1)
        The unitary N-D real Fourier transform of ``inarray``.

    Notes
    -----
    The ``urfft`` functions assume an input array of real
    values. Consequently, the output has a Hermitian property and
    redundant values are not computed or returned.

    Examples
    --------
    >>> input = np.ones((5, 5, 5))
    >>> output = urfftn(input)
    >>> np.allclose(np.sum(input) / np.sqrt(input.size), output[0, 0, 0])
    True
    >>> output.shape
    (5, 5, 3)
    """
    if dim is None:
        dim = inarray.ndim
    outarray = fft.rfftn(inarray, axes=range(-dim, 0), norm='ortho')
    return outarray


def uirfftn(inarray, dim=None, shape=None):
    """N-dimensional inverse real unitary Fourier transform.

    This transform considers the Hermitian property of the transform
    from complex to real input.

    Parameters
    ----------
    inarray : ndarray
        The array to transform.
    dim : int, optional
        The last axis along which to compute the transform. All
        axes by default.
    shape : tuple of int, optional
        The shape of the output. The shape of ``rfft`` is ambiguous in
        case of odd-valued input shape. In this case, this parameter
        should be provided. See ``np.fft.irfftn``.

    Returns
    -------
    outarray : ndarray
        The unitary N-D inverse real Fourier transform of ``inarray``.

    Notes
    -----
    The ``uirfft`` function assumes that the output array is
    real-valued. Consequently, the input is assumed to have a Hermitian
    property and redundant values are implicit.

    Examples
    --------
    >>> input = np.ones((5, 5, 5))
    >>> output = uirfftn(urfftn(input), shape=input.shape)
    >>> np.allclose(input, output)
    True
    >>> output.shape
    (5, 5, 5)
    """
    if dim is None:
        dim = inarray.ndim
    outarray = fft.irfftn(inarray, shape, axes=range(-dim, 0), norm='ortho')
    return outarray


def ufft2(inarray):
    """2-dimensional unitary Fourier transform.

    Compute the Fourier transform on the last 2 axes.

    Parameters
    ----------
    inarray : ndarray
        The array to transform.

    Returns
    -------
    outarray : ndarray (same shape as inarray)
        The unitary 2-D Fourier transform of ``inarray``.

    See Also
    --------
    uifft2, ufftn, urfftn

    Examples
    --------
    >>> input = np.ones((10, 128, 128))
    >>> output = ufft2(input)
    >>> np.allclose(np.sum(input[1, ...]) / np.sqrt(input[1, ...].size),
    ...             output[1, 0, 0])
    True
    >>> output.shape
    (10, 128, 128)
    """
    return ufftn(inarray, 2)


def uifft2(inarray):
    """2-dimensional inverse unitary Fourier transform.

    Compute the inverse Fourier transform on the last 2 axes.

    Parameters
    ----------
    inarray : ndarray
        The array to transform.

    Returns
    -------
    outarray : ndarray (same shape as inarray)
        The unitary 2-D inverse Fourier transform of ``inarray``.

    See Also
    --------
    uifft2, uifftn, uirfftn

    Examples
    --------
    >>> input = np.ones((10, 128, 128))
    >>> output = uifft2(input)
    >>> np.allclose(np.sum(input[1, ...]) / np.sqrt(input[1, ...].size),
    ...             output[0, 0, 0])
    True
    >>> output.shape
    (10, 128, 128)
    """
    return uifftn(inarray, 2)


def urfft2(inarray):
    """2-dimensional real unitary Fourier transform.

    Compute the real Fourier transform on the last 2 axes. This
    transform considers the Hermitian property of the transform from
    complex to real-valued input.

    Parameters
    ----------
    inarray : ndarray, shape (M, N, ..., P)
        The array to transform.

    Returns
    -------
    outarray : ndarray, shape (M, N, ..., 2 * (P - 1))
        The unitary 2-D real Fourier transform of ``inarray``.

    See Also
    --------
    ufft2, ufftn, urfftn

    Examples
    --------
    >>> input = np.ones((10, 128, 128))
    >>> output = urfft2(input)
    >>> np.allclose(np.sum(input[1, ...]) / np.sqrt(input[1, ...].size),
    ...             output[1, 0, 0])
    True
    >>> output.shape
    (10, 128, 65)
    """
    return urfftn(inarray, 2)


def uirfft2(inarray, shape=None):
    """2-dimensional inverse real unitary Fourier transform.

    Compute the real inverse Fourier transform on the last 2 axes.
    This transform considers the Hermitian property of the transform
    from complex to real-valued input.

    Parameters
    ----------
    inarray : ndarray, shape (M, N, ..., P)
        The array to transform.
    shape : tuple of int, optional
        The shape of the output. The shape of ``rfft`` is ambiguous in
        case of odd-valued input shape. In this case, this parameter
        should be provided. See ``np.fft.irfftn``.

    Returns
    -------
    outarray : ndarray, shape (M, N, ..., 2 * (P - 1))
        The unitary 2-D inverse real Fourier transform of ``inarray``.

    See Also
    --------
    urfft2, uifftn, uirfftn

    Examples
    --------
    >>> input = np.ones((10, 128, 128))
    >>> output = uirfftn(urfftn(input), shape=input.shape)
    >>> np.allclose(input, output)
    True
    >>> output.shape
    (10, 128, 128)
    """
    return uirfftn(inarray, 2, shape=shape)


def image_quad_norm(inarray):
    """Return the quadratic norm of images in Fourier space.

    This function detects whether the input image satisfies the
    Hermitian property.

    Parameters
    ----------
    inarray : ndarray
        Input image. The image data should reside in the final two
        axes.

    Returns
    -------
    norm : float
        The quadratic norm of ``inarray``.

    Examples
    --------
    >>> input = np.ones((5, 5))
    >>> image_quad_norm(ufft2(input)) == np.sum(np.abs(input)**2)
    True
    >>> image_quad_norm(ufft2(input)) == image_quad_norm(urfft2(input))
    True
    """
    # If there is a Hermitian symmetry
    if inarray.shape[-1] != inarray.shape[-2]:
        return (2 * np.sum(np.sum(np.abs(inarray) ** 2, axis=-1), axis=-1) -
                np.sum(np.abs(inarray[..., 0]) ** 2, axis=-1))
    else:
        return np.sum(np.sum(np.abs(inarray) ** 2, axis=-1), axis=-1)


def ir2tf(imp_resp, shape, dim=None, is_real=True):
    """Compute the transfer function of an impulse response (IR).

    This function performs the necessary zero-padding, applies the zero
    convention and the correct FFT, etc., to compute the transfer
    function of an IR. It is meant to be used with a unitary Fourier
    transform of the signal (``ufftn`` or equivalent).

    Parameters
    ----------
    imp_resp : ndarray
        The impulse responses.
    shape : tuple of int
        A tuple of integers corresponding to the target shape of the
        transfer function.
    dim : int, optional
        The last axis along which to compute the transform. All
        axes by default.
    is_real : boolean, optional
        If True (default), ``imp_resp`` is assumed to be real-valued and
        the Hermitian property is used with the rfftn Fourier transform.

    Returns
    -------
    y : complex ndarray
        The transfer function of shape ``shape``.

    See Also
    --------
    ufftn, uifftn, urfftn, uirfftn

    Examples
    --------
    >>> np.all(np.array([[4, 0], [0, 0]]) == ir2tf(np.ones((2, 2)), (2, 2)))
    True
    >>> ir2tf(np.ones((2, 2)), (512, 512)).shape == (512, 257)
    True
    >>> ir2tf(np.ones((2, 2)), (512, 512), is_real=False).shape == (512, 512)
    True

    Notes
    -----
    The input array can be composed of multiple-dimensional IR with
    an arbitrary number of IR. The individual IR must be accessed
    through the first axes. The last ``dim`` axes contain the space
    definition.
    """
    if not dim:
        dim = imp_resp.ndim
    # Zero padding and fill
    irpadded = np.zeros(shape)
    irpadded[tuple([slice(0, s) for s in imp_resp.shape])] = imp_resp
    # Roll for zero convention of the fft to avoid the phase
    # problem. Work with odd and even size.
    for axis, axis_size in enumerate(imp_resp.shape):
        if axis >= imp_resp.ndim - dim:
            irpadded = np.roll(irpadded,
                               shift=-int(np.floor(axis_size / 2)),
                               axis=axis)
    if is_real:
        return fft.rfftn(irpadded, axes=range(-dim, 0))
    else:
        return fft.fftn(irpadded, axes=range(-dim, 0))


def laplacian(ndim, shape, is_real=True):
    """Return the transfer function of the Laplacian.

    Laplacian is the second order difference, on row and column.

    Parameters
    ----------
    ndim : int
        The dimension of the Laplacian.
    shape : tuple
        The support on which to compute the transfer function.
    is_real : boolean, optional
        If True (default), imp_resp is assumed to be real-valued and
        the Hermitian property is used with rfftn Fourier transform
        to return the transfer function.

    Returns
    -------
    tf : array_like, complex
        The transfer function.
    impr : array_like, real
        The Laplacian.

    Examples
    --------
    >>> tf, ir = laplacian(2, (32, 32))
    >>> np.all(ir == np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]]))
    True
    >>> np.all(tf == ir2tf(ir, (32, 32)))
    True
    """
    impr = np.zeros([3] * ndim)
    for dim in range(ndim):
        idx = tuple([slice(1, 2)] * dim +
                    [slice(None)] +
                    [slice(1, 2)] * (ndim - dim - 1))
        impr[idx] = np.array([-1.0,
                              0.0,
                              -1.0]).reshape([-1 if i == dim else 1
                                              for i in range(ndim)])
    impr[(slice(1, 2), ) * ndim] = 2.0 * ndim
    return ir2tf(impr, shape, is_real=is_real), impr
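To illustrate how the helpers in this module fit together, here is a hedged sketch: convert a box impulse response to a transfer function with ir2tf, build the Laplacian regularizer with laplacian, and verify Parseval's equality through image_quad_norm (the 5x5 PSF and 64x64 support are arbitrary example choices; the module is imported as skimage.restoration.uft, the same way the deconvolution tests use it):

import numpy as np
from skimage.restoration import uft

shape = (64, 64)

# Transfer function of a 5x5 box impulse response, using the real FFT path.
psf = np.ones((5, 5)) / 25
otf = uft.ir2tf(psf, shape, is_real=True)
print(otf.shape)                       # (64, 33): Hermitian half-plane

# Laplacian regularizer: transfer function plus its impulse response.
reg, impr = uft.laplacian(2, shape)

# Parseval check: the unitary transform preserves the quadratic norm.
image = np.random.rand(*shape)
print(np.allclose(uft.image_quad_norm(uft.ufft2(image)),
                  np.sum(image ** 2)))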
113
venv/Lib/site-packages/skimage/restoration/unwrap.py
Normal file
113
venv/Lib/site-packages/skimage/restoration/unwrap.py
Normal file
@ -0,0 +1,113 @@
import numpy as np

from .._shared.utils import warn

from ._unwrap_1d import unwrap_1d
from ._unwrap_2d import unwrap_2d
from ._unwrap_3d import unwrap_3d


def unwrap_phase(image, wrap_around=False, seed=None):
    '''Recover the original from a wrapped phase image.

    From an image wrapped to lie in the interval [-pi, pi), recover the
    original, unwrapped image.

    Parameters
    ----------
    image : 1D, 2D or 3D ndarray of floats, optionally a masked array
        The values should be in the range [-pi, pi). If a masked array is
        provided, the masked entries will not be changed, and their values
        will not be used to guide the unwrapping of neighboring, unmasked
        values. Masked 1D arrays are not allowed, and will raise a
        `ValueError`.
    wrap_around : bool or sequence of bool, optional
        When an element of the sequence is `True`, the unwrapping process
        will regard the edges along the corresponding axis of the image to be
        connected and use this connectivity to guide the phase unwrapping
        process. If only a single boolean is given, it will apply to all axes.
        Wrap around is not supported for 1D arrays.
    seed : int, optional
        Unwrapping 2D or 3D images uses random initialization. This sets the
        seed of the PRNG to achieve deterministic behavior.

    Returns
    -------
    image_unwrapped : array_like, double
        Unwrapped image of the same shape as the input. If the input `image`
        was a masked array, the mask will be preserved.

    Raises
    ------
    ValueError
        If called with a masked 1D array or called with a 1D array and
        ``wrap_around=True``.

    Examples
    --------
    >>> c0, c1 = np.ogrid[-1:1:128j, -1:1:128j]
    >>> image = 12 * np.pi * np.exp(-(c0**2 + c1**2))
    >>> image_wrapped = np.angle(np.exp(1j * image))
    >>> image_unwrapped = unwrap_phase(image_wrapped)
    >>> np.std(image_unwrapped - image) < 1e-6   # A constant offset is normal
    True

    References
    ----------
    .. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,
           and Munther A. Gdeisat, "Fast two-dimensional phase-unwrapping
           algorithm based on sorting by reliability following a noncontinuous
           path", Journal Applied Optics, Vol. 41, No. 35 (2002) 7437,
    .. [2] Abdul-Rahman, H., Gdeisat, M., Burton, D., & Lalor, M., "Fast
           three-dimensional phase-unwrapping algorithm based on sorting by
           reliability following a non-continuous path. In W. Osten,
           C. Gorecki, & E. L. Novak (Eds.), Optical Metrology (2005) 32--40,
           International Society for Optics and Photonics.
    '''
    if image.ndim not in (1, 2, 3):
        raise ValueError('Image must be 1, 2, or 3 dimensional')
    if isinstance(wrap_around, bool):
        wrap_around = [wrap_around] * image.ndim
    elif (hasattr(wrap_around, '__getitem__')
          and not isinstance(wrap_around, str)):
        if len(wrap_around) != image.ndim:
            raise ValueError('Length of `wrap_around` must equal the '
                             'dimensionality of image')
        wrap_around = [bool(wa) for wa in wrap_around]
    else:
        raise ValueError('`wrap_around` must be a bool or a sequence with '
                         'length equal to the dimensionality of image')
    if image.ndim == 1:
        if np.ma.isMaskedArray(image):
            raise ValueError('1D masked images cannot be unwrapped')
        if wrap_around[0]:
            raise ValueError('`wrap_around` is not supported for 1D images')
    if image.ndim in (2, 3) and 1 in image.shape:
        warn('Image has a length 1 dimension. Consider using an '
             'array of lower dimensionality to use a more efficient '
             'algorithm')

    if np.ma.isMaskedArray(image):
        mask = np.require(np.ma.getmaskarray(image), np.uint8, ['C'])
    else:
        mask = np.zeros_like(image, dtype=np.uint8, order='C')

    image_not_masked = np.asarray(
        np.ma.getdata(image), dtype=np.double, order='C')
    image_unwrapped = np.empty_like(image, dtype=np.double, order='C',
                                    subok=False)

    if image.ndim == 1:
        unwrap_1d(image_not_masked, image_unwrapped)
    elif image.ndim == 2:
        unwrap_2d(image_not_masked, mask, image_unwrapped,
                  wrap_around, seed)
    elif image.ndim == 3:
        unwrap_3d(image_not_masked, mask, image_unwrapped,
                  wrap_around, seed)

    if np.ma.isMaskedArray(image):
        return np.ma.array(image_unwrapped, mask=mask,
                           fill_value=image.fill_value)
    else:
        return image_unwrapped
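Finally, a small sketch of the masked-array and wrap_around behavior documented above (the ramp, mask placement, and seed are illustrative):

import numpy as np
from skimage.restoration import unwrap_phase

# Wrapped 2D ramp; mark a small block as unreliable via a masked array.
y, x = np.ogrid[:64, :64]
image = 2 * np.pi * (0.2 * x + 0.1 * y)
wrapped = np.angle(np.exp(1j * image))

mask = np.zeros(image.shape, dtype=bool)
mask[20:30, 20:40] = True
wrapped_ma = np.ma.array(wrapped, mask=mask)

# Masked entries are left untouched and do not guide their neighbours.
unwrapped = unwrap_phase(wrapped_ma, seed=0)
print(np.ma.isMaskedArray(unwrapped))   # True: the mask is preserved

# wrap_around treats opposite edges of the chosen axis as connected.
unwrapped_periodic = unwrap_phase(wrapped, wrap_around=(False, True), seed=0)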