Fixed database typo and removed unnecessary class identifier.

This commit is contained in:
Batuhan Berk Başoğlu 2020-10-14 10:10:37 -04:00
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions


@@ -0,0 +1,9 @@
from ..._shared.testing import setup_test, teardown_test
def setup():
setup_test()
def teardown():
teardown_test()


@@ -0,0 +1,886 @@
import itertools
import numpy as np
import pytest
from skimage import restoration, data, color, img_as_float
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio
from skimage.restoration._denoise import _wavelet_threshold
import pywt
from skimage._shared import testing
from skimage._shared.testing import (assert_equal, assert_almost_equal,
assert_warns, assert_)
from skimage._shared._warnings import expected_warnings
from distutils.version import LooseVersion as Version
try:
import dask
except ImportError:
DASK_NOT_INSTALLED_WARNING = 'The optional dask dependency is not installed'
else:
DASK_NOT_INSTALLED_WARNING = None
np.random.seed(1234)
astro = img_as_float(data.astronaut()[:128, :128])
astro_gray = color.rgb2gray(astro)
checkerboard_gray = img_as_float(data.checkerboard())
checkerboard = color.gray2rgb(checkerboard_gray)
# versions with one odd-sized dimension
astro_gray_odd = astro_gray[:, :-1]
astro_odd = astro[:, :-1]
def test_denoise_tv_chambolle_2d():
# astronaut image
img = astro_gray.copy()
# add noise to astronaut
img += 0.5 * img.std() * np.random.rand(*img.shape)
# clip noise so that it does not exceed allowed range for float images.
img = np.clip(img, 0, 1)
# denoise
denoised_astro = restoration.denoise_tv_chambolle(img, weight=0.1)
# which dtype?
assert_(denoised_astro.dtype in [float, np.float32, np.float64])
from scipy import ndimage as ndi
grad = ndi.morphological_gradient(img, size=((3, 3)))
grad_denoised = ndi.morphological_gradient(denoised_astro, size=((3, 3)))
# test if the total variation has decreased
assert_(grad_denoised.dtype == float)
assert_(np.sqrt((grad_denoised**2).sum()) < np.sqrt((grad**2).sum()))
def test_denoise_tv_chambolle_multichannel():
denoised0 = restoration.denoise_tv_chambolle(astro[..., 0], weight=0.1)
denoised = restoration.denoise_tv_chambolle(astro, weight=0.1,
multichannel=True)
assert_equal(denoised[..., 0], denoised0)
# tile astronaut subset to generate 3D+channels data
astro3 = np.tile(astro[:64, :64, np.newaxis, :], [1, 1, 2, 1])
# modify along tiled dimension to give non-zero gradient on 3rd axis
astro3[:, :, 0, :] = 2*astro3[:, :, 0, :]
denoised0 = restoration.denoise_tv_chambolle(astro3[..., 0], weight=0.1)
denoised = restoration.denoise_tv_chambolle(astro3, weight=0.1,
multichannel=True)
assert_equal(denoised[..., 0], denoised0)
def test_denoise_tv_chambolle_float_result_range():
# astronaut image
img = astro_gray
int_astro = np.multiply(img, 255).astype(np.uint8)
assert_(np.max(int_astro) > 1)
denoised_int_astro = restoration.denoise_tv_chambolle(int_astro,
weight=0.1)
# test if the value range of output float data is within [0.0:1.0]
assert_(denoised_int_astro.dtype == float)
assert_(np.max(denoised_int_astro) <= 1.0)
assert_(np.min(denoised_int_astro) >= 0.0)
def test_denoise_tv_chambolle_3d():
"""Apply the TV denoising algorithm on a 3D image representing a sphere."""
x, y, z = np.ogrid[0:40, 0:40, 0:40]
mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
mask = 100 * mask.astype(float)
mask += 60
mask += 20 * np.random.rand(*mask.shape)
mask[mask < 0] = 0
mask[mask > 255] = 255
res = restoration.denoise_tv_chambolle(mask.astype(np.uint8), weight=0.1)
assert_(res.dtype == float)
assert_(res.std() * 255 < mask.std())
def test_denoise_tv_chambolle_1d():
"""Apply the TV denoising algorithm on a 1D sinusoid."""
x = 125 + 100*np.sin(np.linspace(0, 8*np.pi, 1000))
x += 20 * np.random.rand(x.size)
x = np.clip(x, 0, 255)
res = restoration.denoise_tv_chambolle(x.astype(np.uint8), weight=0.1)
assert_(res.dtype == float)
assert_(res.std() * 255 < x.std())
def test_denoise_tv_chambolle_4d():
""" TV denoising for a 4D input."""
im = 255 * np.random.rand(8, 8, 8, 8)
res = restoration.denoise_tv_chambolle(im.astype(np.uint8), weight=0.1)
assert_(res.dtype == float)
assert_(res.std() * 255 < im.std())
def test_denoise_tv_chambolle_weighting():
# make sure a specified weight gives consistent results regardless of
# the number of input image dimensions
rstate = np.random.RandomState(1234)
img2d = astro_gray.copy()
img2d += 0.15 * rstate.standard_normal(img2d.shape)
img2d = np.clip(img2d, 0, 1)
# generate 4D image by tiling
img4d = np.tile(img2d[..., None, None], (1, 1, 2, 2))
w = 0.2
denoised_2d = restoration.denoise_tv_chambolle(img2d, weight=w)
denoised_4d = restoration.denoise_tv_chambolle(img4d, weight=w)
assert_(structural_similarity(denoised_2d,
denoised_4d[:, :, 0, 0]) > 0.99)
def test_denoise_tv_bregman_2d():
img = checkerboard_gray.copy()
# add some random noise
img += 0.5 * img.std() * np.random.rand(*img.shape)
img = np.clip(img, 0, 1)
out1 = restoration.denoise_tv_bregman(img, weight=10)
out2 = restoration.denoise_tv_bregman(img, weight=5)
# make sure noise is reduced in the checkerboard cells
assert_(img[30:45, 5:15].std() > out1[30:45, 5:15].std())
assert_(out1[30:45, 5:15].std() > out2[30:45, 5:15].std())
def test_denoise_tv_bregman_float_result_range():
# astronaut image
img = astro_gray.copy()
int_astro = np.multiply(img, 255).astype(np.uint8)
assert_(np.max(int_astro) > 1)
denoised_int_astro = restoration.denoise_tv_bregman(int_astro, weight=60.0)
# test if the value range of output float data is within [0.0:1.0]
assert_(denoised_int_astro.dtype == float)
assert_(np.max(denoised_int_astro) <= 1.0)
assert_(np.min(denoised_int_astro) >= 0.0)
def test_denoise_tv_bregman_3d():
img = checkerboard.copy()
# add some random noise
img += 0.5 * img.std() * np.random.rand(*img.shape)
img = np.clip(img, 0, 1)
out1 = restoration.denoise_tv_bregman(img, weight=10)
out2 = restoration.denoise_tv_bregman(img, weight=5)
# make sure noise is reduced in the checkerboard cells
assert_(img[30:45, 5:15].std() > out1[30:45, 5:15].std())
assert_(out1[30:45, 5:15].std() > out2[30:45, 5:15].std())
def test_denoise_tv_bregman_3d_multichannel():
img_astro = astro.copy()
denoised0 = restoration.denoise_tv_bregman(img_astro[..., 0], weight=60.0)
denoised = restoration.denoise_tv_bregman(img_astro, weight=60.0,
multichannel=True)
assert_equal(denoised0, denoised[..., 0])
def test_denoise_tv_bregman_multichannel():
img = checkerboard_gray.copy()[:50, :50]
# add some random noise
img += 0.5 * img.std() * np.random.rand(*img.shape)
img = np.clip(img, 0, 1)
out1 = restoration.denoise_tv_bregman(img, weight=60.0)
out2 = restoration.denoise_tv_bregman(img, weight=60.0, multichannel=True)
assert_equal(out1, out2)
def test_denoise_bilateral_2d():
img = checkerboard_gray.copy()[:50, :50]
# add some random noise
img += 0.5 * img.std() * np.random.rand(*img.shape)
img = np.clip(img, 0, 1)
out1 = restoration.denoise_bilateral(img, sigma_color=0.1,
sigma_spatial=10, multichannel=False)
out2 = restoration.denoise_bilateral(img, sigma_color=0.2,
sigma_spatial=20, multichannel=False)
# make sure noise is reduced in the checkerboard cells
assert_(img[30:45, 5:15].std() > out1[30:45, 5:15].std())
assert_(out1[30:45, 5:15].std() > out2[30:45, 5:15].std())
def test_denoise_bilateral_pad():
"""This test checks if the bilateral filter is returning an image
correctly padded."""
img = img_as_float(data.chelsea())[100:200, 100:200]
img_bil = restoration.denoise_bilateral(img, sigma_color=0.1,
sigma_spatial=10,
multichannel=True)
condition_padding = np.count_nonzero(np.isclose(img_bil,
0,
atol=0.001))
assert_equal(condition_padding, 0)
@pytest.mark.parametrize('dtype', [np.float32, np.double])
def test_denoise_bilateral_types(dtype):
img = checkerboard_gray.copy()[:50, :50]
# add some random noise
img += 0.5 * img.std() * np.random.rand(*img.shape)
img = np.clip(img, 0, 1).astype(dtype)
# check that we can process multiple float types
out = restoration.denoise_bilateral(img, sigma_color=0.1,
sigma_spatial=10, multichannel=False)
@pytest.mark.parametrize('dtype', [np.float32, np.double])
def test_denoise_bregman_types(dtype):
img = checkerboard_gray.copy()[:50, :50]
# add some random noise
img += 0.5 * img.std() * np.random.rand(*img.shape)
img = np.clip(img, 0, 1).astype(dtype)
# check that we can process multiple float types
out = restoration.denoise_tv_bregman(img, weight=5)
def test_denoise_bilateral_zeros():
img = np.zeros((10, 10))
assert_equal(img, restoration.denoise_bilateral(img, multichannel=False))
def test_denoise_bilateral_constant():
img = np.ones((10, 10)) * 5
assert_equal(img, restoration.denoise_bilateral(img, multichannel=False))
def test_denoise_bilateral_color():
img = checkerboard.copy()[:50, :50]
# add some random noise
img += 0.5 * img.std() * np.random.rand(*img.shape)
img = np.clip(img, 0, 1)
out1 = restoration.denoise_bilateral(img, sigma_color=0.1,
sigma_spatial=10, multichannel=True)
out2 = restoration.denoise_bilateral(img, sigma_color=0.2,
sigma_spatial=20, multichannel=True)
# make sure noise is reduced in the checkerboard cells
assert_(img[30:45, 5:15].std() > out1[30:45, 5:15].std())
assert_(out1[30:45, 5:15].std() > out2[30:45, 5:15].std())
def test_denoise_bilateral_3d_grayscale():
img = np.ones((50, 50, 3))
with testing.raises(ValueError):
restoration.denoise_bilateral(img, multichannel=False)
def test_denoise_bilateral_3d_multichannel():
img = np.ones((50, 50, 50))
with expected_warnings(["grayscale"]):
result = restoration.denoise_bilateral(img, multichannel=True)
assert_equal(result, img)
def test_denoise_bilateral_multidimensional():
img = np.ones((10, 10, 10, 10))
with testing.raises(ValueError):
restoration.denoise_bilateral(img, multichannel=False)
with testing.raises(ValueError):
restoration.denoise_bilateral(img, multichannel=True)
def test_denoise_bilateral_nan():
img = np.full((50, 50), np.nan)
# This is in fact an optional warning for our test suite.
# Python 3.5 will not trigger a warning.
with expected_warnings([r'invalid|\A\Z']):
out = restoration.denoise_bilateral(img, multichannel=False)
assert_equal(img, out)
@pytest.mark.parametrize('fast_mode', [False, True])
def test_denoise_nl_means_2d(fast_mode):
img = np.zeros((40, 40))
img[10:-10, 10:-10] = 1.
sigma = 0.3
img += sigma * np.random.randn(*img.shape)
img_f32 = img.astype('float32')
for s in [sigma, 0]:
denoised = restoration.denoise_nl_means(img, 7, 5, 0.2,
fast_mode=fast_mode,
multichannel=False,
sigma=s)
# make sure noise is reduced
assert_(img.std() > denoised.std())
denoised_f32 = restoration.denoise_nl_means(img_f32, 7, 5, 0.2,
fast_mode=fast_mode,
multichannel=False,
sigma=s)
# make sure noise is reduced
assert_(img.std() > denoised_f32.std())
# Check single-precision result
assert np.allclose(denoised_f32, denoised, atol=1e-2)
@pytest.mark.parametrize('fast_mode', [False, True])
@pytest.mark.parametrize('n_channels', [2, 3, 6])
@pytest.mark.parametrize('dtype', ['float64', 'float32'])
def test_denoise_nl_means_2d_multichannel(fast_mode, n_channels, dtype):
# reduce image size because nl means is slow
img = np.copy(astro[:50, :50])
img = np.concatenate((img, ) * 2, axis=-1) # 6 channels
img = img.astype(dtype)
# add some random noise
sigma = 0.1
imgn = img + sigma * np.random.standard_normal(img.shape)
imgn = np.clip(imgn, 0, 1)
imgn = imgn.astype(dtype)
for s in [sigma, 0]:
psnr_noisy = peak_signal_noise_ratio(
img[..., :n_channels], imgn[..., :n_channels])
denoised = restoration.denoise_nl_means(imgn[..., :n_channels],
3, 5, h=0.75 * sigma,
fast_mode=fast_mode,
multichannel=True,
sigma=s)
psnr_denoised = peak_signal_noise_ratio(
denoised[..., :n_channels], img[..., :n_channels])
# make sure noise is reduced
assert_(psnr_denoised > psnr_noisy)
@pytest.mark.parametrize('fast_mode', [False, True])
@pytest.mark.parametrize('dtype', ['float64', 'float32'])
def test_denoise_nl_means_3d(fast_mode, dtype):
img = np.zeros((12, 12, 8), dtype=dtype)
img[5:-5, 5:-5, 2:-2] = 1.
sigma = 0.3
imgn = img + sigma * np.random.randn(*img.shape)
imgn = imgn.astype(dtype)
psnr_noisy = peak_signal_noise_ratio(img, imgn)
for s in [sigma, 0]:
denoised = restoration.denoise_nl_means(imgn, 3, 4, h=0.75 * sigma,
fast_mode=fast_mode,
multichannel=False, sigma=s)
# make sure noise is reduced
assert_(peak_signal_noise_ratio(img, denoised) > psnr_noisy)
@pytest.mark.parametrize('fast_mode', [False, True])
@pytest.mark.parametrize('dtype', ['float64', 'float32'])
def test_denoise_nl_means_multichannel(fast_mode, dtype):
# for true 3D data, 3D denoising is better than denoising as 2D+channels
img = np.zeros((13, 10, 8), dtype=dtype)
img[6, 4:6, 2:-2] = 1.
sigma = 0.3
imgn = img + sigma * np.random.randn(*img.shape)
imgn = imgn.astype(dtype)
denoised_wrong_multichannel = restoration.denoise_nl_means(
imgn, 3, 4, 0.6 * sigma, fast_mode=fast_mode, multichannel=True)
denoised_ok_multichannel = restoration.denoise_nl_means(
imgn, 3, 4, 0.6 * sigma, fast_mode=fast_mode, multichannel=False)
psnr_wrong = peak_signal_noise_ratio(img, denoised_wrong_multichannel)
psnr_ok = peak_signal_noise_ratio(img, denoised_ok_multichannel)
assert_(psnr_ok > psnr_wrong)
def test_denoise_nl_means_wrong_dimension():
img = np.zeros((5, 5, 5, 5))
with testing.raises(NotImplementedError):
restoration.denoise_nl_means(img, multichannel=True)
@pytest.mark.parametrize('fast_mode', [False, True])
@pytest.mark.parametrize('dtype', ['float64', 'float32'])
def test_no_denoising_for_small_h(fast_mode, dtype):
img = np.zeros((40, 40))
img[10:-10, 10:-10] = 1.
img += 0.3*np.random.randn(*img.shape)
img = img.astype(dtype)
# very small h should result in no averaging with other patches
denoised = restoration.denoise_nl_means(img, 7, 5, 0.01,
fast_mode=fast_mode,
multichannel=False)
assert_(np.allclose(denoised, img))
denoised = restoration.denoise_nl_means(img, 7, 5, 0.01,
fast_mode=fast_mode,
multichannel=False)
assert_(np.allclose(denoised, img))
@pytest.mark.parametrize('fast_mode', [False, True])
def test_denoise_nl_means_2d_dtype(fast_mode):
img = np.zeros((40, 40), dtype=int)
img_f32 = img.astype('float32')
img_f64 = img.astype('float64')
with expected_warnings(['Image dtype is not float']):
assert restoration.denoise_nl_means(
img, fast_mode=fast_mode).dtype == 'float64'
assert restoration.denoise_nl_means(
img_f32, fast_mode=fast_mode).dtype == img_f32.dtype
assert restoration.denoise_nl_means(
img_f64, fast_mode=fast_mode).dtype == img_f64.dtype
@pytest.mark.parametrize('fast_mode', [False, True])
def test_denoise_nl_means_3d_dtype(fast_mode):
img = np.zeros((12, 12, 8), dtype=int)
img_f32 = img.astype('float32')
img_f64 = img.astype('float64')
with expected_warnings(['Image dtype is not float']):
assert restoration.denoise_nl_means(
img, patch_distance=2, fast_mode=fast_mode).dtype == 'float64'
assert restoration.denoise_nl_means(
img_f32, patch_distance=2, fast_mode=fast_mode).dtype == img_f32.dtype
assert restoration.denoise_nl_means(
img_f64, patch_distance=2, fast_mode=fast_mode).dtype == img_f64.dtype
@pytest.mark.parametrize(
'img, multichannel, convert2ycbcr',
[(astro_gray, False, False),
(astro_gray_odd, False, False),
(astro_odd, True, False),
(astro_odd, True, True)]
)
def test_wavelet_denoising(img, multichannel, convert2ycbcr):
rstate = np.random.RandomState(1234)
sigma = 0.1
noisy = img + sigma * rstate.randn(*(img.shape))
noisy = np.clip(noisy, 0, 1)
# Verify that SNR is improved when true sigma is used
denoised = restoration.denoise_wavelet(noisy, sigma=sigma,
multichannel=multichannel,
convert2ycbcr=convert2ycbcr,
rescale_sigma=True)
psnr_noisy = peak_signal_noise_ratio(img, noisy)
psnr_denoised = peak_signal_noise_ratio(img, denoised)
assert_(psnr_denoised > psnr_noisy)
# Verify that SNR is improved with internally estimated sigma
denoised = restoration.denoise_wavelet(noisy,
multichannel=multichannel,
convert2ycbcr=convert2ycbcr,
rescale_sigma=True)
psnr_noisy = peak_signal_noise_ratio(img, noisy)
psnr_denoised = peak_signal_noise_ratio(img, denoised)
assert_(psnr_denoised > psnr_noisy)
# SNR is improved less with 1 wavelet level than with the default.
denoised_1 = restoration.denoise_wavelet(noisy,
multichannel=multichannel,
wavelet_levels=1,
convert2ycbcr=convert2ycbcr,
rescale_sigma=True)
psnr_denoised_1 = peak_signal_noise_ratio(img, denoised_1)
assert_(psnr_denoised > psnr_denoised_1)
assert_(psnr_denoised_1 > psnr_noisy)
# Test changing noise_std (higher threshold, so less energy in signal)
res1 = restoration.denoise_wavelet(noisy, sigma=2 * sigma,
multichannel=multichannel,
rescale_sigma=True)
res2 = restoration.denoise_wavelet(noisy, sigma=sigma,
multichannel=multichannel,
rescale_sigma=True)
assert_(np.sum(res1**2) <= np.sum(res2**2))
@pytest.mark.parametrize(
'case, dtype, convert2ycbcr, estimate_sigma',
itertools.product(
['1d', '2d multichannel'],
[np.float16, np.float32, np.float64, np.int16, np.uint8],
[True, False],
[True, False])
)
def test_wavelet_denoising_scaling(case, dtype, convert2ycbcr,
estimate_sigma):
"""Test cases for images without prescaling via img_as_float."""
rstate = np.random.RandomState(1234)
if case == '1d':
# 1D single-channel in range [0, 255]
x = np.linspace(0, 255, 1024)
elif case == '2d multichannel':
# 2D multichannel in range [0, 255]
x = data.astronaut()[:64, :64]
x = x.astype(dtype)
# add noise and clip to original signal range
sigma = 25.
noisy = x + sigma * rstate.randn(*x.shape)
noisy = np.clip(noisy, x.min(), x.max())
noisy = noisy.astype(x.dtype)
multichannel = x.shape[-1] == 3
if estimate_sigma:
sigma_est = restoration.estimate_sigma(noisy,
multichannel=multichannel)
else:
sigma_est = None
if convert2ycbcr and not multichannel:
# YCbCr requires multichannel == True
with testing.raises(ValueError):
denoised = restoration.denoise_wavelet(noisy,
sigma=sigma_est,
wavelet='sym4',
multichannel=multichannel,
convert2ycbcr=convert2ycbcr,
rescale_sigma=True)
return
denoised = restoration.denoise_wavelet(noisy, sigma=sigma_est,
wavelet='sym4',
multichannel=multichannel,
convert2ycbcr=convert2ycbcr,
rescale_sigma=True)
data_range = x.max() - x.min()
psnr_noisy = peak_signal_noise_ratio(x, noisy, data_range=data_range)
clipped = np.dtype(dtype).kind != 'f'
if not clipped:
psnr_denoised = peak_signal_noise_ratio(x, denoised,
data_range=data_range)
# output's max value is not substantially smaller than x's
assert_(denoised.max() > 0.9 * x.max())
else:
# have to compare to x_as_float in integer input cases
x_as_float = img_as_float(x)
f_data_range = x_as_float.max() - x_as_float.min()
psnr_denoised = peak_signal_noise_ratio(x_as_float, denoised,
data_range=f_data_range)
# output has been clipped to expected range
assert_(denoised.max() <= 1.0)
if np.dtype(dtype).kind == 'u':
assert_(denoised.min() >= 0)
else:
assert_(denoised.min() >= -1)
assert_(psnr_denoised > psnr_noisy)
def test_wavelet_threshold():
rstate = np.random.RandomState(1234)
img = astro_gray
sigma = 0.1
noisy = img + sigma * rstate.randn(*(img.shape))
noisy = np.clip(noisy, 0, 1)
# employ a single, user-specified threshold instead of BayesShrink sigmas
denoised = _wavelet_threshold(noisy, wavelet='db1', method=None,
threshold=sigma)
psnr_noisy = peak_signal_noise_ratio(img, noisy)
psnr_denoised = peak_signal_noise_ratio(img, denoised)
assert_(psnr_denoised > psnr_noisy)
# either method or threshold must be defined
with testing.raises(ValueError):
_wavelet_threshold(noisy, wavelet='db1', method=None, threshold=None)
# warns if a threshold is provided in a case where it would be ignored
with expected_warnings(["Thresholding method ",]):
_wavelet_threshold(noisy, wavelet='db1', method='BayesShrink',
threshold=sigma)
@pytest.mark.parametrize(
'rescale_sigma, method, ndim',
itertools.product(
[True, False],
['VisuShrink', 'BayesShrink'],
range(1, 5)
)
)
def test_wavelet_denoising_nd(rescale_sigma, method, ndim):
rstate = np.random.RandomState(1234)
# Generate a very simple test image
if ndim < 3:
img = 0.2*np.ones((128, )*ndim)
else:
img = 0.2*np.ones((16, )*ndim)
img[(slice(5, 13), ) * ndim] = 0.8
sigma = 0.1
noisy = img + sigma * rstate.randn(*(img.shape))
noisy = np.clip(noisy, 0, 1)
# Mark H. 2018.08:
# The issue arises because when ndim is 1 or 2,
# ``waverecn`` calls ``_match_coeff_dims``,
# which includes a numpy 1.15 deprecation;
# for a larger number of dimensions, _match_coeff_dims isn't called
# for some reason.
# Verify that SNR is improved with internally estimated sigma
denoised = restoration.denoise_wavelet(
noisy, method=method,
rescale_sigma=rescale_sigma)
psnr_noisy = peak_signal_noise_ratio(img, noisy)
psnr_denoised = peak_signal_noise_ratio(img, denoised)
assert_(psnr_denoised > psnr_noisy)
def test_wavelet_invalid_method():
with testing.raises(ValueError):
restoration.denoise_wavelet(np.ones(16), method='Unimplemented',
rescale_sigma=True)
def test_wavelet_rescale_sigma_deprecation():
# Not specifying rescale_sigma results in a FutureWarning
assert_warns(FutureWarning, restoration.denoise_wavelet, np.ones(16))
@pytest.mark.parametrize('rescale_sigma', [True, False])
def test_wavelet_denoising_levels(rescale_sigma):
rstate = np.random.RandomState(1234)
ndim = 2
N = 256
wavelet = 'db1'
# Generate a very simple test image
img = 0.2*np.ones((N, )*ndim)
img[(slice(5, 13), ) * ndim] = 0.8
sigma = 0.1
noisy = img + sigma * rstate.randn(*(img.shape))
noisy = np.clip(noisy, 0, 1)
denoised = restoration.denoise_wavelet(noisy, wavelet=wavelet,
rescale_sigma=rescale_sigma)
denoised_1 = restoration.denoise_wavelet(noisy, wavelet=wavelet,
wavelet_levels=1,
rescale_sigma=rescale_sigma)
psnr_noisy = peak_signal_noise_ratio(img, noisy)
psnr_denoised = peak_signal_noise_ratio(img, denoised)
psnr_denoised_1 = peak_signal_noise_ratio(img, denoised_1)
# multi-level case should outperform single level case
assert_(psnr_denoised > psnr_denoised_1 > psnr_noisy)
# invalid number of wavelet levels results in a ValueError or UserWarning
max_level = pywt.dwt_max_level(np.min(img.shape),
pywt.Wavelet(wavelet).dec_len)
# exceeding max_level raises a UserWarning in PyWavelets >= 1.0.0
with expected_warnings([
'all coefficients will experience boundary effects']):
restoration.denoise_wavelet(
noisy, wavelet=wavelet, wavelet_levels=max_level + 1,
rescale_sigma=rescale_sigma)
with testing.raises(ValueError):
restoration.denoise_wavelet(
noisy,
wavelet=wavelet, wavelet_levels=-1,
rescale_sigma=rescale_sigma)
def test_estimate_sigma_gray():
rstate = np.random.RandomState(1234)
# astronaut image
img = astro_gray.copy()
sigma = 0.1
# add noise to astronaut
img += sigma * rstate.standard_normal(img.shape)
sigma_est = restoration.estimate_sigma(img, multichannel=False)
assert_almost_equal(sigma, sigma_est, decimal=2)
def test_estimate_sigma_masked_image():
# Verify computation on an image with a large, noise-free border.
# (zero regions will be masked out by _sigma_est_dwt to avoid returning
# sigma = 0)
rstate = np.random.RandomState(1234)
# uniform image
img = np.zeros((128, 128))
center_roi = (slice(32, 96), slice(32, 96))
img[center_roi] = 0.8
sigma = 0.1
img[center_roi] = sigma * rstate.standard_normal(img[center_roi].shape)
sigma_est = restoration.estimate_sigma(img, multichannel=False)
assert_almost_equal(sigma, sigma_est, decimal=1)
def test_estimate_sigma_color():
rstate = np.random.RandomState(1234)
# astronaut image
img = astro.copy()
sigma = 0.1
# add noise to astronaut
img += sigma * rstate.standard_normal(img.shape)
sigma_est = restoration.estimate_sigma(img, multichannel=True,
average_sigmas=True)
assert_almost_equal(sigma, sigma_est, decimal=2)
sigma_list = restoration.estimate_sigma(img, multichannel=True,
average_sigmas=False)
assert_equal(len(sigma_list), img.shape[-1])
assert_almost_equal(sigma_list[0], sigma_est, decimal=2)
# default multichannel=False should raise a warning about last axis size
assert_warns(UserWarning, restoration.estimate_sigma, img)
@pytest.mark.parametrize('rescale_sigma', [True, False])
def test_wavelet_denoising_args(rescale_sigma):
"""
Some of the functions inside wavelet denoising throw an error if the wrong
arguments are passed. This test protects against that and verifies that all
arguments can be passed.
"""
img = astro
noisy = img.copy() + 0.1 * np.random.randn(*(img.shape))
for convert2ycbcr in [True, False]:
for multichannel in [True, False]:
if convert2ycbcr and not multichannel:
with testing.raises(ValueError):
restoration.denoise_wavelet(noisy,
convert2ycbcr=convert2ycbcr,
multichannel=multichannel,
rescale_sigma=rescale_sigma)
continue
for sigma in [0.1, [0.1, 0.1, 0.1], None]:
if (not multichannel and not convert2ycbcr) or \
(isinstance(sigma, list) and not multichannel):
continue
restoration.denoise_wavelet(noisy, sigma=sigma,
convert2ycbcr=convert2ycbcr,
multichannel=multichannel,
rescale_sigma=rescale_sigma)
@pytest.mark.parametrize('rescale_sigma', [True, False])
def test_denoise_wavelet_biorthogonal(rescale_sigma):
"""Biorthogonal wavelets should raise a warning during thresholding."""
img = astro_gray
assert_warns(UserWarning, restoration.denoise_wavelet, img,
wavelet='bior2.2', multichannel=False,
rescale_sigma=rescale_sigma)
@pytest.mark.parametrize('rescale_sigma', [True, False])
def test_cycle_spinning_multichannel(rescale_sigma):
sigma = 0.1
rstate = np.random.RandomState(1234)
for multichannel in True, False:
if multichannel:
img = astro
# shifts can either omit the channel axis or be 0 along it
valid_shifts = [1, (0, 1), (1, 0), (1, 1), (1, 1, 0)]
# steps can either omit the channel axis or be 1 along it
valid_steps = [1, 2, (1, 2), (1, 2, 1)]
# too few or too many shifts or non-zero shift on channels
invalid_shifts = [(1, 1, 2), (1, ), (1, 1, 0, 1)]
# too few or too many steps, or any step <= 0
invalid_steps = [(1, ), (1, 1, 1, 1), (0, 1), (-1, -1)]
else:
img = astro_gray
valid_shifts = [1, (0, 1), (1, 0), (1, 1)]
valid_steps = [1, 2, (1, 2)]
invalid_shifts = [(1, 1, 2), (1, )]
invalid_steps = [(1, ), (1, 1, 1), (0, 1), (-1, -1)]
noisy = img.copy() + 0.1 * rstate.randn(*(img.shape))
denoise_func = restoration.denoise_wavelet
func_kw = dict(sigma=sigma, multichannel=multichannel,
rescale_sigma=rescale_sigma)
# max_shifts=0 is equivalent to just calling denoise_func
with expected_warnings([DASK_NOT_INSTALLED_WARNING]):
dn_cc = restoration.cycle_spin(noisy, denoise_func, max_shifts=0,
func_kw=func_kw,
multichannel=multichannel)
dn = denoise_func(noisy, **func_kw)
assert_equal(dn, dn_cc)
# denoising with cycle spinning will give better PSNR than without
for max_shifts in valid_shifts:
with expected_warnings([DASK_NOT_INSTALLED_WARNING]):
dn_cc = restoration.cycle_spin(noisy, denoise_func,
max_shifts=max_shifts,
func_kw=func_kw,
multichannel=multichannel)
psnr = peak_signal_noise_ratio(img, dn)
psnr_cc = peak_signal_noise_ratio(img, dn_cc)
assert_(psnr_cc > psnr)
for shift_steps in valid_steps:
with expected_warnings([DASK_NOT_INSTALLED_WARNING]):
dn_cc = restoration.cycle_spin(noisy, denoise_func,
max_shifts=2,
shift_steps=shift_steps,
func_kw=func_kw,
multichannel=multichannel)
psnr = peak_signal_noise_ratio(img, dn)
psnr_cc = peak_signal_noise_ratio(img, dn_cc)
assert_(psnr_cc > psnr)
for max_shifts in invalid_shifts:
with testing.raises(ValueError):
dn_cc = restoration.cycle_spin(noisy, denoise_func,
max_shifts=max_shifts,
func_kw=func_kw,
multichannel=multichannel)
for shift_steps in invalid_steps:
with testing.raises(ValueError):
dn_cc = restoration.cycle_spin(noisy, denoise_func,
max_shifts=2,
shift_steps=shift_steps,
func_kw=func_kw,
multichannel=multichannel)
def test_cycle_spinning_num_workers():
img = astro_gray
sigma = 0.1
rstate = np.random.RandomState(1234)
noisy = img.copy() + 0.1 * rstate.randn(*(img.shape))
denoise_func = restoration.denoise_wavelet
func_kw = dict(sigma=sigma, multichannel=True, rescale_sigma=True)
# same results are expected whether using 1 worker or multiple workers
dn_cc1 = restoration.cycle_spin(noisy, denoise_func, max_shifts=1,
func_kw=func_kw, multichannel=False,
num_workers=1)
with expected_warnings([DASK_NOT_INSTALLED_WARNING,]):
dn_cc2 = restoration.cycle_spin(noisy, denoise_func, max_shifts=1,
func_kw=func_kw, multichannel=False,
num_workers=4)
dn_cc3 = restoration.cycle_spin(noisy, denoise_func, max_shifts=1,
func_kw=func_kw, multichannel=False,
num_workers=None)
assert_almost_equal(dn_cc1, dn_cc2)
assert_almost_equal(dn_cc1, dn_cc3)
if __name__ == "__main__":
testing.run_module_suite()
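
For quick orientation, here is a minimal usage sketch of the denoising API that the tests above exercise. It assumes the scikit-image version targeted by this commit (which still uses the multichannel and rescale_sigma keywords; later releases replace multichannel with channel_axis), and the image and sigma values are illustrative only:

import numpy as np
from skimage import data, img_as_float, restoration

rng = np.random.RandomState(1234)
img = img_as_float(data.camera())[:128, :128]
# add synthetic Gaussian noise and keep the image in the valid float range
noisy = np.clip(img + 0.1 * rng.standard_normal(img.shape), 0, 1)
# estimate the noise standard deviation, then denoise with wavelet thresholding
sigma_est = restoration.estimate_sigma(noisy, multichannel=False)
denoised = restoration.denoise_wavelet(noisy, sigma=sigma_est,
                                       multichannel=False, rescale_sigma=True)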


@@ -0,0 +1,65 @@
import numpy as np
from skimage.restoration import inpaint
from skimage._shared import testing
from skimage._shared.testing import assert_allclose
def test_inpaint_biharmonic_2d():
img = np.tile(np.square(np.linspace(0, 1, 5)), (5, 1))
mask = np.zeros_like(img)
mask[2, 2:] = 1
mask[1, 3:] = 1
mask[0, 4:] = 1
img[np.where(mask)] = 0
out = inpaint.inpaint_biharmonic(img, mask)
ref = np.array(
[[0., 0.0625, 0.25000000, 0.5625000, 0.73925058],
[0., 0.0625, 0.25000000, 0.5478048, 0.76557821],
[0., 0.0625, 0.25842878, 0.5623079, 0.85927796],
[0., 0.0625, 0.25000000, 0.5625000, 1.00000000],
[0., 0.0625, 0.25000000, 0.5625000, 1.00000000]]
)
assert_allclose(ref, out)
def test_inpaint_biharmonic_3d():
img = np.tile(np.square(np.linspace(0, 1, 5)), (5, 1))
img = np.dstack((img, img.T))
mask = np.zeros_like(img)
mask[2, 2:, :] = 1
mask[1, 3:, :] = 1
mask[0, 4:, :] = 1
img[np.where(mask)] = 0
out = inpaint.inpaint_biharmonic(img, mask)
ref = np.dstack((
np.array(
[[0.0000, 0.0625, 0.25000000, 0.56250000, 0.53752796],
[0.0000, 0.0625, 0.25000000, 0.44443780, 0.53762210],
[0.0000, 0.0625, 0.23693666, 0.46621112, 0.68615592],
[0.0000, 0.0625, 0.25000000, 0.56250000, 1.00000000],
[0.0000, 0.0625, 0.25000000, 0.56250000, 1.00000000]]),
np.array(
[[0.0000, 0.0000, 0.00000000, 0.00000000, 0.19621902],
[0.0625, 0.0625, 0.06250000, 0.17470756, 0.30140091],
[0.2500, 0.2500, 0.27241289, 0.35155440, 0.43068654],
[0.5625, 0.5625, 0.56250000, 0.56250000, 0.56250000],
[1.0000, 1.0000, 1.00000000, 1.00000000, 1.00000000]])
))
assert_allclose(ref, out)
def test_invalid_input():
img, mask = np.zeros([]), np.zeros([])
with testing.raises(ValueError):
inpaint.inpaint_biharmonic(img, mask)
img, mask = np.zeros((2, 2)), np.zeros((4, 1))
with testing.raises(ValueError):
inpaint.inpaint_biharmonic(img, mask)
img = np.ma.array(np.zeros((2, 2)), mask=[[0, 0], [0, 0]])
mask = np.zeros((2, 2))
with testing.raises(TypeError):
inpaint.inpaint_biharmonic(img, mask)
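
As a quick reference, a minimal sketch of the inpainting call covered by these tests, reusing the toy image from test_inpaint_biharmonic_2d; the values are illustrative only:

import numpy as np
from skimage.restoration import inpaint

img = np.tile(np.square(np.linspace(0, 1, 5)), (5, 1))
mask = np.zeros_like(img)
mask[2, 2:] = 1               # mark the pixels to be reconstructed
img[mask.astype(bool)] = 0    # corrupt the image wherever the mask is set
filled = inpaint.inpaint_biharmonic(img, mask)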


@@ -0,0 +1,89 @@
import functools
import numpy as np
from skimage._shared.testing import assert_
from skimage.data import binary_blobs
from skimage.data import camera, chelsea
from skimage.metrics import mean_squared_error as mse
from skimage.restoration import (calibrate_denoiser,
denoise_wavelet)
from skimage.restoration.j_invariant import _invariant_denoise
from skimage.util import img_as_float, random_noise
test_img = img_as_float(camera())
test_img_color = img_as_float(chelsea())
test_img_3d = img_as_float(binary_blobs(64, n_dim=3)) / 2
noisy_img = random_noise(test_img, mode='gaussian', var=0.01)
noisy_img_color = random_noise(test_img_color, mode='gaussian', var=0.01)
noisy_img_3d = random_noise(test_img_3d, mode='gaussian', var=0.1)
_denoise_wavelet = functools.partial(denoise_wavelet, rescale_sigma=True)
def test_invariant_denoise():
denoised_img = _invariant_denoise(noisy_img, _denoise_wavelet)
denoised_mse = mse(denoised_img, test_img)
original_mse = mse(noisy_img, test_img)
assert_(denoised_mse < original_mse)
def test_invariant_denoise_color():
denoised_img_color = _invariant_denoise(
noisy_img_color, _denoise_wavelet,
denoiser_kwargs=dict(multichannel=True))
denoised_mse = mse(denoised_img_color, test_img_color)
original_mse = mse(noisy_img_color, test_img_color)
assert_(denoised_mse < original_mse)
def test_invariant_denoise_3d():
denoised_img_3d = _invariant_denoise(noisy_img_3d, _denoise_wavelet)
denoised_mse = mse(denoised_img_3d, test_img_3d)
original_mse = mse(noisy_img_3d, test_img_3d)
assert_(denoised_mse < original_mse)
def test_calibrate_denoiser_extra_output():
parameter_ranges = {'sigma': np.linspace(0.1, 1, 5) / 2}
_, (parameters_tested, losses) = calibrate_denoiser(
noisy_img,
_denoise_wavelet,
denoise_parameters=parameter_ranges,
extra_output=True
)
all_denoised = [_invariant_denoise(noisy_img, _denoise_wavelet,
denoiser_kwargs=denoiser_kwargs)
for denoiser_kwargs in parameters_tested]
ground_truth_losses = [mse(img, test_img) for img in all_denoised]
assert_(np.argmin(losses) == np.argmin(ground_truth_losses))
def test_calibrate_denoiser():
parameter_ranges = {'sigma': np.linspace(0.1, 1, 5) / 2}
denoiser = calibrate_denoiser(noisy_img, _denoise_wavelet,
denoise_parameters=parameter_ranges)
denoised_mse = mse(denoiser(noisy_img), test_img)
original_mse = mse(noisy_img, test_img)
assert_(denoised_mse < original_mse)
def test_input_image_not_modified():
input_image = noisy_img.copy()
parameter_ranges = {'sigma': np.random.random(5) / 2}
calibrate_denoiser(input_image, _denoise_wavelet,
denoise_parameters=parameter_ranges)
assert_(np.all(noisy_img == input_image))
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
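
A minimal sketch of the calibration workflow exercised above, under the same assumptions as these tests (calibrate_denoiser returns a ready-to-call denoiser unless extra_output=True; the parameter grid here is illustrative):

import functools
import numpy as np
from skimage.data import camera
from skimage.restoration import calibrate_denoiser, denoise_wavelet
from skimage.util import img_as_float, random_noise

image = img_as_float(camera())
noisy = random_noise(image, mode='gaussian', var=0.01)
_denoise_wavelet = functools.partial(denoise_wavelet, rescale_sigma=True)
# grid of candidate sigmas; the self-supervised loss picks the best one
parameter_ranges = {'sigma': np.linspace(0.05, 0.5, 5)}
best_denoise = calibrate_denoiser(noisy, _denoise_wavelet,
                                  denoise_parameters=parameter_ranges)
denoised = best_denoise(noisy)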


@@ -0,0 +1,92 @@
import numpy as np
from scipy.signal import convolve2d
from scipy import ndimage as ndi
from skimage._shared.testing import fetch
import skimage
from skimage.data import camera
from skimage import restoration
from skimage.restoration import uft
test_img = skimage.img_as_float(camera())
def test_wiener():
psf = np.ones((5, 5)) / 25
data = convolve2d(test_img, psf, 'same')
np.random.seed(0)
data += 0.1 * data.std() * np.random.standard_normal(data.shape)
deconvolved = restoration.wiener(data, psf, 0.05)
path = fetch('restoration/tests/camera_wiener.npy')
np.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3)
_, laplacian = uft.laplacian(2, data.shape)
otf = uft.ir2tf(psf, data.shape, is_real=False)
deconvolved = restoration.wiener(data, otf, 0.05,
reg=laplacian,
is_real=False)
np.testing.assert_allclose(np.real(deconvolved),
np.load(path),
rtol=1e-3)
def test_unsupervised_wiener():
psf = np.ones((5, 5)) / 25
data = convolve2d(test_img, psf, 'same')
np.random.seed(0)
data += 0.1 * data.std() * np.random.standard_normal(data.shape)
deconvolved, _ = restoration.unsupervised_wiener(data, psf)
path = fetch('restoration/tests/camera_unsup.npy')
np.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3)
_, laplacian = uft.laplacian(2, data.shape)
otf = uft.ir2tf(psf, data.shape, is_real=False)
np.random.seed(0)
deconvolved = restoration.unsupervised_wiener(
data, otf, reg=laplacian, is_real=False,
user_params={"callback": lambda x: None})[0]
path = fetch('restoration/tests/camera_unsup2.npy')
np.testing.assert_allclose(np.real(deconvolved),
np.load(path),
rtol=1e-3)
def test_image_shape():
"""Test that shape of output image in deconvolution is same as input.
This addresses issue #1172.
"""
point = np.zeros((5, 5), float)
point[2, 2] = 1.
psf = ndi.gaussian_filter(point, sigma=1.)
# image shape: (45, 45), as reported in #1172
image = skimage.img_as_float(camera()[110:155, 225:270]) # just the face
image_conv = ndi.convolve(image, psf)
deconv_sup = restoration.wiener(image_conv, psf, 1)
deconv_un = restoration.unsupervised_wiener(image_conv, psf)[0]
# test the shape
np.testing.assert_equal(image.shape, deconv_sup.shape)
np.testing.assert_equal(image.shape, deconv_un.shape)
# test the reconstruction error
sup_relative_error = np.abs(deconv_sup - image) / image
un_relative_error = np.abs(deconv_un - image) / image
np.testing.assert_array_less(np.median(sup_relative_error), 0.1)
np.testing.assert_array_less(np.median(un_relative_error), 0.1)
def test_richardson_lucy():
psf = np.ones((5, 5)) / 25
data = convolve2d(test_img, psf, 'same')
np.random.seed(0)
data += 0.1 * data.std() * np.random.standard_normal(data.shape)
deconvolved = restoration.richardson_lucy(data, psf, 5)
path = fetch('restoration/tests/camera_rl.npy')
np.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3)
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
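
For reference, a minimal sketch of the supervised Wiener deconvolution path tested above; the blur and noise setup mirrors test_wiener, and the balance value is illustrative:

import numpy as np
from scipy.signal import convolve2d
import skimage
from skimage.data import camera
from skimage import restoration

image = skimage.img_as_float(camera())
psf = np.ones((5, 5)) / 25                       # uniform 5x5 blur kernel
blurred = convolve2d(image, psf, 'same')
rng = np.random.RandomState(0)
blurred += 0.1 * blurred.std() * rng.standard_normal(blurred.shape)
deconvolved = restoration.wiener(blurred, psf, balance=0.05)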


@@ -0,0 +1,219 @@
import numpy as np
from skimage.restoration import unwrap_phase
import sys
import warnings
from skimage._shared import testing
from skimage._shared.testing import (assert_array_almost_equal_nulp,
assert_almost_equal, assert_array_equal,
assert_, skipif)
from skimage._shared._warnings import expected_warnings
def assert_phase_almost_equal(a, b, *args, **kwargs):
"""An assert_almost_equal insensitive to phase shifts of n*2*pi."""
shift = 2 * np.pi * np.round((b.mean() - a.mean()) / (2 * np.pi))
with expected_warnings([r'invalid value encountered|\A\Z',
r'divide by zero encountered|\A\Z']):
print('assert_phase_allclose, abs', np.max(np.abs(a - (b - shift))))
print('assert_phase_allclose, rel',
np.max(np.abs((a - (b - shift)) / a)))
if np.ma.isMaskedArray(a):
assert_(np.ma.isMaskedArray(b))
assert_array_equal(a.mask, b.mask)
assert_(a.fill_value == b.fill_value)
au = np.asarray(a)
bu = np.asarray(b)
with expected_warnings([r'invalid value encountered|\A\Z',
r'divide by zero encountered|\A\Z']):
print('assert_phase_allclose, no mask, abs',
np.max(np.abs(au - (bu - shift))))
print('assert_phase_allclose, no mask, rel',
np.max(np.abs((au - (bu - shift)) / au)))
assert_array_almost_equal_nulp(a + shift, b, *args, **kwargs)
def check_unwrap(image, mask=None):
image_wrapped = np.angle(np.exp(1j * image))
if mask is not None:
print('Testing a masked image')
image = np.ma.array(image, mask=mask, fill_value=0.5)
image_wrapped = np.ma.array(image_wrapped, mask=mask, fill_value=0.5)
image_unwrapped = unwrap_phase(image_wrapped, seed=0)
assert_phase_almost_equal(image_unwrapped, image)
def test_unwrap_1d():
image = np.linspace(0, 10 * np.pi, 100)
check_unwrap(image)
# Masked arrays are not allowed in 1D
with testing.raises(ValueError):
check_unwrap(image, True)
# wrap_around is not allowed in 1D
with testing.raises(ValueError):
unwrap_phase(image, True, seed=0)
@testing.parametrize("check_with_mask", (False, True))
def test_unwrap_2d(check_with_mask):
mask = None
x, y = np.ogrid[:8, :16]
image = 2 * np.pi * (x * 0.2 + y * 0.1)
if check_with_mask:
mask = np.zeros(image.shape, dtype=bool)
mask[4:6, 4:8] = True
check_unwrap(image, mask)
@testing.parametrize("check_with_mask", (False, True))
def test_unwrap_3d(check_with_mask):
mask = None
x, y, z = np.ogrid[:8, :12, :16]
image = 2 * np.pi * (x * 0.2 + y * 0.1 + z * 0.05)
if check_with_mask:
mask = np.zeros(image.shape, dtype=bool)
mask[4:6, 4:6, 1:3] = True
check_unwrap(image, mask)
def check_wrap_around(ndim, axis):
# create a ramp, but with the last pixel along axis equalling the first
elements = 100
ramp = np.linspace(0, 12 * np.pi, elements)
ramp[-1] = ramp[0]
image = ramp.reshape(tuple([elements if n == axis else 1
for n in range(ndim)]))
image_wrapped = np.angle(np.exp(1j * image))
index_first = tuple([0] * ndim)
index_last = tuple([-1 if n == axis else 0 for n in range(ndim)])
# unwrap the image without wrap around
# We do not want warnings about length 1 dimensions
with expected_warnings([r'Image has a length 1 dimension|\A\Z']):
image_unwrap_no_wrap_around = unwrap_phase(image_wrapped, seed=0)
print('endpoints without wrap_around:',
image_unwrap_no_wrap_around[index_first],
image_unwrap_no_wrap_around[index_last])
# without wrap around, the endpoints of the image should differ
assert_(abs(image_unwrap_no_wrap_around[index_first] -
image_unwrap_no_wrap_around[index_last]) > np.pi)
# unwrap the image with wrap around
wrap_around = [n == axis for n in range(ndim)]
# We do not want warnings about length 1 dimensions
with expected_warnings([r'Image has a length 1 dimension.|\A\Z']):
image_unwrap_wrap_around = unwrap_phase(image_wrapped, wrap_around,
seed=0)
print('endpoints with wrap_around:',
image_unwrap_wrap_around[index_first],
image_unwrap_wrap_around[index_last])
# with wrap around, the endpoints of the image should be equal
assert_almost_equal(image_unwrap_wrap_around[index_first],
image_unwrap_wrap_around[index_last])
dim_axis = [(ndim, axis) for ndim in (2, 3) for axis in range(ndim)]
@skipif(sys.version_info[:2] == (3, 4),
reason="Doesn't work with python 3.4. See issue #3079")
@testing.parametrize("ndim, axis", dim_axis)
def test_wrap_around(ndim, axis):
check_wrap_around(ndim, axis)
def test_mask():
length = 100
ramps = [np.linspace(0, 4 * np.pi, length),
np.linspace(0, 8 * np.pi, length),
np.linspace(0, 6 * np.pi, length)]
image = np.vstack(ramps)
mask_1d = np.ones((length,), dtype=bool)
mask_1d[0] = mask_1d[-1] = False
for i in range(len(ramps)):
# mask all ramps but the i'th one
mask = np.zeros(image.shape, dtype=bool)
mask |= mask_1d.reshape(1, -1)
mask[i, :] = False # unmask i'th ramp
image_wrapped = np.ma.array(np.angle(np.exp(1j * image)), mask=mask)
image_unwrapped = unwrap_phase(image_wrapped)
image_unwrapped -= image_unwrapped[0, 0] # remove phase shift
# The end of the unwrapped array should have value equal to the
# endpoint of the unmasked ramp
assert_array_almost_equal_nulp(image_unwrapped[:, -1], image[i, -1])
assert_(np.ma.isMaskedArray(image_unwrapped))
# Same tests, but forcing use of the 3D unwrapper by reshaping
with expected_warnings(['length 1 dimension']):
shape = (1,) + image_wrapped.shape
image_wrapped_3d = image_wrapped.reshape(shape)
image_unwrapped_3d = unwrap_phase(image_wrapped_3d)
# remove phase shift
image_unwrapped_3d -= image_unwrapped_3d[0, 0, 0]
assert_array_almost_equal_nulp(image_unwrapped_3d[:, :, -1],
image[i, -1])
def test_invalid_input():
with testing.raises(ValueError):
unwrap_phase(np.zeros([]))
with testing.raises(ValueError):
unwrap_phase(np.zeros((1, 1, 1, 1)))
with testing.raises(ValueError):
unwrap_phase(np.zeros((1, 1)), 3 * [False])
with testing.raises(ValueError):
unwrap_phase(np.zeros((1, 1)), 'False')
def test_unwrap_3d_middle_wrap_around():
# Segmentation fault in 3D unwrap phase with middle dimension connected
# GitHub issue #1171
image = np.zeros((20, 30, 40), dtype=np.float32)
unwrap = unwrap_phase(image, wrap_around=[False, True, False])
assert_(np.all(unwrap == 0))
def test_unwrap_2d_compressed_mask():
# ValueError when image is a masked array with a compressed mask (no masked
# elements). GitHub issue #1346
image = np.ma.zeros((10, 10))
unwrap = unwrap_phase(image)
assert_(np.all(unwrap == 0))
def test_unwrap_2d_all_masked():
# Segmentation fault when image is a masked array with all elements masked
# GitHub issue #1347
# all elements masked
image = np.ma.zeros((10, 10))
image[:] = np.ma.masked
unwrap = unwrap_phase(image)
assert_(np.ma.isMaskedArray(unwrap))
assert_(np.all(unwrap.mask))
# 1 unmasked element, still zero edges
image = np.ma.zeros((10, 10))
image[:] = np.ma.masked
image[0, 0] = 0
unwrap = unwrap_phase(image)
assert_(np.ma.isMaskedArray(unwrap))
assert_(np.sum(unwrap.mask) == 99) # all but one masked
assert_(unwrap[0, 0] == 0)
def test_unwrap_3d_all_masked():
# all elements masked
image = np.ma.zeros((10, 10, 10))
image[:] = np.ma.masked
unwrap = unwrap_phase(image)
assert_(np.ma.isMaskedArray(unwrap))
assert_(np.all(unwrap.mask))
# 1 unmasked element, still zero edges
image = np.ma.zeros((10, 10, 10))
image[:] = np.ma.masked
image[0, 0, 0] = 0
unwrap = unwrap_phase(image)
assert_(np.ma.isMaskedArray(unwrap))
assert_(np.sum(unwrap.mask) == 999) # all but one masked
assert_(unwrap[0, 0, 0] == 0)
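
Finally, a minimal sketch of 2D phase unwrapping as exercised by check_unwrap above; the ramp parameters are illustrative, and the unwrapped result matches the original phase only up to a constant multiple of 2*pi:

import numpy as np
from skimage.restoration import unwrap_phase

y, x = np.ogrid[:64, :64]
image = 2 * np.pi * (x * 0.2 + y * 0.1)        # smooth phase ramp spanning many cycles
image_wrapped = np.angle(np.exp(1j * image))   # wrap the phase into (-pi, pi]
image_unwrapped = unwrap_phase(image_wrapped)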