Fixed database typo and removed unnecessary class identifier.
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions
@@ -0,0 +1,9 @@
from ..._shared.testing import setup_test, teardown_test


def setup():
    setup_test()


def teardown():
    teardown_test()
12 binary files changed (contents not shown).
@@ -0,0 +1,146 @@
import numpy as np
from skimage import data
from skimage.color import rgb2gray
from skimage.filters import gaussian
from skimage.segmentation import active_contour

from skimage._shared import testing
from skimage._shared.testing import assert_equal, assert_allclose
from skimage._shared._warnings import expected_warnings


def test_periodic_reference():
    img = data.astronaut()
    img = rgb2gray(img)
    s = np.linspace(0, 2*np.pi, 400)
    r = 100 + 100*np.sin(s)
    c = 220 + 100*np.cos(s)
    init = np.array([r, c]).T
    snake = active_contour(gaussian(img, 3), init, alpha=0.015, beta=10,
                           w_line=0, w_edge=1, gamma=0.001, coordinates='rc')
    refr = [98, 99, 100, 101, 102, 103, 104, 105, 106, 108]
    refc = [299, 298, 298, 298, 298, 297, 297, 296, 296, 295]
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refr)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refc)


def test_fixed_reference():
    img = data.text()
    r = np.linspace(136, 50, 100)
    c = np.linspace(5, 424, 100)
    init = np.array([r, c]).T
    snake = active_contour(gaussian(img, 1), init, boundary_condition='fixed',
                           alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1,
                           coordinates='rc')
    refr = [136, 135, 134, 133, 132, 131, 129, 128, 127, 125]
    refc = [5, 9, 13, 17, 21, 25, 30, 34, 38, 42]
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refr)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refc)


def test_free_reference():
    img = data.text()
    r = np.linspace(70, 40, 100)
    c = np.linspace(5, 424, 100)
    init = np.array([r, c]).T
    snake = active_contour(gaussian(img, 3), init, boundary_condition='free',
                           alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1,
                           coordinates='rc')
    refr = [76, 76, 75, 74, 73, 72, 71, 70, 69, 69]
    refc = [10, 13, 16, 19, 23, 26, 29, 32, 36, 39]
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refr)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refc)


def test_RGB():
    img = gaussian(data.text(), 1)
    imgR = np.zeros((img.shape[0], img.shape[1], 3))
    imgG = np.zeros((img.shape[0], img.shape[1], 3))
    imgRGB = np.zeros((img.shape[0], img.shape[1], 3))
    imgR[:, :, 0] = img
    imgG[:, :, 1] = img
    imgRGB[:, :, :] = img[:, :, None]
    r = np.linspace(136, 50, 100)
    c = np.linspace(5, 424, 100)
    init = np.array([r, c]).T
    snake = active_contour(imgR, init, boundary_condition='fixed',
                           alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1,
                           coordinates='rc')
    refr = [136, 135, 134, 133, 132, 131, 129, 128, 127, 125]
    refc = [5, 9, 13, 17, 21, 25, 30, 34, 38, 42]
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refr)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refc)
    snake = active_contour(imgG, init, boundary_condition='fixed',
                           alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1,
                           coordinates='rc')
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refr)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refc)
    snake = active_contour(imgRGB, init, boundary_condition='fixed',
                           alpha=0.1, beta=1.0, w_line=-5/3., w_edge=0,
                           gamma=0.1, coordinates='rc')
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refr)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refc)


def test_end_points():
    img = data.astronaut()
    img = rgb2gray(img)
    s = np.linspace(0, 2*np.pi, 400)
    r = 100 + 100*np.sin(s)
    c = 220 + 100*np.cos(s)
    init = np.array([r, c]).T
    snake = active_contour(gaussian(img, 3), init,
                           boundary_condition='periodic', alpha=0.015, beta=10,
                           w_line=0, w_edge=1, gamma=0.001, max_iterations=100,
                           coordinates='rc')
    assert np.sum(np.abs(snake[0, :]-snake[-1, :])) < 2
    snake = active_contour(gaussian(img, 3), init,
                           boundary_condition='free', alpha=0.015, beta=10,
                           w_line=0, w_edge=1, gamma=0.001, max_iterations=100,
                           coordinates='rc')
    assert np.sum(np.abs(snake[0, :]-snake[-1, :])) > 2
    snake = active_contour(gaussian(img, 3), init,
                           boundary_condition='fixed', alpha=0.015, beta=10,
                           w_line=0, w_edge=1, gamma=0.001, max_iterations=100,
                           coordinates='rc')
    assert_allclose(snake[0, :], [r[0], c[0]], atol=1e-5)


def test_bad_input():
    img = np.zeros((10, 10))
    r = np.linspace(136, 50, 100)
    c = np.linspace(5, 424, 100)
    init = np.array([r, c]).T
    with testing.raises(ValueError):
        active_contour(img, init, boundary_condition='wrong',
                       coordinates='rc')
    with testing.raises(ValueError):
        active_contour(img, init, max_iterations=-15,
                       coordinates='rc')


def test_bc_deprecation():
    with expected_warnings(['boundary_condition']):
        img = rgb2gray(data.astronaut())
        s = np.linspace(0, 2*np.pi, 400)
        r = 100 + 100*np.sin(s)
        c = 220 + 100*np.cos(s)
        init = np.array([r, c]).T
        snake = active_contour(gaussian(img, 3), init,
                               bc='periodic', alpha=0.015, beta=10,
                               w_line=0, w_edge=1, gamma=0.001,
                               max_iterations=100, coordinates='rc')


def test_xy_coord_warning():
    # this should raise ValueError after 0.18.
    with expected_warnings(['xy coordinates']):
        img = rgb2gray(data.astronaut())
        s = np.linspace(0, 2*np.pi, 400)
        x = 100 + 100*np.sin(s)
        y = 220 + 100*np.cos(s)
        init = np.array([x, y]).T
        snake = active_contour(gaussian(img, 3), init,
                               boundary_condition='periodic', alpha=0.015,
                               beta=10, w_line=0, w_edge=1, gamma=0.001,
                               max_iterations=100)
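The file above exercises skimage.segmentation.active_contour end to end. For quick reference, a minimal standalone sketch of the same call pattern, with parameter values taken from test_periodic_reference (it assumes the scikit-image version under test and its bundled astronaut sample):

import numpy as np
from skimage import data
from skimage.color import rgb2gray
from skimage.filters import gaussian
from skimage.segmentation import active_contour

img = rgb2gray(data.astronaut())
s = np.linspace(0, 2 * np.pi, 400)
# Initial snake as (row, col) coordinates on a circle around the face.
init = np.array([100 + 100 * np.sin(s), 220 + 100 * np.cos(s)]).T
# Smooth first, then evolve; alpha/beta control elasticity and rigidity.
snake = active_contour(gaussian(img, 3), init, alpha=0.015, beta=10,
                       w_line=0, w_edge=1, gamma=0.001, coordinates='rc')
print(snake.shape)  # (400, 2) array of final snake coordinates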
@@ -0,0 +1,120 @@
import numpy as np
from skimage.segmentation import find_boundaries, mark_boundaries

from skimage._shared.testing import assert_array_equal, assert_allclose


white = (1, 1, 1)


def test_find_boundaries():
    image = np.zeros((10, 10), dtype=np.uint8)
    image[2:7, 2:7] = 1

    ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

    result = find_boundaries(image)
    assert_array_equal(result, ref)


def test_find_boundaries_bool():
    image = np.zeros((5, 5), dtype=np.bool)
    image[2:5, 2:5] = True

    ref = np.array([[False, False, False, False, False],
                    [False, False, True, True, True],
                    [False, True, True, True, True],
                    [False, True, True, False, False],
                    [False, True, True, False, False]], dtype=np.bool)
    result = find_boundaries(image)
    assert_array_equal(result, ref)


def test_mark_boundaries():
    image = np.zeros((10, 10))
    label_image = np.zeros((10, 10), dtype=np.uint8)
    label_image[2:7, 2:7] = 1

    ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

    marked = mark_boundaries(image, label_image, color=white, mode='thick')
    result = np.mean(marked, axis=-1)
    assert_array_equal(result, ref)

    ref = np.array([[0, 2, 2, 2, 2, 2, 2, 2, 0, 0],
                    [2, 2, 1, 1, 1, 1, 1, 2, 2, 0],
                    [2, 1, 1, 1, 1, 1, 1, 1, 2, 0],
                    [2, 1, 1, 2, 2, 2, 1, 1, 2, 0],
                    [2, 1, 1, 2, 0, 2, 1, 1, 2, 0],
                    [2, 1, 1, 2, 2, 2, 1, 1, 2, 0],
                    [2, 1, 1, 1, 1, 1, 1, 1, 2, 0],
                    [2, 2, 1, 1, 1, 1, 1, 2, 2, 0],
                    [0, 2, 2, 2, 2, 2, 2, 2, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    marked = mark_boundaries(image, label_image, color=white,
                             outline_color=(2, 2, 2), mode='thick')
    result = np.mean(marked, axis=-1)
    assert_array_equal(result, ref)


def test_mark_boundaries_bool():
    image = np.zeros((10, 10), dtype=np.bool)
    label_image = np.zeros((10, 10), dtype=np.uint8)
    label_image[2:7, 2:7] = 1

    ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                    [0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])

    marked = mark_boundaries(image, label_image, color=white, mode='thick')
    result = np.mean(marked, axis=-1)
    assert_array_equal(result, ref)


def test_mark_boundaries_subpixel():
    labels = np.array([[0, 0, 0, 0],
                       [0, 0, 5, 0],
                       [0, 1, 5, 0],
                       [0, 0, 5, 0],
                       [0, 0, 0, 0]], dtype=np.uint8)
    np.random.seed(0)
    image = np.round(np.random.rand(*labels.shape), 2)
    marked = mark_boundaries(image, labels, color=white, mode='subpixel')
    marked_proj = np.round(np.mean(marked, axis=-1), 2)

    ref_result = np.array(
        [[ 0.55, 0.63, 0.72, 0.69, 0.6 , 0.55, 0.54],
         [ 0.45, 0.58, 0.72, 1.  , 1.  , 1.  , 0.69],
         [ 0.42, 0.54, 0.65, 1.  , 0.44, 1.  , 0.89],
         [ 0.69, 1.  , 1.  , 1.  , 0.69, 1.  , 0.83],
         [ 0.96, 1.  , 0.38, 1.  , 0.79, 1.  , 0.53],
         [ 0.89, 1.  , 1.  , 1.  , 0.38, 1.  , 0.16],
         [ 0.57, 0.78, 0.93, 1.  , 0.07, 1.  , 0.09],
         [ 0.2 , 0.52, 0.92, 1.  , 1.  , 1.  , 0.54],
         [ 0.02, 0.35, 0.83, 0.9 , 0.78, 0.81, 0.87]])
    assert_allclose(marked_proj, ref_result, atol=0.01)
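As a companion to the tests above, a minimal sketch of the find_boundaries / mark_boundaries call pattern they exercise; the array values mirror the test fixtures and nothing here goes beyond the API shown above:

import numpy as np
from skimage.segmentation import find_boundaries, mark_boundaries

labels = np.zeros((10, 10), dtype=np.uint8)
labels[2:7, 2:7] = 1
edges = find_boundaries(labels)                     # boolean boundary mask
overlay = mark_boundaries(np.zeros((10, 10)), labels,
                          color=(1, 1, 1), mode='thick')  # RGB float overlay
print(edges.sum(), overlay.shape)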
@@ -0,0 +1,90 @@
import numpy as np
from skimage.segmentation import chan_vese

from skimage._shared import testing
from skimage._shared.testing import assert_array_equal


def test_chan_vese_flat_level_set():
    # because the algorithm evolves the level set around the
    # zero-level, if the level-set has no zero level, the algorithm
    # will not produce results in theory. However, since a continuous
    # approximation of the delta function is used, the algorithm
    # still affects the entirety of the level-set. Therefore with
    # infinite time, the segmentation will still converge.
    img = np.zeros((10, 10))
    img[3:6, 3:6] = np.ones((3, 3))
    ls = np.full((10, 10), 1000)
    result = chan_vese(img, mu=0.0, tol=1e-3, init_level_set=ls)
    assert_array_equal(result.astype(np.float), np.ones((10, 10)))
    result = chan_vese(img, mu=0.0, tol=1e-3, init_level_set=-ls)
    assert_array_equal(result.astype(np.float), np.zeros((10, 10)))


def test_chan_vese_small_disk_level_set():
    img = np.zeros((10, 10))
    img[3:6, 3:6] = np.ones((3, 3))
    result = chan_vese(img, mu=0.0, tol=1e-3, init_level_set="small disk")
    assert_array_equal(result.astype(np.float), img)


def test_chan_vese_simple_shape():
    img = np.zeros((10, 10))
    img[3:6, 3:6] = np.ones((3, 3))
    result = chan_vese(img, mu=0.0, tol=1e-8).astype(np.float)
    assert_array_equal(result, img)


def test_chan_vese_extended_output():
    img = np.zeros((10, 10))
    img[3:6, 3:6] = np.ones((3, 3))
    result = chan_vese(img, mu=0.0, tol=1e-8, extended_output=True)
    assert_array_equal(len(result), 3)


def test_chan_vese_remove_noise():
    ref = np.zeros((10, 10))
    ref[1:6, 1:6] = np.array([[0, 1, 1, 1, 0],
                              [1, 1, 1, 1, 1],
                              [1, 1, 1, 1, 1],
                              [1, 1, 1, 1, 1],
                              [0, 1, 1, 1, 0]])
    img = ref.copy()
    img[8, 3] = 1
    result = chan_vese(img, mu=0.3, tol=1e-3, max_iter=100, dt=10,
                       init_level_set="disk").astype(np.float)
    assert_array_equal(result, ref)


def test_chan_vese_incorrect_image_type():
    img = np.zeros((10, 10, 3))
    ls = np.zeros((10, 9))
    with testing.raises(ValueError):
        chan_vese(img, mu=0.0, init_level_set=ls)


def test_chan_vese_gap_closing():
    ref = np.zeros((20, 20))
    ref[8:15, :] = np.ones((7, 20))
    img = ref.copy()
    img[:, 6] = np.zeros((20))
    result = chan_vese(img, mu=0.7, tol=1e-3, max_iter=1000, dt=1000,
                       init_level_set="disk").astype(np.float)
    assert_array_equal(result, ref)


def test_chan_vese_incorrect_level_set():
    img = np.zeros((10, 10))
    ls = np.zeros((10, 9))
    with testing.raises(ValueError):
        chan_vese(img, mu=0.0, init_level_set=ls)
    with testing.raises(ValueError):
        chan_vese(img, mu=0.0, init_level_set="a")


def test_chan_vese_blank_image():
    img = np.zeros((10, 10))
    level_set = np.random.rand(10, 10)
    ref = level_set > 0
    result = chan_vese(img, mu=0.0, tol=0.0, init_level_set=level_set)
    assert_array_equal(result, ref)
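A compact sketch of the chan_vese call pattern exercised above, using the same synthetic square and the extended_output form that also returns the final level set and the energy history (parameter values mirror the tests):

import numpy as np
from skimage.segmentation import chan_vese

img = np.zeros((10, 10))
img[3:6, 3:6] = 1
# mu weights the length penalty; extended_output returns (seg, phi, energies).
seg, phi, energies = chan_vese(img, mu=0.0, tol=1e-8, extended_output=True)
print(seg.dtype, len(energies))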
@@ -0,0 +1,175 @@
import numpy as np
from skimage.segmentation import clear_border

from skimage._shared.testing import assert_array_equal, assert_


def test_clear_border():
    image = np.array(
        [[0, 0, 0, 0, 0, 0, 0, 1, 0],
         [1, 1, 0, 0, 1, 0, 0, 1, 0],
         [1, 1, 0, 1, 0, 1, 0, 0, 0],
         [0, 0, 0, 1, 1, 1, 1, 0, 0],
         [0, 1, 1, 1, 1, 1, 1, 1, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0]])

    # test default case
    result = clear_border(image.copy())
    ref = image.copy()
    ref[1:3, 0:2] = 0
    ref[0:2, -2] = 0
    assert_array_equal(result, ref)

    # test buffer
    result = clear_border(image.copy(), 1)
    assert_array_equal(result, np.zeros(result.shape))

    # test background value
    result = clear_border(image.copy(), buffer_size=1, bgval=2)
    assert_array_equal(result, 2 * np.ones_like(image))

    # test mask
    mask = np.array([[0, 0, 1, 1, 1, 1, 1, 1, 1],
                     [0, 0, 1, 1, 1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1],
                     [1, 1, 1, 1, 1, 1, 1, 1, 1]]).astype(np.bool)
    result = clear_border(image.copy(), mask=mask)
    ref = image.copy()
    ref[1:3, 0:2] = 0
    assert_array_equal(result, ref)


def test_clear_border_3d():
    image = np.array([
        [[0, 0, 0, 0],
         [0, 0, 0, 0],
         [0, 0, 0, 0],
         [1, 0, 0, 0]],
        [[0, 0, 0, 0],
         [0, 1, 1, 0],
         [0, 0, 1, 0],
         [0, 0, 0, 0]],
        [[0, 0, 0, 0],
         [0, 0, 0, 0],
         [0, 0, 0, 0],
         [0, 0, 0, 0]],
        ])
    # test default case
    result = clear_border(image.copy())
    ref = image.copy()
    ref[0, 3, 0] = 0
    assert_array_equal(result, ref)

    # test buffer
    result = clear_border(image.copy(), 1)
    assert_array_equal(result, np.zeros(result.shape))

    # test background value
    result = clear_border(image.copy(), buffer_size=1, bgval=2)
    assert_array_equal(result, 2 * np.ones_like(image))


def test_clear_border_non_binary():
    image = np.array([[1, 2, 3, 1, 2],
                      [3, 3, 5, 4, 2],
                      [3, 4, 5, 4, 2],
                      [3, 3, 2, 1, 2]])

    result = clear_border(image)
    expected = np.array([[0, 0, 0, 0, 0],
                         [0, 0, 5, 4, 0],
                         [0, 4, 5, 4, 0],
                         [0, 0, 0, 0, 0]])

    assert_array_equal(result, expected)
    assert_(not np.all(image == result))


def test_clear_border_non_binary_3d():
    image3d = np.array(
        [[[1, 2, 3, 1, 2],
          [3, 3, 3, 4, 2],
          [3, 4, 3, 4, 2],
          [3, 3, 2, 1, 2]],
         [[1, 2, 3, 1, 2],
          [3, 3, 5, 4, 2],
          [3, 4, 5, 4, 2],
          [3, 3, 2, 1, 2]],
         [[1, 2, 3, 1, 2],
          [3, 3, 3, 4, 2],
          [3, 4, 3, 4, 2],
          [3, 3, 2, 1, 2]],
         ])

    result = clear_border(image3d)
    expected = np.array(
        [[[0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0]],
         [[0, 0, 0, 0, 0],
          [0, 0, 5, 0, 0],
          [0, 0, 5, 0, 0],
          [0, 0, 0, 0, 0]],
         [[0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0]],
         ])

    assert_array_equal(result, expected)
    assert_(not np.all(image3d == result))


def test_clear_border_non_binary_inplace():
    image = np.array([[1, 2, 3, 1, 2],
                      [3, 3, 5, 4, 2],
                      [3, 4, 5, 4, 2],
                      [3, 3, 2, 1, 2]])

    result = clear_border(image, in_place=True)
    expected = np.array([[0, 0, 0, 0, 0],
                         [0, 0, 5, 4, 0],
                         [0, 4, 5, 4, 0],
                         [0, 0, 0, 0, 0]])

    assert_array_equal(result, expected)
    assert_array_equal(image, result)


def test_clear_border_non_binary_inplace_3d():
    image3d = np.array(
        [[[1, 2, 3, 1, 2],
          [3, 3, 3, 4, 2],
          [3, 4, 3, 4, 2],
          [3, 3, 2, 1, 2]],
         [[1, 2, 3, 1, 2],
          [3, 3, 5, 4, 2],
          [3, 4, 5, 4, 2],
          [3, 3, 2, 1, 2]],
         [[1, 2, 3, 1, 2],
          [3, 3, 3, 4, 2],
          [3, 4, 3, 4, 2],
          [3, 3, 2, 1, 2]],
         ])

    result = clear_border(image3d, in_place=True)
    expected = np.array(
        [[[0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0]],
         [[0, 0, 0, 0, 0],
          [0, 0, 5, 0, 0],
          [0, 0, 5, 0, 0],
          [0, 0, 0, 0, 0]],
         [[0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0],
          [0, 0, 0, 0, 0]],
         ])

    assert_array_equal(result, expected)
    assert_array_equal(image3d, result)
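The same call pattern in isolation; a minimal sketch using the non-binary label fixture from the tests above (buffer_size, bgval and in_place are the options the tests cover):

import numpy as np
from skimage.segmentation import clear_border

labels = np.array([[1, 2, 3, 1, 2],
                   [3, 3, 5, 4, 2],
                   [3, 4, 5, 4, 2],
                   [3, 3, 2, 1, 2]])
# Objects touching the image edge are reset to bgval; pass in_place=True to
# modify the input array instead of returning a copy.
cleaned = clear_border(labels, buffer_size=0, bgval=0)
print(cleaned)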
@@ -0,0 +1,82 @@
import numpy as np
from skimage import data
from skimage.segmentation import felzenszwalb

from skimage._shared import testing
from skimage._shared.testing import (assert_greater, test_parallel,
                                     assert_equal, assert_array_equal,
                                     assert_warns, assert_no_warnings)


@test_parallel()
def test_grey():
    # very weak tests.
    img = np.zeros((20, 21))
    img[:10, 10:] = 0.2
    img[10:, :10] = 0.4
    img[10:, 10:] = 0.6
    seg = felzenszwalb(img, sigma=0)
    # we expect 4 segments:
    assert_equal(len(np.unique(seg)), 4)
    # that mostly respect the 4 regions:
    for i in range(4):
        hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0]
        assert_greater(hist[i], 40)


def test_minsize():
    # single-channel:
    img = data.coins()[20:168, 0:128]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(img, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        assert_greater(counts.min() + 1, min_size)
    # multi-channel:
    coffee = data.coffee()[::4, ::4]
    for min_size in np.arange(10, 100, 10):
        segments = felzenszwalb(coffee, min_size=min_size, sigma=3)
        counts = np.bincount(segments.ravel())
        # actually want to test greater or equal.
        assert_greater(counts.min() + 1, min_size)


def test_3D():
    grey_img = np.zeros((10, 10))
    rgb_img = np.zeros((10, 10, 3))
    three_d_img = np.zeros((10, 10, 10))
    with assert_no_warnings():
        felzenszwalb(grey_img, multichannel=True)
        felzenszwalb(grey_img, multichannel=False)
        felzenszwalb(rgb_img, multichannel=True)
    with assert_warns(RuntimeWarning):
        felzenszwalb(three_d_img, multichannel=True)
    with testing.raises(ValueError):
        felzenszwalb(rgb_img, multichannel=False)
        felzenszwalb(three_d_img, multichannel=False)


def test_color():
    # very weak tests.
    img = np.zeros((20, 21, 3))
    img[:10, :10, 0] = 1
    img[10:, :10, 1] = 1
    img[10:, 10:, 2] = 1
    seg = felzenszwalb(img, sigma=0)
    # we expect 4 segments:
    assert_equal(len(np.unique(seg)), 4)
    assert_array_equal(seg[:10, :10], 0)
    assert_array_equal(seg[10:, :10], 2)
    assert_array_equal(seg[:10, 10:], 1)
    assert_array_equal(seg[10:, 10:], 3)


def test_merging():
    # test region merging in the post-processing step
    img = np.array([[0, 0.3], [0.7, 1]])
    # With scale=0, only the post-processing is performed.
    seg = felzenszwalb(img, scale=0, sigma=0, min_size=2)
    # we expect 2 segments:
    assert_equal(len(np.unique(seg)), 2)
    assert_array_equal(seg[0, :], 0)
    assert_array_equal(seg[1, :], 1)
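A standalone sketch of the felzenszwalb call pattern exercised above. The scale=100 and min_size=50 values are illustrative choices, not taken from the tests; the parameters themselves (scale, sigma, min_size) are the ones the tests use:

import numpy as np
from skimage import data
from skimage.segmentation import felzenszwalb

img = data.coffee()[::4, ::4]  # downsampled RGB sample image
# scale trades off segment size, sigma pre-smooths, min_size merges tiny regions.
segments = felzenszwalb(img, scale=100, sigma=3, min_size=50)
print(len(np.unique(segments)), 'segments')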
211  venv/Lib/site-packages/skimage/segmentation/tests/test_join.py  Normal file
@@ -0,0 +1,211 @@
import numpy as np
from skimage.segmentation import join_segmentations, relabel_sequential

from skimage._shared import testing
from skimage._shared.testing import assert_array_equal
import pytest


def test_join_segmentations():
    s1 = np.array([[0, 0, 1, 1],
                   [0, 2, 1, 1],
                   [2, 2, 2, 1]])
    s2 = np.array([[0, 1, 1, 0],
                   [0, 1, 1, 0],
                   [0, 1, 1, 1]])

    # test correct join
    # NOTE: technically, equality to j_ref is not required, only that there
    # is a one-to-one mapping between j and j_ref. I don't know of an easy way
    # to check this (i.e. not as error-prone as the function being tested)
    j = join_segmentations(s1, s2)
    j_ref = np.array([[0, 1, 3, 2],
                      [0, 5, 3, 2],
                      [4, 5, 5, 3]])
    assert_array_equal(j, j_ref)

    # test correct exception when arrays are different shapes
    s3 = np.array([[0, 0, 1, 1], [0, 2, 2, 1]])
    with testing.raises(ValueError):
        join_segmentations(s1, s3)


def _check_maps(ar, ar_relab, fw, inv):
    assert_array_equal(fw[ar], ar_relab)
    assert_array_equal(inv[ar_relab], ar)


def test_relabel_sequential_offset1():
    ar = np.array([1, 1, 5, 5, 8, 99, 42])
    ar_relab, fw, inv = relabel_sequential(ar)
    _check_maps(ar, ar_relab, fw, inv)
    ar_relab_ref = np.array([1, 1, 2, 2, 3, 5, 4])
    assert_array_equal(ar_relab, ar_relab_ref)
    fw_ref = np.zeros(100, int)
    fw_ref[1] = 1
    fw_ref[5] = 2
    fw_ref[8] = 3
    fw_ref[42] = 4
    fw_ref[99] = 5
    assert_array_equal(fw, fw_ref)
    inv_ref = np.array([0, 1, 5, 8, 42, 99])
    assert_array_equal(inv, inv_ref)


def test_relabel_sequential_offset5():
    ar = np.array([1, 1, 5, 5, 8, 99, 42])
    ar_relab, fw, inv = relabel_sequential(ar, offset=5)
    _check_maps(ar, ar_relab, fw, inv)
    ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8])
    assert_array_equal(ar_relab, ar_relab_ref)
    fw_ref = np.zeros(100, int)
    fw_ref[1] = 5
    fw_ref[5] = 6
    fw_ref[8] = 7
    fw_ref[42] = 8
    fw_ref[99] = 9
    assert_array_equal(fw, fw_ref)
    inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99])
    assert_array_equal(inv, inv_ref)


def test_relabel_sequential_offset5_with0():
    ar = np.array([1, 1, 5, 5, 8, 99, 42, 0])
    ar_relab, fw, inv = relabel_sequential(ar, offset=5)
    _check_maps(ar, ar_relab, fw, inv)
    ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8, 0])
    assert_array_equal(ar_relab, ar_relab_ref)
    fw_ref = np.zeros(100, int)
    fw_ref[1] = 5
    fw_ref[5] = 6
    fw_ref[8] = 7
    fw_ref[42] = 8
    fw_ref[99] = 9
    assert_array_equal(fw, fw_ref)
    inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99])
    assert_array_equal(inv, inv_ref)


def test_relabel_sequential_dtype():
    ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=np.uint8)
    ar_relab, fw, inv = relabel_sequential(ar, offset=5)
    _check_maps(ar.astype(int), ar_relab, fw, inv)
    ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8, 0])
    assert_array_equal(ar_relab, ar_relab_ref)
    fw_ref = np.zeros(100, int)
    fw_ref[1] = 5
    fw_ref[5] = 6
    fw_ref[8] = 7
    fw_ref[42] = 8
    fw_ref[99] = 9
    assert_array_equal(fw, fw_ref)
    inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99])
    assert_array_equal(inv, inv_ref)


def test_relabel_sequential_signed_overflow():
    imax = np.iinfo(np.int32).max
    labels = np.array([0, 1, 99, 42, 42], dtype=np.int32)
    output, fw, inv = relabel_sequential(labels, offset=imax)
    reference = np.array([0, imax, imax + 2, imax + 1, imax + 1],
                         dtype=np.uint32)
    assert_array_equal(output, reference)
    assert output.dtype == reference.dtype


def test_very_large_labels():
    imax = np.iinfo(np.int64).max
    labels = np.array([0, 1, imax, 42, 42], dtype=np.int64)
    output, fw, inv = relabel_sequential(labels, offset=imax)
    assert np.max(output) == imax + 2


@pytest.mark.parametrize('dtype', (np.byte, np.short, np.intc, np.int_,
                                   np.longlong, np.ubyte, np.ushort,
                                   np.uintc, np.uint, np.ulonglong))
@pytest.mark.parametrize('data_already_sequential', (False, True))
def test_relabel_sequential_int_dtype_stability(data_already_sequential,
                                                dtype):
    if data_already_sequential:
        ar = np.array([1, 3, 0, 2, 5, 4], dtype=dtype)
    else:
        ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=dtype)
    assert all(a.dtype == dtype for a in relabel_sequential(ar))


def test_relabel_sequential_int_dtype_overflow():
    ar = np.array([1, 3, 0, 2, 5, 4], dtype=np.uint8)
    offset = 254
    ar_relab, fw, inv = relabel_sequential(ar, offset=offset)
    _check_maps(ar, ar_relab, fw, inv)
    assert all(a.dtype == np.uint16 for a in (ar_relab, fw))
    assert inv.dtype == ar.dtype
    ar_relab_ref = np.where(ar > 0, ar.astype(np.int) + offset - 1, 0)
    assert_array_equal(ar_relab, ar_relab_ref)


def test_relabel_sequential_negative_values():
    ar = np.array([1, 1, 5, -5, 8, 99, 42, 0])
    with pytest.raises(ValueError):
        relabel_sequential(ar)


@pytest.mark.parametrize('offset', (0, -3))
@pytest.mark.parametrize('data_already_sequential', (False, True))
def test_relabel_sequential_nonpositive_offset(data_already_sequential,
                                               offset):
    if data_already_sequential:
        ar = np.array([1, 3, 0, 2, 5, 4])
    else:
        ar = np.array([1, 1, 5, 5, 8, 99, 42, 0])
    with pytest.raises(ValueError):
        relabel_sequential(ar, offset=offset)


@pytest.mark.parametrize('offset', (1, 5))
@pytest.mark.parametrize('with0', (False, True))
@pytest.mark.parametrize('input_starts_at_offset', (False, True))
def test_relabel_sequential_already_sequential(offset, with0,
                                               input_starts_at_offset):
    if with0:
        ar = np.array([1, 3, 0, 2, 5, 4])
    else:
        ar = np.array([1, 3, 2, 5, 4])
    if input_starts_at_offset:
        ar[ar > 0] += offset - 1
    ar_relab, fw, inv = relabel_sequential(ar, offset=offset)
    _check_maps(ar, ar_relab, fw, inv)
    if input_starts_at_offset:
        ar_relab_ref = ar
    else:
        ar_relab_ref = np.where(ar > 0, ar + offset - 1, 0)
    assert_array_equal(ar_relab, ar_relab_ref)


def test_incorrect_input_dtype():
    labels = np.array([0, 2, 2, 1, 1, 8], dtype=float)
    with testing.raises(TypeError):
        _ = relabel_sequential(labels)


def test_arraymap_call():
    ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=np.intp)
    relabeled, fw, inv = relabel_sequential(ar)
    testing.assert_array_equal(relabeled, fw(ar))
    testing.assert_array_equal(ar, inv(relabeled))


def test_arraymap_len():
    ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=np.intp)
    relabeled, fw, inv = relabel_sequential(ar)
    assert len(fw) == 100
    assert len(fw) == len(np.array(fw))
    assert len(inv) == 6
    assert len(inv) == len(np.array(inv))


def test_arraymap_set():
    ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=np.intp)
    relabeled, fw, inv = relabel_sequential(ar)
    fw[72] = 6
    assert fw[72] == 6
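A minimal sketch combining the two functions tested above: join_segmentations to intersect two label images and relabel_sequential to compact the resulting labels (fixtures copied from test_join_segmentations):

import numpy as np
from skimage.segmentation import join_segmentations, relabel_sequential

s1 = np.array([[0, 0, 1, 1],
               [0, 2, 1, 1],
               [2, 2, 2, 1]])
s2 = np.array([[0, 1, 1, 0],
               [0, 1, 1, 0],
               [0, 1, 1, 1]])
j = join_segmentations(s1, s2)            # one label per (s1, s2) pair
relab, fw, inv = relabel_sequential(j, offset=1)
print(np.unique(relab))                   # consecutive labels starting at offset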
@@ -0,0 +1,149 @@
import numpy as np
from skimage.segmentation import (morphological_chan_vese,
                                  morphological_geodesic_active_contour,
                                  inverse_gaussian_gradient,
                                  circle_level_set,
                                  disk_level_set)

from skimage._shared import testing
from skimage._shared.testing import assert_array_equal
from skimage._shared._warnings import expected_warnings


def gaussian_blob():
    coords = np.mgrid[-5:6, -5:6]
    sqrdistances = (coords ** 2).sum(0)
    return np.exp(-sqrdistances / 10)


def test_morphsnakes_incorrect_image_shape():
    img = np.zeros((10, 10, 3))
    ls = np.zeros((10, 9))

    with testing.raises(ValueError):
        morphological_chan_vese(img, iterations=1, init_level_set=ls)
    with testing.raises(ValueError):
        morphological_geodesic_active_contour(img, iterations=1,
                                              init_level_set=ls)


def test_morphsnakes_incorrect_ndim():
    img = np.zeros((4, 4, 4, 4))
    ls = np.zeros((4, 4, 4, 4))

    with testing.raises(ValueError):
        morphological_chan_vese(img, iterations=1, init_level_set=ls)
    with testing.raises(ValueError):
        morphological_geodesic_active_contour(img, iterations=1,
                                              init_level_set=ls)


def test_morphsnakes_black():
    img = np.zeros((11, 11))
    ls = disk_level_set(img.shape, center=(5, 5), radius=3)

    ref_zeros = np.zeros(img.shape, dtype=np.int8)
    ref_ones = np.ones(img.shape, dtype=np.int8)

    acwe_ls = morphological_chan_vese(img, iterations=6, init_level_set=ls)
    assert_array_equal(acwe_ls, ref_zeros)

    gac_ls = morphological_geodesic_active_contour(img, iterations=6,
                                                   init_level_set=ls)
    assert_array_equal(gac_ls, ref_zeros)

    gac_ls2 = morphological_geodesic_active_contour(img, iterations=6,
                                                    init_level_set=ls,
                                                    balloon=1, threshold=-1,
                                                    smoothing=0)
    assert_array_equal(gac_ls2, ref_ones)

    assert acwe_ls.dtype == gac_ls.dtype == gac_ls2.dtype == np.int8


def test_morphsnakes_simple_shape_chan_vese():
    img = gaussian_blob()
    ls1 = disk_level_set(img.shape, center=(5, 5), radius=3)
    ls2 = disk_level_set(img.shape, center=(5, 5), radius=6)

    acwe_ls1 = morphological_chan_vese(img, iterations=10, init_level_set=ls1)
    acwe_ls2 = morphological_chan_vese(img, iterations=10, init_level_set=ls2)

    assert_array_equal(acwe_ls1, acwe_ls2)

    assert acwe_ls1.dtype == acwe_ls2.dtype == np.int8


def test_morphsnakes_simple_shape_geodesic_active_contour():
    img = np.float_(disk_level_set((11, 11), center=(5, 5), radius=3.5))
    gimg = inverse_gaussian_gradient(img, alpha=10.0, sigma=1.0)
    ls = disk_level_set(img.shape, center=(5, 5), radius=6)

    ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                    [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                   dtype=np.int8)

    gac_ls = morphological_geodesic_active_contour(gimg, iterations=10,
                                                   init_level_set=ls,
                                                   balloon=-1)
    assert_array_equal(gac_ls, ref)
    assert gac_ls.dtype == np.int8


def test_deprecated_circle_level_set():
    img = gaussian_blob()
    with expected_warnings(['circle_level_set is deprecated']):
        ls1 = circle_level_set(img.shape, (5, 5), 3)


def test_init_level_sets():
    image = np.zeros((6, 6))
    checkerboard_ls = morphological_chan_vese(image, 0, 'checkerboard')
    checkerboard_ref = np.array([[0, 0, 0, 0, 0, 1],
                                 [0, 0, 0, 0, 0, 1],
                                 [0, 0, 0, 0, 0, 1],
                                 [0, 0, 0, 0, 0, 1],
                                 [0, 0, 0, 0, 0, 1],
                                 [1, 1, 1, 1, 1, 0]], dtype=np.int8)

    disk_ls = morphological_geodesic_active_contour(image, 0, 'disk')
    disk_ref = np.array([[0, 0, 0, 0, 0, 0],
                         [0, 0, 1, 1, 1, 0],
                         [0, 1, 1, 1, 1, 1],
                         [0, 1, 1, 1, 1, 1],
                         [0, 1, 1, 1, 1, 1],
                         [0, 0, 1, 1, 1, 0]], dtype=np.int8)

    assert_array_equal(checkerboard_ls, checkerboard_ref)
    assert_array_equal(disk_ls, disk_ref)


def test_morphsnakes_3d():
    image = np.zeros((7, 7, 7))

    evolution = []

    def callback(x):
        evolution.append(x.sum())

    ls = morphological_chan_vese(image, 5, 'disk',
                                 iter_callback=callback)

    # Check that the initial disk level set is correct
    assert evolution[0] == 81

    # Check that the final level set is correct
    assert ls.sum() == 0

    # Check that the contour is shrinking at every iteration
    for v1, v2 in zip(evolution[:-1], evolution[1:]):
        assert v1 >= v2
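A standalone sketch of the geodesic active contour pipeline the tests above exercise: build an inverse-Gaussian-gradient edge map, seed a disk level set, and shrink it with a negative balloon force (values mirror test_morphsnakes_simple_shape_geodesic_active_contour):

import numpy as np
from skimage.segmentation import (morphological_chan_vese,
                                  morphological_geodesic_active_contour,
                                  inverse_gaussian_gradient,
                                  disk_level_set)

img = np.float_(disk_level_set((11, 11), center=(5, 5), radius=3.5))
gimg = inverse_gaussian_gradient(img, alpha=10.0, sigma=1.0)
init = disk_level_set(img.shape, center=(5, 5), radius=6)
# balloon < 0 shrinks the contour toward the edge map at each iteration.
gac = morphological_geodesic_active_contour(gimg, iterations=10,
                                            init_level_set=init, balloon=-1)
acwe = morphological_chan_vese(img, iterations=10,
                               init_level_set='checkerboard')
print(gac.sum(), acwe.dtype)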
@@ -0,0 +1,49 @@
import numpy as np
from skimage.segmentation import quickshift

from skimage._shared.testing import (assert_greater, test_parallel,
                                     assert_equal, assert_array_equal)


@test_parallel()
def test_grey():
    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21))
    img[:10, 10:] = 0.2
    img[10:, :10] = 0.4
    img[10:, 10:] = 0.6
    img += 0.1 * rnd.normal(size=img.shape)
    seg = quickshift(img, kernel_size=2, max_dist=3, random_seed=0,
                     convert2lab=False, sigma=0)
    # we expect 4 segments:
    assert_equal(len(np.unique(seg)), 4)
    # that mostly respect the 4 regions:
    for i in range(4):
        hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0]
        assert_greater(hist[i], 20)


def test_color():
    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21, 3))
    img[:10, :10, 0] = 1
    img[10:, :10, 1] = 1
    img[10:, 10:, 2] = 1
    img += 0.01 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = quickshift(img, random_seed=0, max_dist=30, kernel_size=10, sigma=0)
    # we expect 4 segments:
    assert_equal(len(np.unique(seg)), 4)
    assert_array_equal(seg[:10, :10], 1)
    assert_array_equal(seg[10:, :10], 2)
    assert_array_equal(seg[:10, 10:], 0)
    assert_array_equal(seg[10:, 10:], 3)

    seg2 = quickshift(img, kernel_size=1, max_dist=2, random_seed=0,
                      convert2lab=False, sigma=0)
    # very oversegmented:
    assert_equal(len(np.unique(seg2)), 7)
    # still don't cross lines
    assert (seg2[9, :] != seg2[10, :]).all()
    assert (seg2[:, 9] != seg2[:, 10]).all()
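The quickshift call pattern above, reduced to a minimal sketch on the same synthetic RGB blocks (the kernel_size and max_dist values are the ones test_color uses):

import numpy as np
from skimage.segmentation import quickshift

rnd = np.random.RandomState(0)
img = np.zeros((20, 21, 3))
img[:10, :10, 0] = 1
img[10:, :10, 1] = 1
img[10:, 10:, 2] = 1
img += 0.01 * rnd.normal(size=img.shape)
img = np.clip(img, 0, 1)
# Larger kernel_size / max_dist produce fewer, larger segments.
seg = quickshift(img, kernel_size=10, max_dist=30, random_seed=0, sigma=0)
print(len(np.unique(seg)))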
@@ -0,0 +1,439 @@
import numpy as np
from skimage.segmentation import random_walker
from skimage.transform import resize
from skimage._shared._warnings import expected_warnings
from skimage._shared import testing
from skimage._shared.testing import xfail, arch32
import scipy
from distutils.version import LooseVersion as Version


# older versions of scipy raise a warning with new NumPy because they use
# numpy.rank() instead of arr.ndim or numpy.linalg.matrix_rank.
SCIPY_RANK_WARNING = r'numpy.linalg.matrix_rank|\A\Z'
PYAMG_MISSING_WARNING = r'pyamg|\A\Z'
PYAMG_OR_SCIPY_WARNING = SCIPY_RANK_WARNING + '|' + PYAMG_MISSING_WARNING

if Version(scipy.__version__) < '1.3':
    NUMPY_MATRIX_WARNING = 'matrix subclass'
else:
    NUMPY_MATRIX_WARNING = None


def make_2d_syntheticdata(lx, ly=None):
    if ly is None:
        ly = lx
    np.random.seed(1234)
    data = np.zeros((lx, ly)) + 0.1 * np.random.randn(lx, ly)
    small_l = int(lx // 5)
    data[lx // 2 - small_l:lx // 2 + small_l,
         ly // 2 - small_l:ly // 2 + small_l] = 1
    data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
         ly // 2 - small_l + 1:ly // 2 + small_l - 1] = (
             0.1 * np.random.randn(2 * small_l - 2, 2 * small_l - 2))
    data[lx // 2 - small_l, ly // 2 - small_l // 8:ly // 2 + small_l // 8] = 0
    seeds = np.zeros_like(data)
    seeds[lx // 5, ly // 5] = 1
    seeds[lx // 2 + small_l // 4, ly // 2 - small_l // 4] = 2
    return data, seeds


def make_3d_syntheticdata(lx, ly=None, lz=None):
    if ly is None:
        ly = lx
    if lz is None:
        lz = lx
    np.random.seed(1234)
    data = np.zeros((lx, ly, lz)) + 0.1 * np.random.randn(lx, ly, lz)
    small_l = int(lx // 5)
    data[lx // 2 - small_l:lx // 2 + small_l,
         ly // 2 - small_l:ly // 2 + small_l,
         lz // 2 - small_l:lz // 2 + small_l] = 1
    data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
         ly // 2 - small_l + 1:ly // 2 + small_l - 1,
         lz // 2 - small_l + 1:lz // 2 + small_l - 1] = 0
    # make a hole
    hole_size = np.max([1, small_l // 8])
    data[lx // 2 - small_l,
         ly // 2 - hole_size:ly // 2 + hole_size,
         lz // 2 - hole_size:lz // 2 + hole_size] = 0
    seeds = np.zeros_like(data)
    seeds[lx // 5, ly // 5, lz // 5] = 1
    seeds[lx // 2 + small_l // 4,
          ly // 2 - small_l // 4,
          lz // 2 - small_l // 4] = 2
    return data, seeds


def test_2d_bf():
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    with expected_warnings([NUMPY_MATRIX_WARNING]):
        labels_bf = random_walker(data, labels, beta=90, mode='bf')
    assert (labels_bf[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    with expected_warnings([NUMPY_MATRIX_WARNING]):
        full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
                                     return_full_prob=True)
    assert (full_prob_bf[1, 25:45, 40:60] >=
            full_prob_bf[0, 25:45, 40:60]).all()
    assert data.shape == labels.shape
    # Now test with more than two labels
    labels[55, 80] = 3
    with expected_warnings([NUMPY_MATRIX_WARNING]):
        full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
                                     return_full_prob=True)
    assert (full_prob_bf[1, 25:45, 40:60] >=
            full_prob_bf[0, 25:45, 40:60]).all()
    assert len(full_prob_bf) == 3
    assert data.shape == labels.shape


def test_2d_cg():
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        labels_cg = random_walker(data, labels, beta=90, mode='cg')
    assert (labels_cg[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        full_prob = random_walker(data, labels, beta=90, mode='cg',
                                  return_full_prob=True)
    assert (full_prob[1, 25:45, 40:60] >=
            full_prob[0, 25:45, 40:60]).all()
    assert data.shape == labels.shape
    return data, labels_cg


def test_2d_cg_mg():
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    anticipated_warnings = [
        'scipy.sparse.sparsetools|%s' % PYAMG_OR_SCIPY_WARNING,
        NUMPY_MATRIX_WARNING]
    with expected_warnings(anticipated_warnings):
        labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
    assert (labels_cg_mg[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    with expected_warnings(anticipated_warnings):
        full_prob = random_walker(data, labels, beta=90, mode='cg_mg',
                                  return_full_prob=True)
    assert (full_prob[1, 25:45, 40:60] >=
            full_prob[0, 25:45, 40:60]).all()
    assert data.shape == labels.shape
    return data, labels_cg_mg


def test_2d_cg_j():
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    with expected_warnings([NUMPY_MATRIX_WARNING]):
        labels_cg = random_walker(data, labels, beta=90, mode='cg_j')
    assert (labels_cg[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    with expected_warnings([NUMPY_MATRIX_WARNING]):
        full_prob = random_walker(data, labels, beta=90, mode='cg_j',
                                  return_full_prob=True)
    assert (full_prob[1, 25:45, 40:60]
            >= full_prob[0, 25:45, 40:60]).all()
    assert data.shape == labels.shape


def test_types():
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    data = 255 * (data - data.min()) // (data.max() - data.min())
    data = data.astype(np.uint8)
    with expected_warnings([PYAMG_OR_SCIPY_WARNING, NUMPY_MATRIX_WARNING]):
        labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
    assert (labels_cg_mg[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    return data, labels_cg_mg


def test_reorder_labels():
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    labels[labels == 2] = 4
    with expected_warnings([NUMPY_MATRIX_WARNING]):
        labels_bf = random_walker(data, labels, beta=90, mode='bf')
    assert (labels_bf[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    return data, labels_bf


def test_2d_inactive():
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    labels[10:20, 10:20] = -1
    labels[46:50, 33:38] = -2
    with expected_warnings([NUMPY_MATRIX_WARNING]):
        labels = random_walker(data, labels, beta=90)
    assert (labels.reshape((lx, ly))[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    return data, labels


def test_3d():
    n = 30
    lx, ly, lz = n, n, n
    data, labels = make_3d_syntheticdata(lx, ly, lz)
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        labels = random_walker(data, labels, mode='cg')
    assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
    assert data.shape == labels.shape
    return data, labels


def test_3d_inactive():
    n = 30
    lx, ly, lz = n, n, n
    data, labels = make_3d_syntheticdata(lx, ly, lz)
    old_labels = np.copy(labels)
    labels[5:25, 26:29, 26:29] = -1
    after_labels = np.copy(labels)
    with expected_warnings(['"cg" mode|CObject type' + '|'
                            + SCIPY_RANK_WARNING, NUMPY_MATRIX_WARNING]):
        labels = random_walker(data, labels, mode='cg')
    assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
    assert data.shape == labels.shape
    return data, labels, old_labels, after_labels


def test_multispectral_2d():
    lx, ly = 70, 100
    data, labels = make_2d_syntheticdata(lx, ly)
    data = data[..., np.newaxis].repeat(2, axis=-1)  # Expect identical output
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        multi_labels = random_walker(data, labels, mode='cg',
                                     multichannel=True)
    assert data[..., 0].shape == labels.shape
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        single_labels = random_walker(data[..., 0], labels, mode='cg')
    assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()
    assert data[..., 0].shape == labels.shape
    return data, multi_labels, single_labels, labels


def test_multispectral_3d():
    n = 30
    lx, ly, lz = n, n, n
    data, labels = make_3d_syntheticdata(lx, ly, lz)
    data = data[..., np.newaxis].repeat(2, axis=-1)  # Expect identical output
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        multi_labels = random_walker(data, labels, mode='cg',
                                     multichannel=True)
    assert data[..., 0].shape == labels.shape
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        single_labels = random_walker(data[..., 0], labels, mode='cg')
    assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
    assert (single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
    assert data[..., 0].shape == labels.shape
    return data, multi_labels, single_labels, labels


def test_spacing_0():
    n = 30
    lx, ly, lz = n, n, n
    data, _ = make_3d_syntheticdata(lx, ly, lz)

    # Rescale `data` along Z axis
    data_aniso = np.zeros((n, n, n // 2))
    for i, yz in enumerate(data):
        data_aniso[i, :, :] = resize(yz, (n, n // 2),
                                     mode='constant',
                                     anti_aliasing=False)

    # Generate new labels
    small_l = int(lx // 5)
    labels_aniso = np.zeros_like(data_aniso)
    labels_aniso[lx // 5, ly // 5, lz // 5] = 1
    labels_aniso[lx // 2 + small_l // 4,
                 ly // 2 - small_l // 4,
                 lz // 4 - small_l // 8] = 2

    # Test with `spacing` kwarg
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
                                     spacing=(1., 1., 0.5))

    assert (labels_aniso[13:17, 13:17, 7:9] == 2).all()


@xfail(condition=arch32,
       reason=('Known test failure on 32-bit platforms. See links for '
               'details: '
               'https://github.com/scikit-image/scikit-image/issues/3091 '
               'https://github.com/scikit-image/scikit-image/issues/3092'))
def test_spacing_1():
    n = 30
    lx, ly, lz = n, n, n
    data, _ = make_3d_syntheticdata(lx, ly, lz)

    # Rescale `data` along Y axis
    # `resize` is not yet 3D capable, so this must be done by looping in 2D.
    data_aniso = np.zeros((n, n * 2, n))
    for i, yz in enumerate(data):
        data_aniso[i, :, :] = resize(yz, (n * 2, n),
                                     mode='constant',
                                     anti_aliasing=False)

    # Generate new labels
    small_l = int(lx // 5)
    labels_aniso = np.zeros_like(data_aniso)
    labels_aniso[lx // 5, ly // 5, lz // 5] = 1
    labels_aniso[lx // 2 + small_l // 4,
                 ly - small_l // 2,
                 lz // 2 - small_l // 4] = 2

    # Test with `spacing` kwarg
    # First, anisotropic along Y
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
                                     spacing=(1., 2., 1.))
    assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()

    # Rescale `data` along X axis
    # `resize` is not yet 3D capable, so this must be done by looping in 2D.
    data_aniso = np.zeros((n, n * 2, n))
    for i in range(data.shape[1]):
        data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n),
                                     mode='constant',
                                     anti_aliasing=False)

    # Generate new labels
    small_l = int(lx // 5)
    labels_aniso2 = np.zeros_like(data_aniso)
    labels_aniso2[lx // 5, ly // 5, lz // 5] = 1
    labels_aniso2[lx - small_l // 2,
                  ly // 2 + small_l // 4,
                  lz // 2 - small_l // 4] = 2

    # Anisotropic along X
    with expected_warnings(['"cg" mode' + '|' + SCIPY_RANK_WARNING,
                            NUMPY_MATRIX_WARNING]):
        labels_aniso2 = random_walker(data_aniso,
                                      labels_aniso2,
                                      mode='cg', spacing=(2., 1., 1.))
    assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all()


def test_trivial_cases():
    # When all voxels are labeled
    img = np.ones((10, 10))
    labels = np.ones((10, 10))

    with expected_warnings(["Returning provided labels"]):
        pass_through = random_walker(img, labels)
    np.testing.assert_array_equal(pass_through, labels)

    # When all voxels are labeled AND return_full_prob is True
    labels[:, :5] = 3
    expected = np.concatenate(((labels == 1)[..., np.newaxis],
                               (labels == 3)[..., np.newaxis]), axis=2)
    with expected_warnings(["Returning provided labels"]):
        test = random_walker(img, labels, return_full_prob=True)
    np.testing.assert_array_equal(test, expected)

    # Unlabeled voxels not connected to seed, so nothing can be done
    img = np.full((10, 10), False)
    object_A = np.array([(6,7), (6,8), (7,7), (7,8)])
    object_B = np.array([(3,1), (4,1), (2,2), (3,2), (4,2), (2,3), (3,3)])
    for x, y in np.vstack((object_A, object_B)):
        img[y][x] = True

    markers = np.zeros((10, 10), dtype=np.int8)
    for x, y in object_B:
        markers[y][x] = 1

    markers[img == 0] = -1
    with expected_warnings(["All unlabeled pixels are isolated"]):
        output_labels = random_walker(img, markers)
    assert np.all(output_labels[markers == 1] == 1)
    # Here 0-labeled pixels could not be determined (no connexion to seed)
    assert np.all(output_labels[markers == 0] == -1)
    with expected_warnings(["All unlabeled pixels are isolated"]):
        test = random_walker(img, markers, return_full_prob=True)


def test_length2_spacing():
    # If this passes without raising an exception (warnings OK), the new
    # spacing code is working properly.
    np.random.seed(42)
    img = np.ones((10, 10)) + 0.2 * np.random.normal(size=(10, 10))
    labels = np.zeros((10, 10), dtype=np.uint8)
    labels[2, 4] = 1
    labels[6, 8] = 4
    with expected_warnings([NUMPY_MATRIX_WARNING]):
        random_walker(img, labels, spacing=(1., 2.))


def test_bad_inputs():
    # Too few dimensions
    img = np.ones(10)
    labels = np.arange(10)
    with testing.raises(ValueError):
        random_walker(img, labels)
    with testing.raises(ValueError):
        random_walker(img, labels, multichannel=True)

    # Too many dimensions
    np.random.seed(42)
    img = np.random.normal(size=(3, 3, 3, 3, 3))
    labels = np.arange(3 ** 5).reshape(img.shape)
    with testing.raises(ValueError):
        random_walker(img, labels)
    with testing.raises(ValueError):
        random_walker(img, labels, multichannel=True)

    # Spacing incorrect length
    img = np.random.normal(size=(10, 10))
    labels = np.zeros((10, 10))
    labels[2, 4] = 2
    labels[6, 8] = 5
    with testing.raises(ValueError):
        random_walker(img, labels, spacing=(1,))

    # Invalid mode
    img = np.random.normal(size=(10, 10))
    labels = np.zeros((10, 10))
    with testing.raises(ValueError):
        random_walker(img, labels, mode='bad')


def test_isolated_seeds():
    np.random.seed(0)
    a = np.random.random((7, 7))
    mask = - np.ones(a.shape)
    # This pixel is an isolated seed
    mask[1, 1] = 1
    # Unlabeled pixels
    mask[3:, 3:] = 0
    # Seeds connected to unlabeled pixels
    mask[4, 4] = 2
    mask[6, 6] = 1

    # Test that no error is raised, and that labels of isolated seeds are OK
    with expected_warnings([NUMPY_MATRIX_WARNING]):
        res = random_walker(a, mask)
    assert res[1, 1] == 1
    with expected_warnings([NUMPY_MATRIX_WARNING]):
        res = random_walker(a, mask, return_full_prob=True)
    assert res[0, 1, 1] == 1
    assert res[1, 1, 1] == 0
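A minimal standalone sketch of the random_walker call pattern the tests above exercise, on a simpler synthetic image than make_2d_syntheticdata builds; the bright square, seed placement and the beta/mode values are illustrative choices drawn from the parameter set shown above:

import numpy as np
from skimage.segmentation import random_walker

np.random.seed(0)
data = np.zeros((40, 40)) + 0.1 * np.random.randn(40, 40)
data[10:30, 10:30] += 1.0                 # bright square to segment
markers = np.zeros_like(data, dtype=np.uint8)
markers[5, 5] = 1                         # background seed
markers[20, 20] = 2                       # foreground seed
# beta controls edge sensitivity; mode='bf' solves the linear system directly.
labels = random_walker(data, markers, beta=90, mode='bf')
print((labels == 2).sum())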
474
venv/Lib/site-packages/skimage/segmentation/tests/test_slic.py
Normal file
474
venv/Lib/site-packages/skimage/segmentation/tests/test_slic.py
Normal file
|
@ -0,0 +1,474 @@
|
|||
from itertools import product

import pytest
import numpy as np
from skimage.segmentation import slic

from skimage._shared import testing
from skimage._shared.testing import test_parallel, assert_equal


@test_parallel()
def test_color_2d():
    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21, 3))
    img[:10, :10, 0] = 1
    img[10:, :10, 1] = 1
    img[10:, 10:, 2] = 1
    img += 0.01 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, n_segments=4, sigma=0, enforce_connectivity=False,
               start_label=0)

    # we expect 4 segments
    assert_equal(len(np.unique(seg)), 4)
    assert_equal(seg.shape, img.shape[:-1])
    assert_equal(seg[:10, :10], 0)
    assert_equal(seg[10:, :10], 2)
    assert_equal(seg[:10, 10:], 1)
    assert_equal(seg[10:, 10:], 3)


def test_multichannel_2d():
    rnd = np.random.RandomState(0)
    img = np.zeros((20, 20, 8))
    img[:10, :10, 0:2] = 1
    img[:10, 10:, 2:4] = 1
    img[10:, :10, 4:6] = 1
    img[10:, 10:, 6:8] = 1
    img += 0.01 * rnd.normal(size=img.shape)
    img = np.clip(img, 0, 1, out=img)
    seg = slic(img, n_segments=4, enforce_connectivity=False, start_label=0)

    # we expect 4 segments
    assert_equal(len(np.unique(seg)), 4)
    assert_equal(seg.shape, img.shape[:-1])
    assert_equal(seg[:10, :10], 0)
    assert_equal(seg[10:, :10], 2)
    assert_equal(seg[:10, 10:], 1)
    assert_equal(seg[10:, 10:], 3)


def test_gray_2d():
    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21))
    img[:10, :10] = 0.33
    img[10:, :10] = 0.67
    img[10:, 10:] = 1.00
    img += 0.0033 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, sigma=0, n_segments=4, compactness=1,
               multichannel=False, convert2lab=False, start_label=0)

    assert_equal(len(np.unique(seg)), 4)
    assert_equal(seg.shape, img.shape)
    assert_equal(seg[:10, :10], 0)
    assert_equal(seg[10:, :10], 2)
    assert_equal(seg[:10, 10:], 1)
    assert_equal(seg[10:, 10:], 3)


def test_color_3d():
    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21, 22, 3))
    slices = []
    for dim_size in img.shape[:-1]:
        midpoint = dim_size // 2
        slices.append((slice(None, midpoint), slice(midpoint, None)))
    slices = list(product(*slices))
    colors = list(product(*(([0, 1],) * 3)))
    for s, c in zip(slices, colors):
        img[s] = c
    img += 0.01 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, sigma=0, n_segments=8, start_label=0)

    assert_equal(len(np.unique(seg)), 8)
    for s, c in zip(slices, range(8)):
        assert_equal(seg[s], c)


def test_gray_3d():
    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21, 22))
    slices = []
    for dim_size in img.shape:
        midpoint = dim_size // 2
        slices.append((slice(None, midpoint), slice(midpoint, None)))
    slices = list(product(*slices))
    shades = np.arange(0, 1.000001, 1.0 / 7)
    for s, sh in zip(slices, shades):
        img[s] = sh
    img += 0.001 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, sigma=0, n_segments=8, compactness=1,
               multichannel=False, convert2lab=False, start_label=0)

    assert_equal(len(np.unique(seg)), 8)
    for s, c in zip(slices, range(8)):
        assert_equal(seg[s], c)


def test_list_sigma():
    rnd = np.random.RandomState(0)
    img = np.array([[1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1]], np.float)
    img += 0.1 * rnd.normal(size=img.shape)
    result_sigma = np.array([[0, 0, 0, 1, 1, 1],
                             [0, 0, 0, 1, 1, 1]], np.int)
    seg_sigma = slic(img, n_segments=2, sigma=[1, 50, 1],
                     multichannel=False, start_label=0)
    assert_equal(seg_sigma, result_sigma)


def test_spacing():
    rnd = np.random.RandomState(0)
    img = np.array([[1, 1, 1, 0, 0],
                    [1, 1, 0, 0, 0]], np.float)
    result_non_spaced = np.array([[0, 0, 0, 1, 1],
                                  [0, 0, 1, 1, 1]], np.int)
    result_spaced = np.array([[0, 0, 0, 0, 0],
                              [1, 1, 1, 1, 1]], np.int)
    img += 0.1 * rnd.normal(size=img.shape)
    seg_non_spaced = slic(img, n_segments=2, sigma=0, multichannel=False,
                          compactness=1.0, start_label=0)
    seg_spaced = slic(img, n_segments=2, sigma=0, spacing=[1, 500, 1],
                      compactness=1.0, multichannel=False, start_label=0)
    assert_equal(seg_non_spaced, result_non_spaced)
    assert_equal(seg_spaced, result_spaced)


def test_invalid_lab_conversion():
    img = np.array([[1, 1, 1, 0, 0],
                    [1, 1, 0, 0, 0]], np.float) + 1
    with testing.raises(ValueError):
        slic(img, multichannel=True, convert2lab=True, start_label=0)


def test_enforce_connectivity():
    img = np.array([[0, 0, 0, 1, 1, 1],
                    [1, 0, 0, 1, 1, 0],
                    [0, 0, 0, 1, 1, 0]], np.float)

    segments_connected = slic(img, 2, compactness=0.0001,
                              enforce_connectivity=True,
                              convert2lab=False, start_label=0)
    segments_disconnected = slic(img, 2, compactness=0.0001,
                                 enforce_connectivity=False,
                                 convert2lab=False, start_label=0)

    # Make sure nothing fatal occurs (e.g. buffer overflow) at low values of
    # max_size_factor
    segments_connected_low_max = slic(img, 2, compactness=0.0001,
                                      enforce_connectivity=True,
                                      convert2lab=False,
                                      max_size_factor=0.8,
                                      start_label=0)

    result_connected = np.array([[0, 0, 0, 1, 1, 1],
                                 [0, 0, 0, 1, 1, 1],
                                 [0, 0, 0, 1, 1, 1]], np.float)

    result_disconnected = np.array([[0, 0, 0, 1, 1, 1],
                                    [1, 0, 0, 1, 1, 0],
                                    [0, 0, 0, 1, 1, 0]], np.float)

    assert_equal(segments_connected, result_connected)
    assert_equal(segments_disconnected, result_disconnected)
    assert_equal(segments_connected_low_max, result_connected)


def test_slic_zero():
    # Same as test_color_2d but with slic_zero=True
    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21, 3))
    img[:10, :10, 0] = 1
    img[10:, :10, 1] = 1
    img[10:, 10:, 2] = 1
    img += 0.01 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, n_segments=4, sigma=0, slic_zero=True, start_label=0)

    # we expect 4 segments
    assert_equal(len(np.unique(seg)), 4)
    assert_equal(seg.shape, img.shape[:-1])
    assert_equal(seg[:10, :10], 0)
    assert_equal(seg[10:, :10], 2)
    assert_equal(seg[:10, 10:], 1)
    assert_equal(seg[10:, 10:], 3)


def test_more_segments_than_pixels():
    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21))
    img[:10, :10] = 0.33
    img[10:, :10] = 0.67
    img[10:, 10:] = 1.00
    img += 0.0033 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, sigma=0, n_segments=500, compactness=1,
               multichannel=False, convert2lab=False, start_label=0)
    assert np.all(seg.ravel() == np.arange(seg.size))


def test_color_2d_mask():
    rnd = np.random.RandomState(0)
    msk = np.zeros((20, 21))
    msk[2:-2, 2:-2] = 1
    img = np.zeros((20, 21, 3))
    img[:10, :10, 0] = 1
    img[10:, :10, 1] = 1
    img[10:, 10:, 2] = 1
    img += 0.01 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)
    seg = slic(img, n_segments=4, sigma=0, enforce_connectivity=False,
               mask=msk)

    # we expect 4 segments + masked area
    assert_equal(len(np.unique(seg)), 5)
    assert_equal(seg.shape, img.shape[:-1])
    # segments
    assert_equal(seg[2:10, 2:10], 1)
    assert_equal(seg[10:-2, 2:10], 4)
    assert_equal(seg[2:10, 10:-2], 2)
    assert_equal(seg[10:-2, 10:-2], 3)
    # non masked area
    assert_equal(seg[:2, :], 0)
    assert_equal(seg[-2:, :], 0)
    assert_equal(seg[:, :2], 0)
    assert_equal(seg[:, -2:], 0)


def test_multichannel_2d_mask():
    rnd = np.random.RandomState(0)
    msk = np.zeros((20, 20))
    msk[2:-2, 2:-2] = 1
    img = np.zeros((20, 20, 8))
    img[:10, :10, 0:2] = 1
    img[:10, 10:, 2:4] = 1
    img[10:, :10, 4:6] = 1
    img[10:, 10:, 6:8] = 1
    img += 0.01 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)
    seg = slic(img, n_segments=4, enforce_connectivity=False,
               mask=msk)

    # we expect 4 segments + masked area
    assert_equal(len(np.unique(seg)), 5)
    assert_equal(seg.shape, img.shape[:-1])
    # segments
    assert_equal(seg[2:10, 2:10], 2)
    assert_equal(seg[2:10, 10:-2], 1)
    assert_equal(seg[10:-2, 2:10], 4)
    assert_equal(seg[10:-2, 10:-2], 3)
    # non masked area
    assert_equal(seg[:2, :], 0)
    assert_equal(seg[-2:, :], 0)
    assert_equal(seg[:, :2], 0)
    assert_equal(seg[:, -2:], 0)


def test_gray_2d_mask():
    rnd = np.random.RandomState(0)
    msk = np.zeros((20, 21))
    msk[2:-2, 2:-2] = 1
    img = np.zeros((20, 21))
    img[:10, :10] = 0.33
    img[10:, :10] = 0.67
    img[10:, 10:] = 1.00
    img += 0.0033 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)
    seg = slic(img, sigma=0, n_segments=4, compactness=1,
               multichannel=False, convert2lab=False, mask=msk)

    assert_equal(len(np.unique(seg)), 5)
    assert_equal(seg.shape, img.shape)
    # segments
    assert_equal(seg[2:10, 2:10], 1)
    assert_equal(seg[2:10, 10:-2], 2)
    assert_equal(seg[10:-2, 2:10], 3)
    assert_equal(seg[10:-2, 10:-2], 4)
    # non masked area
    assert_equal(seg[:2, :], 0)
    assert_equal(seg[-2:, :], 0)
    assert_equal(seg[:, :2], 0)
    assert_equal(seg[:, -2:], 0)


def test_list_sigma_mask():
    rnd = np.random.RandomState(0)
    msk = np.zeros((2, 6))
    msk[:, 1:-1] = 1
    img = np.array([[1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1]], np.float)
    img += 0.1 * rnd.normal(size=img.shape)
    result_sigma = np.array([[0, 1, 1, 2, 2, 0],
                             [0, 1, 1, 2, 2, 0]], np.int)
    seg_sigma = slic(img, n_segments=2, sigma=[1, 50, 1],
                     multichannel=False, mask=msk)
    assert_equal(seg_sigma, result_sigma)


def test_spacing_mask():
    rnd = np.random.RandomState(0)
    msk = np.zeros((2, 5))
    msk[:, 1:-1] = 1
    img = np.array([[1, 1, 1, 0, 0],
                    [1, 1, 0, 0, 0]], np.float)
    result_non_spaced = np.array([[0, 1, 1, 2, 0],
                                  [0, 1, 2, 2, 0]], np.int)
    result_spaced = np.array([[0, 1, 1, 1, 0],
                              [0, 2, 2, 2, 0]], np.int)
    img += 0.1 * rnd.normal(size=img.shape)
    seg_non_spaced = slic(img, n_segments=2, sigma=0, multichannel=False,
                          compactness=1.0, mask=msk)
    seg_spaced = slic(img, n_segments=2, sigma=0, spacing=[1, 50, 1],
                      compactness=1.0, multichannel=False, mask=msk)
    assert_equal(seg_non_spaced, result_non_spaced)
    assert_equal(seg_spaced, result_spaced)


def test_enforce_connectivity_mask():
    msk = np.zeros((3, 6))
    msk[:, 1:-1] = 1
    img = np.array([[0, 0, 0, 1, 1, 1],
                    [1, 0, 0, 1, 1, 0],
                    [0, 0, 0, 1, 1, 0]], np.float)

    segments_connected = slic(img, 2, compactness=0.0001,
                              enforce_connectivity=True,
                              convert2lab=False, mask=msk)
    segments_disconnected = slic(img, 2, compactness=0.0001,
                                 enforce_connectivity=False,
                                 convert2lab=False, mask=msk)

    # Make sure nothing fatal occurs (e.g. buffer overflow) at low values of
    # max_size_factor
    segments_connected_low_max = slic(img, 2, compactness=0.0001,
                                      enforce_connectivity=True,
                                      convert2lab=False,
                                      max_size_factor=0.8, mask=msk)

    result_connected = np.array([[0, 1, 1, 2, 2, 0],
                                 [0, 1, 1, 2, 2, 0],
                                 [0, 1, 1, 2, 2, 0]], np.float)

    result_disconnected = np.array([[0, 1, 1, 2, 2, 0],
                                    [0, 1, 1, 2, 2, 0],
                                    [0, 1, 1, 2, 2, 0]], np.float)

    assert_equal(segments_connected, result_connected)
    assert_equal(segments_disconnected, result_disconnected)
    assert_equal(segments_connected_low_max, result_connected)


def test_slic_zero_mask():

    rnd = np.random.RandomState(0)
    msk = np.zeros((20, 21))
    msk[2:-2, 2:-2] = 1
    img = np.zeros((20, 21, 3))
    img[:10, :10, 0] = 1
    img[10:, :10, 1] = 1
    img[10:, 10:, 2] = 1
    img += 0.01 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)
    seg = slic(img, n_segments=4, sigma=0, slic_zero=True,
               mask=msk)

    # we expect 4 segments + masked area
    assert_equal(len(np.unique(seg)), 5)
    assert_equal(seg.shape, img.shape[:-1])
    # segments
    assert_equal(seg[2:10, 2:10], 1)
    assert_equal(seg[2:10, 10:-2], 2)
    assert_equal(seg[10:-2, 2:10], 3)
    assert_equal(seg[10:-2, 10:-2], 4)
    # non masked area
    assert_equal(seg[:2, :], 0)
    assert_equal(seg[-2:, :], 0)
    assert_equal(seg[:, :2], 0)
    assert_equal(seg[:, -2:], 0)


def test_more_segments_than_pixels_mask():
    rnd = np.random.RandomState(0)
    msk = np.zeros((20, 21))
    msk[2:-2, 2:-2] = 1
    img = np.zeros((20, 21))
    img[:10, :10] = 0.33
    img[10:, :10] = 0.67
    img[10:, 10:] = 1.00
    img += 0.0033 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)
    seg = slic(img, sigma=0, n_segments=500, compactness=1,
               multichannel=False, convert2lab=False, mask=msk)

    expected = np.arange(seg[2:-2, 2:-2].size) + 1
    assert np.all(seg[2:-2, 2:-2].ravel() == expected)


def test_color_3d_mask():

    msk = np.zeros((20, 21, 22))
    msk[2:-2, 2:-2, 2:-2] = 1

    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21, 22, 3))
    slices = []
    for dim_size in msk.shape:
        midpoint = dim_size // 2
        slices.append((slice(None, midpoint), slice(midpoint, None)))
    slices = list(product(*slices))
    colors = list(product(*(([0, 1],) * 3)))
    for s, c in zip(slices, colors):
        img[s] = c
    img += 0.01 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)

    seg = slic(img, sigma=0, n_segments=8, mask=msk)

    # we expect 8 segments + masked area
    assert_equal(len(np.unique(seg)), 9)
    for s, c in zip(slices, range(1, 9)):
        assert_equal(seg[s][2:-2, 2:-2, 2:-2], c)


def test_gray_3d_mask():

    msk = np.zeros((20, 21, 22))
    msk[2:-2, 2:-2, 2:-2] = 1

    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21, 22))
    slices = []
    for dim_size in img.shape:
        midpoint = dim_size // 2
        slices.append((slice(None, midpoint), slice(midpoint, None)))
    slices = list(product(*slices))
    shades = np.linspace(0, 1, 8)
    for s, sh in zip(slices, shades):
        img[s] = sh
    img += 0.001 * rnd.normal(size=img.shape)
    np.clip(img, 0, 1, out=img)
    seg = slic(img, sigma=0, n_segments=8, multichannel=False,
               convert2lab=False, mask=msk)

    # we expect 8 segments + masked area
    assert_equal(len(np.unique(seg)), 9)
    for s, c in zip(slices, range(1, 9)):
        assert_equal(seg[s][2:-2, 2:-2, 2:-2], c)


@pytest.mark.parametrize("dtype", ['float32', 'float64', 'uint8', 'int'])
def test_dtype_support(dtype):
    img = np.random.rand(28, 28).astype(dtype)

    # Simply run the function to assert that it runs without error
    slic(img, start_label=1)
@@ -0,0 +1,498 @@
"""test_watershed.py - tests the watershed function
|
||||
|
||||
Originally part of CellProfiler, code licensed under both GPL and BSD licenses.
|
||||
Website: http://www.cellprofiler.org
|
||||
|
||||
Copyright (c) 2003-2009 Massachusetts Institute of Technology
|
||||
Copyright (c) 2009-2011 Broad Institute
|
||||
All rights reserved.
|
||||
|
||||
Original author: Lee Kamentsky
|
||||
"""
|
||||
#Portions of this test were taken from scipy's watershed test in test_ndimage.py
|
||||
#
|
||||
# Copyright (C) 2003-2005 Peter J. Verveer
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions
|
||||
# are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials provided
|
||||
# with the distribution.
|
||||
#
|
||||
# 3. The name of the author may not be used to endorse or promote
|
||||
# products derived from this software without specific prior
|
||||
# written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
|
||||
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
||||
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
||||
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
import math
|
||||
import unittest
|
||||
import pytest
|
||||
import numpy as np
|
||||
from scipy import ndimage as ndi
|
||||
|
||||
from .._watershed import watershed
|
||||
from skimage.measure import label
|
||||
|
||||
eps = 1e-12
|
||||
blob = np.array([[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
|
||||
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
|
||||
[255, 255, 255, 255, 255, 204, 204, 204, 204, 204, 204, 255, 255, 255, 255, 255],
|
||||
[255, 255, 255, 204, 204, 183, 153, 153, 153, 153, 183, 204, 204, 255, 255, 255],
|
||||
[255, 255, 204, 183, 153, 141, 111, 103, 103, 111, 141, 153, 183, 204, 255, 255],
|
||||
[255, 255, 204, 153, 111, 94, 72, 52, 52, 72, 94, 111, 153, 204, 255, 255],
|
||||
[255, 255, 204, 153, 111, 72, 39, 1, 1, 39, 72, 111, 153, 204, 255, 255],
|
||||
[255, 255, 204, 183, 141, 111, 72, 39, 39, 72, 111, 141, 183, 204, 255, 255],
|
||||
[255, 255, 255, 204, 183, 141, 111, 72, 72, 111, 141, 183, 204, 255, 255, 255],
|
||||
[255, 255, 255, 255, 204, 183, 141, 94, 94, 141, 183, 204, 255, 255, 255, 255],
|
||||
[255, 255, 255, 255, 255, 204, 153, 103, 103, 153, 204, 255, 255, 255, 255, 255],
|
||||
[255, 255, 255, 255, 204, 183, 141, 94, 94, 141, 183, 204, 255, 255, 255, 255],
|
||||
[255, 255, 255, 204, 183, 141, 111, 72, 72, 111, 141, 183, 204, 255, 255, 255],
|
||||
[255, 255, 204, 183, 141, 111, 72, 39, 39, 72, 111, 141, 183, 204, 255, 255],
|
||||
[255, 255, 204, 153, 111, 72, 39, 1, 1, 39, 72, 111, 153, 204, 255, 255],
|
||||
[255, 255, 204, 153, 111, 94, 72, 52, 52, 72, 94, 111, 153, 204, 255, 255],
|
||||
[255, 255, 204, 183, 153, 141, 111, 103, 103, 111, 141, 153, 183, 204, 255, 255],
|
||||
[255, 255, 255, 204, 204, 183, 153, 153, 153, 153, 183, 204, 204, 255, 255, 255],
|
||||
[255, 255, 255, 255, 255, 204, 204, 204, 204, 204, 204, 255, 255, 255, 255, 255],
|
||||
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
|
||||
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255]])
|
||||
|
||||
|
||||
def diff(a, b):
|
||||
if not isinstance(a, np.ndarray):
|
||||
a = np.asarray(a)
|
||||
if not isinstance(b, np.ndarray):
|
||||
b = np.asarray(b)
|
||||
if (0 in a.shape) and (0 in b.shape):
|
||||
return 0.0
|
||||
b[a == 0] = 0
|
||||
if (a.dtype in [np.complex64, np.complex128] or
|
||||
b.dtype in [np.complex64, np.complex128]):
|
||||
a = np.asarray(a, np.complex128)
|
||||
b = np.asarray(b, np.complex128)
|
||||
t = ((a.real - b.real)**2).sum() + ((a.imag - b.imag)**2).sum()
|
||||
else:
|
||||
a = np.asarray(a)
|
||||
a = a.astype(np.float64)
|
||||
b = np.asarray(b)
|
||||
b = b.astype(np.float64)
|
||||
t = ((a - b)**2).sum()
|
||||
return math.sqrt(t)
|
||||
|
||||
|
||||
class TestWatershed(unittest.TestCase):
|
||||
eight = np.ones((3, 3), bool)
|
||||
|
||||
def test_watershed01(self):
|
||||
"watershed 1"
|
||||
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
||||
markers = np.array([[ -1, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[ 0, 0, 0, 0, 0, 0, 0],
|
||||
[ 0, 0, 0, 0, 0, 0, 0],
|
||||
[ 0, 0, 0, 1, 0, 0, 0],
|
||||
[ 0, 0, 0, 0, 0, 0, 0],
|
||||
[ 0, 0, 0, 0, 0, 0, 0],
|
||||
[ 0, 0, 0, 0, 0, 0, 0],
|
||||
[ 0, 0, 0, 0, 0, 0, 0]],
|
||||
np.int8)
|
||||
out = watershed(data, markers, self.eight)
|
||||
expected = np.array([[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1]])
|
||||
error = diff(expected, out)
|
||||
assert error < eps
|
||||
|
||||
def test_watershed02(self):
|
||||
"watershed 2"
|
||||
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
||||
markers = np.array([[-1, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 1, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0]], np.int8)
|
||||
out = watershed(data, markers)
|
||||
error = diff([[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, 1, 1, 1, -1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, -1, 1, 1, 1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1]], out)
|
||||
self.assertTrue(error < eps)
|
||||
|
||||
def test_watershed03(self):
|
||||
"watershed 3"
|
||||
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 1, 0, 1, 0, 1, 0],
|
||||
[0, 1, 0, 1, 0, 1, 0],
|
||||
[0, 1, 0, 1, 0, 1, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
||||
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 2, 0, 3, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, -1]], np.int8)
|
||||
out = watershed(data, markers)
|
||||
error = diff([[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, 0, 2, 0, 3, 0, -1],
|
||||
[-1, 2, 2, 0, 3, 3, -1],
|
||||
[-1, 2, 2, 0, 3, 3, -1],
|
||||
[-1, 2, 2, 0, 3, 3, -1],
|
||||
[-1, 0, 2, 0, 3, 0, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1]], out)
|
||||
self.assertTrue(error < eps)
|
||||
|
||||
def test_watershed04(self):
|
||||
"watershed 4"
|
||||
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 1, 0, 1, 0, 1, 0],
|
||||
[0, 1, 0, 1, 0, 1, 0],
|
||||
[0, 1, 0, 1, 0, 1, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
||||
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 2, 0, 3, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, -1]], np.int8)
|
||||
out = watershed(data, markers, self.eight)
|
||||
error = diff([[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, 2, 2, 0, 3, 3, -1],
|
||||
[-1, 2, 2, 0, 3, 3, -1],
|
||||
[-1, 2, 2, 0, 3, 3, -1],
|
||||
[-1, 2, 2, 0, 3, 3, -1],
|
||||
[-1, 2, 2, 0, 3, 3, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1]], out)
|
||||
self.assertTrue(error < eps)
|
||||
|
||||
def test_watershed05(self):
|
||||
"watershed 5"
|
||||
data = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 1, 0, 1, 0, 1, 0],
|
||||
[0, 1, 0, 1, 0, 1, 0],
|
||||
[0, 1, 0, 1, 0, 1, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
||||
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 3, 0, 2, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, -1]], np.int8)
|
||||
out = watershed(data, markers, self.eight)
|
||||
error = diff([[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, 3, 3, 0, 2, 2, -1],
|
||||
[-1, 3, 3, 0, 2, 2, -1],
|
||||
[-1, 3, 3, 0, 2, 2, -1],
|
||||
[-1, 3, 3, 0, 2, 2, -1],
|
||||
[-1, 3, 3, 0, 2, 2, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1]], out)
|
||||
self.assertTrue(error < eps)
|
||||
|
||||
def test_watershed06(self):
|
||||
"watershed 6"
|
||||
data = np.array([[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 1, 1, 1, 1, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0]], np.uint8)
|
||||
markers = np.array([[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 1, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0],
|
||||
[-1, 0, 0, 0, 0, 0, 0]], np.int8)
|
||||
out = watershed(data, markers, self.eight)
|
||||
error = diff([[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, 1, 1, 1, 1, 1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1],
|
||||
[-1, -1, -1, -1, -1, -1, -1]], out)
|
||||
self.assertTrue(error < eps)
|
||||
|
||||
def test_watershed07(self):
|
||||
"A regression test of a competitive case that failed"
|
||||
data = blob
|
||||
mask = (data != 255)
|
||||
markers = np.zeros(data.shape, int)
|
||||
markers[6, 7] = 1
|
||||
markers[14, 7] = 2
|
||||
out = watershed(data, markers, self.eight, mask=mask)
|
||||
#
|
||||
# The two objects should be the same size, except possibly for the
|
||||
# border region
|
||||
#
|
||||
size1 = np.sum(out == 1)
|
||||
size2 = np.sum(out == 2)
|
||||
self.assertTrue(abs(size1 - size2) <= 6)
|
||||
|
||||
def test_watershed08(self):
|
||||
"The border pixels + an edge are all the same value"
|
||||
data = blob.copy()
|
||||
data[10, 7:9] = 141
|
||||
mask = (data != 255)
|
||||
markers = np.zeros(data.shape, int)
|
||||
markers[6, 7] = 1
|
||||
markers[14, 7] = 2
|
||||
out = watershed(data, markers, self.eight, mask=mask)
|
||||
#
|
||||
# The two objects should be the same size, except possibly for the
|
||||
# border region
|
||||
#
|
||||
size1 = np.sum(out == 1)
|
||||
size2 = np.sum(out == 2)
|
||||
self.assertTrue(abs(size1 - size2) <= 6)
|
||||
|
||||
def test_watershed09(self):
|
||||
"""Test on an image of reasonable size
|
||||
|
||||
This is here both for timing (does it take forever?) and to
|
||||
ensure that the memory constraints are reasonable
|
||||
"""
|
||||
image = np.zeros((1000, 1000))
|
||||
coords = np.random.uniform(0, 1000, (100, 2)).astype(int)
|
||||
markers = np.zeros((1000, 1000), int)
|
||||
idx = 1
|
||||
for x, y in coords:
|
||||
image[x, y] = 1
|
||||
markers[x, y] = idx
|
||||
idx += 1
|
||||
|
||||
image = ndi.gaussian_filter(image, 4)
|
||||
watershed(image, markers, self.eight)
|
||||
ndi.watershed_ift(image.astype(np.uint16), markers, self.eight)
|
||||
|
||||
def test_watershed10(self):
|
||||
"watershed 10"
|
||||
data = np.array([[1, 1, 1, 1],
|
||||
[1, 1, 1, 1],
|
||||
[1, 1, 1, 1],
|
||||
[1, 1, 1, 1]], np.uint8)
|
||||
markers = np.array([[1, 0, 0, 2],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0],
|
||||
[3, 0, 0, 4]], np.int8)
|
||||
out = watershed(data, markers, self.eight)
|
||||
error = diff([[1, 1, 2, 2],
|
||||
[1, 1, 2, 2],
|
||||
[3, 3, 4, 4],
|
||||
[3, 3, 4, 4]], out)
|
||||
self.assertTrue(error < eps)
|
||||
|
||||
def test_watershed11(self):
|
||||
'''Make sure that all points on this plateau are assigned to closest seed'''
|
||||
# https://github.com/scikit-image/scikit-image/issues/803
|
||||
#
|
||||
# Make sure that no point in a level image is farther away
|
||||
# from its seed than any other
|
||||
#
|
||||
image = np.zeros((21, 21))
|
||||
markers = np.zeros((21, 21), int)
|
||||
markers[5, 5] = 1
|
||||
markers[5, 10] = 2
|
||||
markers[10, 5] = 3
|
||||
markers[10, 10] = 4
|
||||
|
||||
structure = np.array([[False, True, False],
|
||||
[True, True, True],
|
||||
[False, True, False]])
|
||||
out = watershed(image, markers, structure)
|
||||
i, j = np.mgrid[0:21, 0:21]
|
||||
d = np.dstack(
|
||||
[np.sqrt((i.astype(float)-i0)**2, (j.astype(float)-j0)**2)
|
||||
for i0, j0 in ((5, 5), (5, 10), (10, 5), (10, 10))])
|
||||
dmin = np.min(d, 2)
|
||||
self.assertTrue(np.all(d[i, j, out[i, j]-1] == dmin))
|
||||
|
||||
|
||||
def test_watershed12(self):
|
||||
"The watershed line"
|
||||
data = np.array([[203, 255, 203, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153],
|
||||
[203, 255, 203, 153, 153, 153, 102, 102, 102, 102, 102, 102, 153, 153, 153, 153],
|
||||
[203, 255, 203, 203, 153, 153, 102, 102, 77, 0, 102, 102, 153, 153, 203, 203],
|
||||
[203, 255, 255, 203, 153, 153, 153, 102, 102, 102, 102, 153, 153, 203, 203, 255],
|
||||
[203, 203, 255, 203, 203, 203, 153, 153, 153, 153, 153, 153, 203, 203, 255, 255],
|
||||
[153, 203, 255, 255, 255, 203, 203, 203, 203, 203, 203, 203, 203, 255, 255, 203],
|
||||
[153, 203, 203, 203, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 203, 203],
|
||||
[153, 153, 153, 203, 203, 203, 203, 203, 255, 203, 203, 203, 203, 203, 203, 153],
|
||||
[102, 102, 153, 153, 153, 153, 203, 203, 255, 203, 203, 255, 203, 153, 153, 153],
|
||||
[102, 102, 102, 102, 102, 153, 203, 255, 255, 203, 203, 203, 203, 153, 102, 153],
|
||||
[102, 51, 51, 102, 102, 153, 203, 255, 203, 203, 153, 153, 153, 153, 102, 153],
|
||||
[ 77, 51, 51, 102, 153, 153, 203, 255, 203, 203, 203, 153, 102, 102, 102, 153],
|
||||
[ 77, 0, 51, 102, 153, 203, 203, 255, 203, 255, 203, 153, 102, 51, 102, 153],
|
||||
[ 77, 0, 51, 102, 153, 203, 255, 255, 203, 203, 203, 153, 102, 0, 102, 153],
|
||||
[102, 0, 51, 102, 153, 203, 255, 203, 203, 153, 153, 153, 102, 102, 102, 153],
|
||||
[102, 102, 102, 102, 153, 203, 255, 203, 153, 153, 153, 153, 153, 153, 153, 153]])
|
||||
markerbin = (data==0)
|
||||
marker = label(markerbin)
|
||||
ws = watershed(data, marker, connectivity=2, watershed_line=True)
|
||||
for lab, area in zip(range(4), [34,74,74,74]):
|
||||
self.assertTrue(np.sum(ws == lab) == area)
|
||||
|
||||
|
||||
|
||||
def test_compact_watershed():
|
||||
image = np.zeros((5, 6))
|
||||
image[:, 3:] = 1
|
||||
seeds = np.zeros((5, 6), dtype=int)
|
||||
seeds[2, 0] = 1
|
||||
seeds[2, 3] = 2
|
||||
compact = watershed(image, seeds, compactness=0.01)
|
||||
expected = np.array([[1, 1, 1, 2, 2, 2],
|
||||
[1, 1, 1, 2, 2, 2],
|
||||
[1, 1, 1, 2, 2, 2],
|
||||
[1, 1, 1, 2, 2, 2],
|
||||
[1, 1, 1, 2, 2, 2]], dtype=int)
|
||||
np.testing.assert_equal(compact, expected)
|
||||
normal = watershed(image, seeds)
|
||||
expected = np.ones(image.shape, dtype=int)
|
||||
expected[2, 3:] = 2
|
||||
np.testing.assert_equal(normal, expected)
|
||||
|
||||
|
||||
def test_numeric_seed_watershed():
|
||||
"""Test that passing just the number of seeds to watershed works."""
|
||||
image = np.zeros((5, 6))
|
||||
image[:, 3:] = 1
|
||||
compact = watershed(image, 2, compactness=0.01)
|
||||
expected = np.array([[1, 1, 1, 1, 2, 2],
|
||||
[1, 1, 1, 1, 2, 2],
|
||||
[1, 1, 1, 1, 2, 2],
|
||||
[1, 1, 1, 1, 2, 2],
|
||||
[1, 1, 1, 1, 2, 2]], dtype=np.int32)
|
||||
np.testing.assert_equal(compact, expected)
|
||||
|
||||
|
||||
def test_incorrect_markers_shape():
|
||||
with pytest.raises(ValueError):
|
||||
image = np.ones((5, 6))
|
||||
markers = np.ones((5, 7))
|
||||
output = watershed(image, markers)
|
||||
|
||||
|
||||
def test_incorrect_mask_shape():
|
||||
with pytest.raises(ValueError):
|
||||
image = np.ones((5, 6))
|
||||
mask = np.ones((5, 7))
|
||||
output = watershed(image, markers=4, mask=mask)
|
||||
|
||||
|
||||
def test_markers_in_mask():
|
||||
data = blob
|
||||
mask = (data != 255)
|
||||
out = watershed(data, 25, connectivity=2, mask=mask)
|
||||
# There should be no markers where the mask is false
|
||||
assert np.all(out[~mask] == 0)
|
||||
|
||||
|
||||
def test_no_markers():
|
||||
data = blob
|
||||
mask = (data != 255)
|
||||
out = watershed(data, mask=mask)
|
||||
assert np.max(out) == 2
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
np.testing.run_module_suite()
|