Fixed database typo and removed unnecessary class identifier.

Batuhan Berk Başoğlu 2020-10-14 10:10:37 -04:00
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions

View file

@@ -0,0 +1,16 @@
from ._adapted_rand_error import adapted_rand_error
from ._variation_of_information import variation_of_information
from ._contingency_table import contingency_table
from .simple_metrics import (mean_squared_error,
                             normalized_root_mse,
                             peak_signal_noise_ratio)
from ._structural_similarity import structural_similarity

__all__ = ['adapted_rand_error',
           'variation_of_information',
           'contingency_table',
           'mean_squared_error',
           'normalized_root_mse',
           'peak_signal_noise_ratio',
           'structural_similarity',
           ]
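
For orientation: the __init__ above re-exports every public metric, so all of
the names can be imported directly from the subpackage. A minimal sketch,
assuming this module sits in its usual place as skimage.metrics:

    from skimage.metrics import (adapted_rand_error,
                                 contingency_table,
                                 mean_squared_error,
                                 normalized_root_mse,
                                 peak_signal_noise_ratio,
                                 structural_similarity,
                                 variation_of_information)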

View file

@@ -0,0 +1,76 @@
from .._shared.utils import check_shape_equality
from ._contingency_table import contingency_table
__all__ = ['adapted_rand_error']
def adapted_rand_error(image_true=None, image_test=None, *, table=None,
                       ignore_labels=(0,)):
    r"""Compute Adapted Rand error as defined by the SNEMI3D contest. [1]_

    Parameters
    ----------
    image_true : ndarray of int
        Ground-truth label image, same shape as ``image_test``.
    image_test : ndarray of int
        Test image.
    table : scipy.sparse array in csr format, optional
        A contingency table built with skimage.metrics.contingency_table.
        If None, it will be computed on the fly.
    ignore_labels : sequence of int, optional
        Labels to ignore. Any part of the true image labeled with any of these
        values will not be counted in the score.

    Returns
    -------
    are : float
        The adapted Rand error; equal to :math:`1 - \frac{2pr}{p + r}`,
        where ``p`` and ``r`` are the precision and recall described below.
    prec : float
        The adapted Rand precision: this is the number of pairs of pixels that
        have the same label in the test label image *and* in the true image,
        divided by the number in the test image.
    rec : float
        The adapted Rand recall: this is the number of pairs of pixels that
        have the same label in the test label image *and* in the true image,
        divided by the number in the true image.

    Notes
    -----
    Pixels with label 0 in the true segmentation are ignored in the score.

    References
    ----------
    .. [1] Arganda-Carreras I, Turaga SC, Berger DR, et al. (2015)
           Crowdsourcing the creation of image segmentation algorithms
           for connectomics. Front. Neuroanat. 9:142.
           :DOI:`10.3389/fnana.2015.00142`
    """
    if image_test is not None and image_true is not None:
        check_shape_equality(image_true, image_test)

    if table is None:
        p_ij = contingency_table(image_true, image_test,
                                 ignore_labels=ignore_labels, normalize=False)
    else:
        p_ij = table

    # Sum of the joint distribution squared
    sum_p_ij2 = p_ij.data @ p_ij.data - p_ij.sum()

    a_i = p_ij.sum(axis=1).A.ravel()
    b_i = p_ij.sum(axis=0).A.ravel()

    # Sum of squares of the test segment sizes (this is 2x the number of pairs
    # of pixels with the same label in im_test)
    sum_a2 = a_i @ a_i - a_i.sum()
    # Same for im_true
    sum_b2 = b_i @ b_i - b_i.sum()

    precision = sum_p_ij2 / sum_a2
    recall = sum_p_ij2 / sum_b2

    fscore = 2. * precision * recall / (precision + recall)
    are = 1. - fscore

    return are, precision, recall
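
A small usage sketch for adapted_rand_error on toy label images (a hedged
example, not part of the committed file; exact scores are not asserted):

    import numpy as np
    from skimage.metrics import adapted_rand_error

    image_true = np.array([[1, 1, 2],
                           [1, 2, 2],
                           [3, 3, 3]])
    # Split one true segment in two: an over-segmentation of the truth.
    image_test = np.array([[1, 1, 2],
                           [1, 2, 2],
                           [3, 3, 4]])

    are, precision, recall = adapted_rand_error(image_true, image_test)
    # are is 0.0 for identical segmentations and grows toward 1.0 as
    # splits and merges accumulate.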

View file

@@ -0,0 +1,40 @@
import scipy.sparse as sparse
import numpy as np
__all__ = ['contingency_table']
def contingency_table(im_true, im_test, *, ignore_labels=(), normalize=False):
    """
    Return the contingency table for all regions in matched segmentations.

    Parameters
    ----------
    im_true : ndarray of int
        Ground-truth label image, same shape as ``im_test``.
    im_test : ndarray of int
        Test image.
    ignore_labels : sequence of int, optional
        Labels to ignore. Any part of the true image labeled with any of these
        values will not be counted in the score.
    normalize : bool
        Determines if the contingency table is normalized by pixel count.

    Returns
    -------
    cont : scipy.sparse.csr_matrix
        A contingency table. `cont[i, j]` will equal the number of voxels
        labeled `i` in `im_true` and `j` in `im_test`.
    """
    im_test_r = im_test.ravel()
    im_true_r = im_true.ravel()
    ignored = np.zeros(im_true_r.shape, bool)
    for label in ignore_labels:
        ignored[im_true_r == label] = True
    data = np.ones(im_true_r.shape)
    data[ignored] = 0
    if normalize:
        data = data / im_true.size
    cont = sparse.coo_matrix((data, (im_true_r, im_test_r))).tocsr()
    return cont
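
To make the table's layout concrete, a hedged sketch (not part of the
committed file): rows index labels in im_true, columns labels in im_test.

    import numpy as np
    from skimage.metrics import contingency_table

    im_true = np.array([[1, 1],
                        [2, 2]])
    im_test = np.array([[1, 2],
                        [2, 2]])

    cont = contingency_table(im_true, im_test)
    # cont[1, 1] == 1, cont[1, 2] == 1, cont[2, 2] == 2: entry (i, j)
    # counts the pixels labeled i in im_true and j in im_test.
    print(cont.toarray())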

View file

@@ -0,0 +1,232 @@
import numpy as np
from scipy.ndimage import uniform_filter, gaussian_filter

from ..util.dtype import dtype_range
from ..util.arraycrop import crop
from .._shared.utils import warn, check_shape_equality

__all__ = ['structural_similarity']
def structural_similarity(im1, im2,
                          *,
                          win_size=None, gradient=False, data_range=None,
                          multichannel=False, gaussian_weights=False,
                          full=False, **kwargs):
    """
    Compute the mean structural similarity index between two images.

    Parameters
    ----------
    im1, im2 : ndarray
        Images. Any dimensionality with same shape.
    win_size : int or None, optional
        The side-length of the sliding window used in comparison. Must be an
        odd value. If `gaussian_weights` is True, this is ignored and the
        window size will depend on `sigma`.
    gradient : bool, optional
        If True, also return the gradient with respect to im2.
    data_range : float, optional
        The data range of the input image (distance between minimum and
        maximum possible values). By default, this is estimated from the image
        data-type.
    multichannel : bool, optional
        If True, treat the last dimension of the array as channels. Similarity
        calculations are done independently for each channel then averaged.
    gaussian_weights : bool, optional
        If True, each patch has its mean and variance spatially weighted by a
        normalized Gaussian kernel of width sigma=1.5.
    full : bool, optional
        If True, also return the full structural similarity image.

    Other Parameters
    ----------------
    use_sample_covariance : bool
        If True, normalize covariances by N-1 rather than N, where N is the
        number of pixels within the sliding window.
    K1 : float
        Algorithm parameter, K1 (small constant, see [1]_).
    K2 : float
        Algorithm parameter, K2 (small constant, see [1]_).
    sigma : float
        Standard deviation for the Gaussian when `gaussian_weights` is True.

    Returns
    -------
    mssim : float
        The mean structural similarity index over the image.
    grad : ndarray
        The gradient of the structural similarity between im1 and im2 [2]_.
        This is only returned if `gradient` is set to True.
    S : ndarray
        The full SSIM image. This is only returned if `full` is set to True.

    Notes
    -----
    To match the implementation of Wang et al. [1]_, set `gaussian_weights`
    to True, `sigma` to 1.5, and `use_sample_covariance` to False.

    .. versionchanged:: 0.16
        This function was renamed from ``skimage.measure.compare_ssim`` to
        ``skimage.metrics.structural_similarity``.

    References
    ----------
    .. [1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P.
           (2004). Image quality assessment: From error visibility to
           structural similarity. IEEE Transactions on Image Processing,
           13, 600-612.
           https://ece.uwaterloo.ca/~z70wang/publications/ssim.pdf,
           :DOI:`10.1109/TIP.2003.819861`
    .. [2] Avanaki, A. N. (2009). Exact global histogram specification
           optimized for structural similarity. Optical Review, 16, 613-621.
           :arxiv:`0901.0065`
           :DOI:`10.1007/s10043-009-0119-z`
    """
    check_shape_equality(im1, im2)

    if multichannel:
        # loop over channels
        args = dict(win_size=win_size,
                    gradient=gradient,
                    data_range=data_range,
                    multichannel=False,
                    gaussian_weights=gaussian_weights,
                    full=full)
        args.update(kwargs)
        nch = im1.shape[-1]
        mssim = np.empty(nch)
        if gradient:
            G = np.empty(im1.shape)
        if full:
            S = np.empty(im1.shape)
        for ch in range(nch):
            ch_result = structural_similarity(im1[..., ch],
                                              im2[..., ch], **args)
            if gradient and full:
                mssim[..., ch], G[..., ch], S[..., ch] = ch_result
            elif gradient:
                mssim[..., ch], G[..., ch] = ch_result
            elif full:
                mssim[..., ch], S[..., ch] = ch_result
            else:
                mssim[..., ch] = ch_result
        mssim = mssim.mean()
        if gradient and full:
            return mssim, G, S
        elif gradient:
            return mssim, G
        elif full:
            return mssim, S
        else:
            return mssim

    K1 = kwargs.pop('K1', 0.01)
    K2 = kwargs.pop('K2', 0.03)
    sigma = kwargs.pop('sigma', 1.5)
    if K1 < 0:
        raise ValueError("K1 must be positive")
    if K2 < 0:
        raise ValueError("K2 must be positive")
    if sigma < 0:
        raise ValueError("sigma must be positive")
    use_sample_covariance = kwargs.pop('use_sample_covariance', True)

    if gaussian_weights:
        # Set to give an 11-tap filter with the default sigma of 1.5 to match
        # Wang et al. 2004.
        truncate = 3.5

    if win_size is None:
        if gaussian_weights:
            # set win_size used by crop to match the filter size
            r = int(truncate * sigma + 0.5)  # radius as in ndimage
            win_size = 2 * r + 1
        else:
            win_size = 7  # backwards compatibility

    if np.any((np.asarray(im1.shape) - win_size) < 0):
        raise ValueError(
            "win_size exceeds image extent. If the input is a multichannel "
            "(color) image, set multichannel=True.")

    if not (win_size % 2 == 1):
        raise ValueError('Window size must be odd.')

    if data_range is None:
        if im1.dtype != im2.dtype:
            warn("Inputs have mismatched dtype. Setting data_range based on "
                 "im1.dtype.", stacklevel=2)
        dmin, dmax = dtype_range[im1.dtype.type]
        data_range = dmax - dmin

    ndim = im1.ndim

    if gaussian_weights:
        filter_func = gaussian_filter
        filter_args = {'sigma': sigma, 'truncate': truncate}
    else:
        filter_func = uniform_filter
        filter_args = {'size': win_size}

    # ndimage filters need floating point data
    im1 = im1.astype(np.float64)
    im2 = im2.astype(np.float64)

    NP = win_size ** ndim

    # filter has already normalized by NP
    if use_sample_covariance:
        cov_norm = NP / (NP - 1)  # sample covariance
    else:
        cov_norm = 1.0  # population covariance to match Wang et al. 2004

    # compute (weighted) means
    ux = filter_func(im1, **filter_args)
    uy = filter_func(im2, **filter_args)

    # compute (weighted) variances and covariances
    uxx = filter_func(im1 * im1, **filter_args)
    uyy = filter_func(im2 * im2, **filter_args)
    uxy = filter_func(im1 * im2, **filter_args)
    vx = cov_norm * (uxx - ux * ux)
    vy = cov_norm * (uyy - uy * uy)
    vxy = cov_norm * (uxy - ux * uy)

    R = data_range
    C1 = (K1 * R) ** 2
    C2 = (K2 * R) ** 2

    A1, A2, B1, B2 = ((2 * ux * uy + C1,
                       2 * vxy + C2,
                       ux ** 2 + uy ** 2 + C1,
                       vx + vy + C2))
    D = B1 * B2
    S = (A1 * A2) / D

    # to avoid edge effects will ignore filter radius strip around edges
    pad = (win_size - 1) // 2

    # compute (weighted) mean of ssim
    mssim = crop(S, pad).mean()

    if gradient:
        # The following is Eqs. 7-8 of Avanaki 2009.
        grad = filter_func(A1 / D, **filter_args) * im1
        grad += filter_func(-S / B2, **filter_args) * im2
        grad += filter_func((ux * (A2 - A1) - uy * (B2 - B1) * S) / D,
                            **filter_args)
        grad *= (2 / im1.size)

        if full:
            return mssim, grad, S
        else:
            return mssim, grad
    else:
        if full:
            return mssim, S
        else:
            return mssim
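
As a usage sketch (hedged, not part of the committed file): the code above
computes the per-window score S = (A1 * A2) / (B1 * B2) and averages it, so
any degradation of im2 pulls the mean below the perfect value of 1.0.

    import numpy as np
    from scipy.ndimage import gaussian_filter
    from skimage.metrics import structural_similarity

    rng = np.random.default_rng(42)
    img = rng.random((64, 64))
    blurred = gaussian_filter(img, sigma=1)

    score = structural_similarity(img, blurred, data_range=1.0)

    # Settings that match Wang et al. (2004), per the Notes section:
    score_wang, grad = structural_similarity(
        img, blurred, data_range=1.0, gradient=True,
        gaussian_weights=True, sigma=1.5, use_sample_covariance=False)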

View file

@@ -0,0 +1,136 @@
import numpy as np
import scipy.sparse as sparse
from ._contingency_table import contingency_table
from .._shared.utils import check_shape_equality
__all__ = ['variation_of_information']
def variation_of_information(image0=None, image1=None, *, table=None,
                             ignore_labels=()):
    """Return symmetric conditional entropies associated with the VI. [1]_

    The variation of information is defined as VI(X,Y) = H(X|Y) + H(Y|X).
    If X is the ground-truth segmentation, then H(X|Y) can be interpreted
    as the amount of under-segmentation and H(Y|X) as the amount
    of over-segmentation. In other words, a perfect over-segmentation
    will have H(X|Y)=0 and a perfect under-segmentation will have H(Y|X)=0.

    Parameters
    ----------
    image0, image1 : ndarray of int
        Label images / segmentations, must have same shape.
    table : scipy.sparse array in csr format, optional
        A contingency table built with skimage.metrics.contingency_table.
        If None, it will be computed with skimage.metrics.contingency_table.
        If given, the entropies will be computed from this table and any
        images will be ignored.
    ignore_labels : sequence of int, optional
        Labels to ignore. Any part of the true image labeled with any of these
        values will not be counted in the score.

    Returns
    -------
    vi : ndarray of float, shape (2,)
        The conditional entropies of image1|image0 and image0|image1.

    References
    ----------
    .. [1] Marina Meilă (2007), Comparing clusterings - an information based
           distance, Journal of Multivariate Analysis, Volume 98, Issue 5,
           Pages 873-895, ISSN 0047-259X, :DOI:`10.1016/j.jmva.2006.11.013`.
    """
    h0g1, h1g0 = _vi_tables(image0, image1, table=table,
                            ignore_labels=ignore_labels)
    # false splits, false merges
    return np.array([h1g0.sum(), h0g1.sum()])


def _xlogx(x):
    """Compute x * log_2(x).

    We define 0 * log_2(0) = 0.

    Parameters
    ----------
    x : ndarray or scipy.sparse.csc_matrix or csr_matrix
        The input array.

    Returns
    -------
    y : same type as x
        Result of x * log_2(x).
    """
    y = x.copy()
    if isinstance(y, sparse.csc_matrix) or isinstance(y, sparse.csr_matrix):
        z = y.data
    else:
        z = np.asarray(y)  # ensure np.matrix converted to np.array
    nz = z.nonzero()
    z[nz] *= np.log2(z[nz])
    return y


def _vi_tables(im_true, im_test, table=None, ignore_labels=()):
    """Compute probability tables used for calculating VI.

    Parameters
    ----------
    im_true, im_test : ndarray of int
        Input label images, any dimensionality.
    table : csr matrix, optional
        Pre-computed contingency table.
    ignore_labels : sequence of int, optional
        Labels to ignore when computing scores.

    Returns
    -------
    hxgy, hygx : ndarray of float
        Per-segment conditional entropies of ``im_true`` given ``im_test`` and
        vice-versa.
    """
    check_shape_equality(im_true, im_test)

    if table is None:
        # normalize, since it is an identity op if already done
        pxy = contingency_table(
            im_true, im_test,
            ignore_labels=ignore_labels, normalize=True
        )
    else:
        pxy = table

    # compute marginal probabilities, converting to 1D array
    px = np.ravel(pxy.sum(axis=1))
    py = np.ravel(pxy.sum(axis=0))

    # use sparse matrix linear algebra to compute VI
    # first, compute the inverse diagonal matrices
    px_inv = sparse.diags(_invert_nonzero(px))
    py_inv = sparse.diags(_invert_nonzero(py))

    # then, compute the entropies
    hygx = -px @ _xlogx(px_inv @ pxy).sum(axis=1)
    hxgy = -_xlogx(pxy @ py_inv).sum(axis=0) @ py

    return list(map(np.asarray, [hxgy, hygx]))


def _invert_nonzero(arr):
    """Compute the inverse of the non-zero elements of arr, not changing 0.

    Parameters
    ----------
    arr : ndarray

    Returns
    -------
    arr_inv : ndarray
        Array containing the inverse of the non-zero elements of arr, and
        zero elsewhere.
    """
    arr_inv = arr.copy()
    nz = np.nonzero(arr)
    arr_inv[nz] = 1 / arr[nz]
    return arr_inv
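
A toy sketch of the split/merge decomposition (hedged, not part of the
committed file; exact entropy values are not asserted):

    import numpy as np
    from skimage.metrics import variation_of_information

    image_true = np.array([[1, 1, 2, 2]])
    # Split true segment 2 in two: a pure refinement (over-segmentation).
    image_over = np.array([[1, 1, 2, 3]])

    splits, merges = variation_of_information(image_true, image_over)
    # splits is H(image_over|image_true), the false-splits term; merges is
    # H(image_true|image_over), the false-merges term. For a pure
    # refinement, merges should be 0, and splits + merges is the VI.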

View file

@@ -0,0 +1,160 @@
import numpy as np

from ..util.dtype import dtype_range
from .._shared.utils import warn, check_shape_equality

__all__ = ['mean_squared_error',
           'normalized_root_mse',
           'peak_signal_noise_ratio',
           ]
def _as_floats(image0, image1):
    """
    Promote image0 and image1 to the nearest appropriate floating point
    precision.
    """
    float_type = np.result_type(image0.dtype, image1.dtype, np.float32)
    image0 = np.asarray(image0, dtype=float_type)
    image1 = np.asarray(image1, dtype=float_type)
    return image0, image1


def mean_squared_error(image0, image1):
    """
    Compute the mean-squared error between two images.

    Parameters
    ----------
    image0, image1 : ndarray
        Images. Any dimensionality, must have same shape.

    Returns
    -------
    mse : float
        The mean-squared error (MSE) metric.

    Notes
    -----
    .. versionchanged:: 0.16
        This function was renamed from ``skimage.measure.compare_mse`` to
        ``skimage.metrics.mean_squared_error``.
    """
    check_shape_equality(image0, image1)
    image0, image1 = _as_floats(image0, image1)
    return np.mean((image0 - image1) ** 2, dtype=np.float64)
def normalized_root_mse(image_true, image_test, *, normalization='euclidean'):
    """
    Compute the normalized root mean-squared error (NRMSE) between two
    images.

    Parameters
    ----------
    image_true : ndarray
        Ground-truth image, same shape as ``image_test``.
    image_test : ndarray
        Test image.
    normalization : {'euclidean', 'min-max', 'mean'}, optional
        Controls the normalization method to use in the denominator of the
        NRMSE. There is no standard method of normalization across the
        literature [1]_. The methods available here are as follows:

        - 'euclidean' : normalize by the averaged Euclidean norm of
          ``im_true``::

              NRMSE = RMSE * sqrt(N) / || im_true ||

          where || . || denotes the Frobenius norm and ``N = im_true.size``.
          This result is equivalent to::

              NRMSE = || im_true - im_test || / || im_true ||.

        - 'min-max' : normalize by the intensity range of ``im_true``.
        - 'mean' : normalize by the mean of ``im_true``.

    Returns
    -------
    nrmse : float
        The NRMSE metric.

    Notes
    -----
    .. versionchanged:: 0.16
        This function was renamed from ``skimage.measure.compare_nrmse`` to
        ``skimage.metrics.normalized_root_mse``.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Root-mean-square_deviation
    """
    check_shape_equality(image_true, image_test)
    image_true, image_test = _as_floats(image_true, image_test)

    # Ensure that both 'Euclidean' and 'euclidean' match
    normalization = normalization.lower()
    if normalization == 'euclidean':
        denom = np.sqrt(np.mean((image_true * image_true), dtype=np.float64))
    elif normalization == 'min-max':
        denom = image_true.max() - image_true.min()
    elif normalization == 'mean':
        denom = image_true.mean()
    else:
        raise ValueError("Unsupported norm_type")
    return np.sqrt(mean_squared_error(image_true, image_test)) / denom
def peak_signal_noise_ratio(image_true, image_test, *, data_range=None):
    """
    Compute the peak signal to noise ratio (PSNR) for an image.

    Parameters
    ----------
    image_true : ndarray
        Ground-truth image, same shape as ``image_test``.
    image_test : ndarray
        Test image.
    data_range : int, optional
        The data range of the input image (distance between minimum and
        maximum possible values). By default, this is estimated from the image
        data-type.

    Returns
    -------
    psnr : float
        The PSNR metric.

    Notes
    -----
    .. versionchanged:: 0.16
        This function was renamed from ``skimage.measure.compare_psnr`` to
        ``skimage.metrics.peak_signal_noise_ratio``.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
    """
    check_shape_equality(image_true, image_test)

    if data_range is None:
        if image_true.dtype != image_test.dtype:
            warn("Inputs have mismatched dtype. Setting data_range based on "
                 "image_true.", stacklevel=2)
        dmin, dmax = dtype_range[image_true.dtype.type]
        true_min, true_max = np.min(image_true), np.max(image_true)
        if true_max > dmax or true_min < dmin:
            raise ValueError(
                "image_true has intensity values outside the range expected "
                "for its data type. Please manually specify the data_range.")
        if true_min >= 0:
            # most common case (255 for uint8, 1 for float)
            data_range = dmax
        else:
            data_range = dmax - dmin

    image_true, image_test = _as_floats(image_true, image_test)
    err = mean_squared_error(image_true, image_test)
    return 10 * np.log10((data_range ** 2) / err)
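
A combined sketch of the three simple metrics (hedged, not part of the
committed file; outputs depend on the random seed and are not asserted):

    import numpy as np
    from skimage.metrics import (mean_squared_error,
                                 normalized_root_mse,
                                 peak_signal_noise_ratio)

    rng = np.random.default_rng(7)
    image_true = rng.random((32, 32))
    image_test = np.clip(image_true + 0.05 * rng.standard_normal((32, 32)),
                         0, 1)

    mse = mean_squared_error(image_true, image_test)
    nrmse = normalized_root_mse(image_true, image_test)  # 'euclidean' norm
    # PSNR = 10 * log10(data_range**2 / MSE); higher means closer images.
    psnr = peak_signal_noise_ratio(image_true, image_test, data_range=1.0)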