Fixed database typo and removed unnecessary class identifier.
This commit is contained in:
parent
00ad49a143
commit
45fb349a7d
5098 changed files with 952558 additions and 85 deletions
19
venv/Lib/site-packages/skimage/exposure/__init__.py
Normal file
@@ -0,0 +1,19 @@
from .exposure import histogram, equalize_hist, \
                      rescale_intensity, cumulative_distribution, \
                      adjust_gamma, adjust_sigmoid, adjust_log, \
                      is_low_contrast

from ._adapthist import equalize_adapthist
from .histogram_matching import match_histograms


__all__ = ['histogram',
           'equalize_hist',
           'equalize_adapthist',
           'rescale_intensity',
           'cumulative_distribution',
           'adjust_gamma',
           'adjust_sigmoid',
           'adjust_log',
           'is_low_contrast',
           'match_histograms']
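A minimal usage sketch of the names re-exported above (the toy array and values are illustrative only, not taken from the committed code):

import numpy as np
from skimage.exposure import histogram, equalize_hist, is_low_contrast

img = np.linspace(0, 0.04, 64).reshape(8, 8)  # toy low-contrast float image
counts, centers = histogram(img, nbins=16)    # per-bin counts and bin centers
flat = equalize_hist(img)                     # float image with equalized CDF
print(is_low_contrast(img))                   # True: brightness spans well under 5% of the float range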
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
317
venv/Lib/site-packages/skimage/exposure/_adapthist.py
Normal file
@@ -0,0 +1,317 @@
"""
|
||||
Adapted code from "Contrast Limited Adaptive Histogram Equalization" by Karel
|
||||
Zuiderveld <karel@cv.ruu.nl>, Graphics Gems IV, Academic Press, 1994.
|
||||
|
||||
http://tog.acm.org/resources/GraphicsGems/
|
||||
|
||||
The Graphics Gems code is copyright-protected. In other words, you cannot
|
||||
claim the text of the code as your own and resell it. Using the code is
|
||||
permitted in any program, product, or library, non-commercial or commercial.
|
||||
Giving credit is not required, though is a nice gesture. The code comes as-is,
|
||||
and if there are any flaws or problems with any Gems code, nobody involved with
|
||||
Gems - authors, editors, publishers, or webmasters - are to be held
|
||||
responsible. Basically, don't be a jerk, and remember that anything free
|
||||
comes with no guarantee.
|
||||
"""
|
||||
import numbers
|
||||
import numpy as np
|
||||
from ..util import img_as_float, img_as_uint
|
||||
from ..color.adapt_rgb import adapt_rgb, hsv_value
|
||||
from ..exposure import rescale_intensity
|
||||
|
||||
|
||||
NR_OF_GRAY = 2 ** 14 # number of grayscale levels to use in CLAHE algorithm
|
||||
|
||||
|
||||
@adapt_rgb(hsv_value)
|
||||
def equalize_adapthist(image, kernel_size=None,
|
||||
clip_limit=0.01, nbins=256):
|
||||
"""Contrast Limited Adaptive Histogram Equalization (CLAHE).
|
||||
|
||||
An algorithm for local contrast enhancement, that uses histograms computed
|
||||
over different tile regions of the image. Local details can therefore be
|
||||
enhanced even in regions that are darker or lighter than most of the image.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : (N1, ...,NN[, C]) ndarray
|
||||
Input image.
|
||||
kernel_size: int or array_like, optional
|
||||
Defines the shape of contextual regions used in the algorithm. If
|
||||
iterable is passed, it must have the same number of elements as
|
||||
``image.ndim`` (without color channel). If integer, it is broadcasted
|
||||
to each `image` dimension. By default, ``kernel_size`` is 1/8 of
|
||||
``image`` height by 1/8 of its width.
|
||||
clip_limit : float, optional
|
||||
Clipping limit, normalized between 0 and 1 (higher values give more
|
||||
contrast).
|
||||
nbins : int, optional
|
||||
Number of gray bins for histogram ("data range").
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : (N1, ...,NN[, C]) ndarray
|
||||
Equalized image with float64 dtype.
|
||||
|
||||
See Also
|
||||
--------
|
||||
equalize_hist, rescale_intensity
|
||||
|
||||
Notes
|
||||
-----
|
||||
* For color images, the following steps are performed:
|
||||
- The image is converted to HSV color space
|
||||
- The CLAHE algorithm is run on the V (Value) channel
|
||||
- The image is converted back to RGB space and returned
|
||||
* For RGBA images, the original alpha channel is removed.
|
||||
|
||||
.. versionchanged:: 0.17
|
||||
The values returned by this function are slightly shifted upwards
|
||||
because of an internal change in rounding behavior.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] http://tog.acm.org/resources/GraphicsGems/
|
||||
.. [2] https://en.wikipedia.org/wiki/CLAHE#CLAHE
|
||||
"""
|
||||
|
||||
if clip_limit == 1.0:
|
||||
return img_as_float(image) # convert to float for consistency
|
||||
|
||||
image = img_as_uint(image)
|
||||
image = np.round(
|
||||
rescale_intensity(image, out_range=(0, NR_OF_GRAY - 1))
|
||||
).astype(np.uint16)
|
||||
|
||||
if kernel_size is None:
|
||||
kernel_size = tuple([image.shape[dim] // 8
|
||||
for dim in range(image.ndim)])
|
||||
elif isinstance(kernel_size, numbers.Number):
|
||||
kernel_size = (kernel_size,) * image.ndim
|
||||
elif len(kernel_size) != image.ndim:
|
||||
raise ValueError('Incorrect value of `kernel_size`: {}'.format(kernel_size))
|
||||
|
||||
kernel_size = [int(k) for k in kernel_size]
|
||||
|
||||
image = _clahe(image, kernel_size, clip_limit, nbins)
|
||||
image = img_as_float(image)
|
||||
return rescale_intensity(image)
|
||||
|
||||
|
||||
def _clahe(image, kernel_size, clip_limit, nbins):
|
||||
"""Contrast Limited Adaptive Histogram Equalization.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : (N1,...,NN) ndarray
|
||||
Input image.
|
||||
kernel_size: int or N-tuple of int
|
||||
Defines the shape of contextual regions used in the algorithm.
|
||||
clip_limit : float
|
||||
Normalized clipping limit (higher values give more contrast).
|
||||
nbins : int
|
||||
Number of gray bins for histogram ("data range").
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : (N1,...,NN) ndarray
|
||||
Equalized image.
|
||||
|
||||
The number of "effective" graylevels in the output image is set by `nbins`;
|
||||
selecting a small value (e.g. 128) speeds up processing and still produces
|
||||
an output image of good quality. The output image will have the same
|
||||
minimum and maximum value as the input image. A clip limit smaller than 1
|
||||
results in standard (non-contrast limited) AHE.
|
||||
"""
|
||||
ndim = image.ndim
|
||||
dtype = image.dtype
|
||||
|
||||
# pad the image such that the shape in each dimension
|
||||
# - is a multiple of the kernel_size and
|
||||
# - is preceded by half a kernel size
|
||||
pad_start_per_dim = [k // 2 for k in kernel_size]
|
||||
|
||||
pad_end_per_dim = [(k - s % k) % k + int(np.ceil(k / 2.))
|
||||
for k, s in zip(kernel_size, image.shape)]
|
||||
|
||||
image = np.pad(image, [[p_i, p_f] for p_i, p_f in
|
||||
zip(pad_start_per_dim, pad_end_per_dim)],
|
||||
mode='reflect')
|
||||
|
||||
# determine gray value bins
|
||||
bin_size = 1 + NR_OF_GRAY // nbins
|
||||
lut = np.arange(NR_OF_GRAY)
|
||||
lut //= bin_size
|
||||
|
||||
image = lut[image]
|
||||
|
||||
# calculate graylevel mappings for each contextual region
|
||||
# rearrange image into flattened contextual regions
|
||||
ns_hist = [int(s / k) - 1 for s, k in zip(image.shape, kernel_size)]
|
||||
hist_blocks_shape = np.array([ns_hist, kernel_size]).T.flatten()
|
||||
hist_blocks_axis_order = np.array([np.arange(0, ndim * 2, 2),
|
||||
np.arange(1, ndim * 2, 2)]).flatten()
|
||||
hist_slices = [slice(k // 2, k // 2 + n * k)
|
||||
for k, n in zip(kernel_size, ns_hist)]
|
||||
hist_blocks = image[tuple(hist_slices)].reshape(hist_blocks_shape)
|
||||
hist_blocks = np.transpose(hist_blocks, axes=hist_blocks_axis_order)
|
||||
hist_block_assembled_shape = hist_blocks.shape
|
||||
hist_blocks = hist_blocks.reshape((np.product(ns_hist), -1))
|
||||
|
||||
# Calculate actual clip limit
|
||||
if clip_limit > 0.0:
|
||||
clim = int(np.clip(clip_limit * np.product(kernel_size), 1, None))
|
||||
else:
|
||||
clim = NR_OF_GRAY # Large value, do not clip (AHE)
|
||||
|
||||
hist = np.apply_along_axis(np.bincount, -1, hist_blocks, minlength=nbins)
|
||||
hist = np.apply_along_axis(clip_histogram, -1, hist, clip_limit=clim)
|
||||
hist = map_histogram(hist, 0, NR_OF_GRAY - 1, np.product(kernel_size))
|
||||
hist = hist.reshape(hist_block_assembled_shape[:ndim] + (-1,))
|
||||
|
||||
# duplicate leading mappings in each dim
|
||||
map_array = np.pad(hist,
|
||||
[[1, 1] for _ in range(ndim)] + [[0, 0]],
|
||||
mode='edge')
|
||||
|
||||
# Perform multilinear interpolation of graylevel mappings
|
||||
# using the convention described here:
|
||||
# https://en.wikipedia.org/w/index.php?title=Adaptive_histogram_
|
||||
# equalization&oldid=936814673#Efficient_computation_by_interpolation
|
||||
|
||||
# rearrange image into blocks for vectorized processing
|
||||
ns_proc = [int(s / k) for s, k in zip(image.shape, kernel_size)]
|
||||
blocks_shape = np.array([ns_proc, kernel_size]).T.flatten()
|
||||
blocks_axis_order = np.array([np.arange(0, ndim * 2, 2),
|
||||
np.arange(1, ndim * 2, 2)]).flatten()
|
||||
blocks = image.reshape(blocks_shape)
|
||||
blocks = np.transpose(blocks, axes=blocks_axis_order)
|
||||
blocks_flattened_shape = blocks.shape
|
||||
blocks = np.reshape(blocks, (np.product(ns_proc),
|
||||
np.product(blocks.shape[ndim:])))
|
||||
|
||||
# calculate interpolation coefficients
|
||||
coeffs = np.meshgrid(*tuple([np.arange(k) / k
|
||||
for k in kernel_size[::-1]]), indexing='ij')
|
||||
coeffs = [np.transpose(c).flatten() for c in coeffs]
|
||||
inv_coeffs = [1 - c for dim, c in enumerate(coeffs)]
|
||||
|
||||
# sum over contributions of neighboring contextual
|
||||
# regions in each direction
|
||||
result = np.zeros(blocks.shape, dtype=np.float32)
|
||||
for iedge, edge in enumerate(np.ndindex(*([2] * ndim))):
|
||||
|
||||
edge_maps = map_array[tuple([slice(e, e + n)
|
||||
for e, n in zip(edge, ns_proc)])]
|
||||
edge_maps = edge_maps.reshape((np.product(ns_proc), -1))
|
||||
|
||||
# apply map
|
||||
edge_mapped = np.take_along_axis(edge_maps, blocks, axis=-1)
|
||||
|
||||
# interpolate
|
||||
edge_coeffs = np.product([[inv_coeffs, coeffs][e][d]
|
||||
for d, e in enumerate(edge[::-1])], 0)
|
||||
|
||||
result += (edge_mapped * edge_coeffs).astype(result.dtype)
|
||||
|
||||
result = result.astype(dtype)
|
||||
|
||||
# rebuild result image from blocks
|
||||
result = result.reshape(blocks_flattened_shape)
|
||||
blocks_axis_rebuild_order =\
|
||||
np.array([np.arange(0, ndim),
|
||||
np.arange(ndim, ndim * 2)]).T.flatten()
|
||||
result = np.transpose(result, axes=blocks_axis_rebuild_order)
|
||||
result = result.reshape(image.shape)
|
||||
|
||||
# undo padding
|
||||
unpad_slices = tuple([slice(p_i, s - p_f) for p_i, p_f, s in
|
||||
zip(pad_start_per_dim, pad_end_per_dim,
|
||||
image.shape)])
|
||||
result = result[unpad_slices]
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def clip_histogram(hist, clip_limit):
|
||||
"""Perform clipping of the histogram and redistribution of bins.
|
||||
|
||||
The histogram is clipped and the number of excess pixels is counted.
|
||||
Afterwards the excess pixels are equally redistributed across the
|
||||
whole histogram (providing the bin count is smaller than the cliplimit).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
hist : ndarray
|
||||
Histogram array.
|
||||
clip_limit : int
|
||||
Maximum allowed bin count.
|
||||
|
||||
Returns
|
||||
-------
|
||||
hist : ndarray
|
||||
Clipped histogram.
|
||||
"""
|
||||
# calculate total number of excess pixels
|
||||
excess_mask = hist > clip_limit
|
||||
excess = hist[excess_mask]
|
||||
n_excess = excess.sum() - excess.size * clip_limit
|
||||
hist[excess_mask] = clip_limit
|
||||
|
||||
# Second part: clip histogram and redistribute excess pixels in each bin
|
||||
bin_incr = n_excess // hist.size # average binincrement
|
||||
upper = clip_limit - bin_incr # Bins larger than upper set to cliplimit
|
||||
|
||||
low_mask = hist < upper
|
||||
n_excess -= hist[low_mask].size * bin_incr
|
||||
hist[low_mask] += bin_incr
|
||||
|
||||
mid_mask = np.logical_and(hist >= upper, hist < clip_limit)
|
||||
mid = hist[mid_mask]
|
||||
n_excess += mid.sum() - mid.size * clip_limit
|
||||
hist[mid_mask] = clip_limit
|
||||
|
||||
while n_excess > 0: # Redistribute remaining excess
|
||||
prev_n_excess = n_excess
|
||||
for index in range(hist.size):
|
||||
under_mask = hist < clip_limit
|
||||
step_size = max(1, np.count_nonzero(under_mask) // n_excess)
|
||||
under_mask = under_mask[index::step_size]
|
||||
hist[index::step_size][under_mask] += 1
|
||||
n_excess -= np.count_nonzero(under_mask)
|
||||
if n_excess <= 0:
|
||||
break
|
||||
if prev_n_excess == n_excess:
|
||||
break
|
||||
|
||||
return hist
|
||||
|
||||
|
||||
def map_histogram(hist, min_val, max_val, n_pixels):
|
||||
"""Calculate the equalized lookup table (mapping).
|
||||
|
||||
It does so by cumulating the input histogram.
|
||||
Histogram bins are assumed to be represented by the last array dimension.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
hist : ndarray
|
||||
Clipped histogram.
|
||||
min_val : int
|
||||
Minimum value for mapping.
|
||||
max_val : int
|
||||
Maximum value for mapping.
|
||||
n_pixels : int
|
||||
Number of pixels in the region.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray
|
||||
Mapped intensity LUT.
|
||||
"""
|
||||
out = np.cumsum(hist, axis=-1).astype(float)
|
||||
out *= (max_val - min_val) / n_pixels
|
||||
out += min_val
|
||||
np.clip(out, a_min=None, a_max=max_val, out=out)
|
||||
|
||||
return out.astype(int)
|
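A short, illustrative call of the CLAHE entry point defined above; the kernel_size and clip_limit values here are arbitrary choices for the sketch, not recommendations from the source:

import numpy as np
from skimage import data, exposure

moon = data.moon()                      # uint8 grayscale test image
out = exposure.equalize_adapthist(moon,
                                  kernel_size=64,  # tile edge length in pixels
                                  clip_limit=0.02,
                                  nbins=256)
# Per the docstring, the result is float64 rescaled to [0, 1].
assert out.dtype == np.float64
assert 0.0 <= out.min() and out.max() <= 1.0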
640
venv/Lib/site-packages/skimage/exposure/exposure.py
Normal file
@@ -0,0 +1,640 @@
import numpy as np
|
||||
|
||||
from ..color.colorconv import rgb2gray, rgba2rgb
|
||||
from ..util.dtype import dtype_range, dtype_limits
|
||||
from .._shared.utils import warn
|
||||
|
||||
|
||||
__all__ = ['histogram', 'cumulative_distribution', 'equalize_hist',
|
||||
'rescale_intensity', 'adjust_gamma', 'adjust_log', 'adjust_sigmoid']
|
||||
|
||||
|
||||
DTYPE_RANGE = dtype_range.copy()
|
||||
DTYPE_RANGE.update((d.__name__, limits) for d, limits in dtype_range.items())
|
||||
DTYPE_RANGE.update({'uint10': (0, 2 ** 10 - 1),
|
||||
'uint12': (0, 2 ** 12 - 1),
|
||||
'uint14': (0, 2 ** 14 - 1),
|
||||
'bool': dtype_range[np.bool_],
|
||||
'float': dtype_range[np.float64]})
|
||||
|
||||
|
||||
def _offset_array(arr, low_boundary, high_boundary):
|
||||
"""Offset the array to get the lowest value at 0 if negative."""
|
||||
if low_boundary < 0:
|
||||
offset = low_boundary
|
||||
dyn_range = high_boundary - low_boundary
|
||||
# get smallest dtype that can hold both minimum and offset maximum
|
||||
offset_dtype = np.promote_types(np.min_scalar_type(dyn_range),
|
||||
np.min_scalar_type(low_boundary))
|
||||
if arr.dtype != offset_dtype:
|
||||
# prevent overflow errors when offsetting
|
||||
arr = arr.astype(offset_dtype)
|
||||
arr = arr - offset
|
||||
else:
|
||||
offset = 0
|
||||
return arr, offset
|
||||
|
||||
|
||||
def _bincount_histogram(image, source_range):
|
||||
"""
|
||||
Efficient histogram calculation for an image of integers.
|
||||
|
||||
This function is significantly more efficient than np.histogram but
|
||||
works only on images of integers. It is based on np.bincount.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : array
|
||||
Input image.
|
||||
source_range : string
|
||||
'image' determines the range from the input image.
|
||||
'dtype' determines the range from the expected range of the images
|
||||
of that data type.
|
||||
|
||||
Returns
|
||||
-------
|
||||
hist : array
|
||||
The values of the histogram.
|
||||
bin_centers : array
|
||||
The values at the center of the bins.
|
||||
"""
|
||||
if source_range not in ['image', 'dtype']:
|
||||
raise ValueError('Incorrect value for `source_range` argument: {}'.format(source_range))
|
||||
if source_range == 'image':
|
||||
image_min = int(image.min().astype(np.int64))
|
||||
image_max = int(image.max().astype(np.int64))
|
||||
elif source_range == 'dtype':
|
||||
image_min, image_max = dtype_limits(image, clip_negative=False)
|
||||
image, offset = _offset_array(image, image_min, image_max)
|
||||
hist = np.bincount(image.ravel(), minlength=image_max - image_min + 1)
|
||||
bin_centers = np.arange(image_min, image_max + 1)
|
||||
if source_range == 'image':
|
||||
idx = max(image_min, 0)
|
||||
hist = hist[idx:]
|
||||
return hist, bin_centers
|
||||
|
||||
|
||||
def histogram(image, nbins=256, source_range='image', normalize=False):
|
||||
"""Return histogram of image.
|
||||
|
||||
Unlike `numpy.histogram`, this function returns the centers of bins and
|
||||
does not rebin integer arrays. For integer arrays, each integer value has
|
||||
its own bin, which improves speed and intensity-resolution.
|
||||
|
||||
The histogram is computed on the flattened image: for color images, the
|
||||
function should be used separately on each channel to obtain a histogram
|
||||
for each color channel.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : array
|
||||
Input image.
|
||||
nbins : int, optional
|
||||
Number of bins used to calculate histogram. This value is ignored for
|
||||
integer arrays.
|
||||
source_range : string, optional
|
||||
'image' (default) determines the range from the input image.
|
||||
'dtype' determines the range from the expected range of the images
|
||||
of that data type.
|
||||
normalize : bool, optional
|
||||
If True, normalize the histogram by the sum of its values.
|
||||
|
||||
Returns
|
||||
-------
|
||||
hist : array
|
||||
The values of the histogram.
|
||||
bin_centers : array
|
||||
The values at the center of the bins.
|
||||
|
||||
See Also
|
||||
--------
|
||||
cumulative_distribution
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage import data, exposure, img_as_float
|
||||
>>> image = img_as_float(data.camera())
|
||||
>>> np.histogram(image, bins=2)
|
||||
(array([107432, 154712]), array([0. , 0.5, 1. ]))
|
||||
>>> exposure.histogram(image, nbins=2)
|
||||
(array([107432, 154712]), array([0.25, 0.75]))
|
||||
"""
|
||||
sh = image.shape
|
||||
if len(sh) == 3 and sh[-1] < 4:
|
||||
warn("This might be a color image. The histogram will be "
|
||||
"computed on the flattened image. You can instead "
|
||||
"apply this function to each color channel.")
|
||||
|
||||
image = image.flatten()
|
||||
# For integer types, histogramming with bincount is more efficient.
|
||||
if np.issubdtype(image.dtype, np.integer):
|
||||
hist, bin_centers = _bincount_histogram(image, source_range)
|
||||
else:
|
||||
if source_range == 'image':
|
||||
hist_range = None
|
||||
elif source_range == 'dtype':
|
||||
hist_range = dtype_limits(image, clip_negative=False)
|
||||
else:
|
||||
raise ValueError('Wrong value for the `source_range` argument')
|
||||
hist, bin_edges = np.histogram(image, bins=nbins, range=hist_range)
|
||||
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.
|
||||
|
||||
if normalize:
|
||||
hist = hist / np.sum(hist)
|
||||
return hist, bin_centers
|
||||
|
||||
|
||||
def cumulative_distribution(image, nbins=256):
|
||||
"""Return cumulative distribution function (cdf) for the given image.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : array
|
||||
Image array.
|
||||
nbins : int, optional
|
||||
Number of bins for image histogram.
|
||||
|
||||
Returns
|
||||
-------
|
||||
img_cdf : array
|
||||
Values of cumulative distribution function.
|
||||
bin_centers : array
|
||||
Centers of bins.
|
||||
|
||||
See Also
|
||||
--------
|
||||
histogram
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] https://en.wikipedia.org/wiki/Cumulative_distribution_function
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage import data, exposure, img_as_float
|
||||
>>> image = img_as_float(data.camera())
|
||||
>>> hi = exposure.histogram(image)
|
||||
>>> cdf = exposure.cumulative_distribution(image)
|
||||
>>> np.alltrue(cdf[0] == np.cumsum(hi[0])/float(image.size))
|
||||
True
|
||||
"""
|
||||
hist, bin_centers = histogram(image, nbins)
|
||||
img_cdf = hist.cumsum()
|
||||
img_cdf = img_cdf / float(img_cdf[-1])
|
||||
return img_cdf, bin_centers
|
||||
|
||||
|
||||
def equalize_hist(image, nbins=256, mask=None):
|
||||
"""Return image after histogram equalization.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : array
|
||||
Image array.
|
||||
nbins : int, optional
|
||||
Number of bins for image histogram. Note: this argument is
|
||||
ignored for integer images, for which each integer is its own
|
||||
bin.
|
||||
mask: ndarray of bools or 0s and 1s, optional
|
||||
Array of same shape as `image`. Only points at which mask == True
|
||||
are used for the equalization, which is applied to the whole image.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : float array
|
||||
Image array after histogram equalization.
|
||||
|
||||
Notes
|
||||
-----
|
||||
This function is adapted from [1]_ with the author's permission.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] http://www.janeriksolem.net/histogram-equalization-with-python-and.html
|
||||
.. [2] https://en.wikipedia.org/wiki/Histogram_equalization
|
||||
|
||||
"""
|
||||
if mask is not None:
|
||||
mask = np.array(mask, dtype=bool)
|
||||
cdf, bin_centers = cumulative_distribution(image[mask], nbins)
|
||||
else:
|
||||
cdf, bin_centers = cumulative_distribution(image, nbins)
|
||||
out = np.interp(image.flat, bin_centers, cdf)
|
||||
return out.reshape(image.shape)
|
||||
|
||||
|
||||
def intensity_range(image, range_values='image', clip_negative=False):
|
||||
"""Return image intensity range (min, max) based on desired value type.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : array
|
||||
Input image.
|
||||
range_values : str or 2-tuple, optional
|
||||
The image intensity range is configured by this parameter.
|
||||
The possible values for this parameter are enumerated below.
|
||||
|
||||
'image'
|
||||
Return image min/max as the range.
|
||||
'dtype'
|
||||
Return min/max of the image's dtype as the range.
|
||||
dtype-name
|
||||
Return intensity range based on desired `dtype`. Must be valid key
|
||||
in `DTYPE_RANGE`. Note: `image` is ignored for this range type.
|
||||
2-tuple
|
||||
Return `range_values` as min/max intensities. Note that there's no
|
||||
reason to use this function if you just want to specify the
|
||||
intensity range explicitly. This option is included for functions
|
||||
that use `intensity_range` to support all desired range types.
|
||||
|
||||
clip_negative : bool, optional
|
||||
If True, clip the negative range (i.e. return 0 for min intensity)
|
||||
even if the image dtype allows negative values.
|
||||
"""
|
||||
if range_values == 'dtype':
|
||||
range_values = image.dtype.type
|
||||
|
||||
if range_values == 'image':
|
||||
i_min = np.min(image)
|
||||
i_max = np.max(image)
|
||||
elif range_values in DTYPE_RANGE:
|
||||
i_min, i_max = DTYPE_RANGE[range_values]
|
||||
if clip_negative:
|
||||
i_min = 0
|
||||
else:
|
||||
i_min, i_max = range_values
|
||||
return i_min, i_max
|
||||
|
||||
|
||||
def _output_dtype(dtype_or_range):
|
||||
"""Determine the output dtype for rescale_intensity.
|
||||
|
||||
The dtype is determined according to the following rules:
|
||||
- if ``dtype_or_range`` is a dtype, that is the output dtype.
|
||||
- if ``dtype_or_range`` is a dtype string, that is the dtype used, unless
|
||||
it is not a NumPy data type (e.g. 'uint12' for 12-bit unsigned integers),
|
||||
in which case the data type that can contain it will be used
|
||||
(e.g. uint16 in this case).
|
||||
- if ``dtype_or_range`` is a pair of values, the output data type will be
|
||||
float.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
dtype_or_range : type, string, or 2-tuple of int/float
|
||||
The desired range for the output, expressed as either a NumPy dtype or
|
||||
as a (min, max) pair of numbers.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out_dtype : type
|
||||
The data type appropriate for the desired output.
|
||||
"""
|
||||
if type(dtype_or_range) in [list, tuple, np.ndarray]:
|
||||
# pair of values: always return float.
|
||||
return np.float_
|
||||
if type(dtype_or_range) == type:
|
||||
# already a type: return it
|
||||
return dtype_or_range
|
||||
if dtype_or_range in DTYPE_RANGE:
|
||||
# string key in DTYPE_RANGE dictionary
|
||||
try:
|
||||
# if it's a canonical numpy dtype, convert
|
||||
return np.dtype(dtype_or_range).type
|
||||
except TypeError: # uint10, uint12, uint14
|
||||
# otherwise, return uint16
|
||||
return np.uint16
|
||||
else:
|
||||
raise ValueError(
|
||||
'Incorrect value for out_range, should be a valid image data '
|
||||
f'type or a pair of values, got {dtype_or_range}.'
|
||||
)
|
||||
|
||||
|
||||
def rescale_intensity(image, in_range='image', out_range='dtype'):
|
||||
"""Return image after stretching or shrinking its intensity levels.
|
||||
|
||||
The desired intensity range of the input and output, `in_range` and
|
||||
`out_range` respectively, are used to stretch or shrink the intensity range
|
||||
of the input image. See examples below.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : array
|
||||
Image array.
|
||||
in_range, out_range : str or 2-tuple, optional
|
||||
Min and max intensity values of input and output image.
|
||||
The possible values for this parameter are enumerated below.
|
||||
|
||||
'image'
|
||||
Use image min/max as the intensity range.
|
||||
'dtype'
|
||||
Use min/max of the image's dtype as the intensity range.
|
||||
dtype-name
|
||||
Use intensity range based on desired `dtype`. Must be valid key
|
||||
in `DTYPE_RANGE`.
|
||||
2-tuple
|
||||
Use `range_values` as explicit min/max intensities.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : array
|
||||
Image array after rescaling its intensity. This image is the same dtype
|
||||
as the input image.
|
||||
|
||||
Notes
|
||||
-----
|
||||
.. versionchanged:: 0.17
|
||||
The dtype of the output array has changed to match the output dtype, or
|
||||
float if the output range is specified by a pair of floats.
|
||||
|
||||
See Also
|
||||
--------
|
||||
equalize_hist
|
||||
|
||||
Examples
|
||||
--------
|
||||
By default, the min/max intensities of the input image are stretched to
|
||||
the limits allowed by the image's dtype, since `in_range` defaults to
|
||||
'image' and `out_range` defaults to 'dtype':
|
||||
|
||||
>>> image = np.array([51, 102, 153], dtype=np.uint8)
|
||||
>>> rescale_intensity(image)
|
||||
array([ 0, 127, 255], dtype=uint8)
|
||||
|
||||
It's easy to accidentally convert an image dtype from uint8 to float:
|
||||
|
||||
>>> 1.0 * image
|
||||
array([ 51., 102., 153.])
|
||||
|
||||
Use `rescale_intensity` to rescale to the proper range for float dtypes:
|
||||
|
||||
>>> image_float = 1.0 * image
|
||||
>>> rescale_intensity(image_float)
|
||||
array([0. , 0.5, 1. ])
|
||||
|
||||
To maintain the low contrast of the original, use the `in_range` parameter:
|
||||
|
||||
>>> rescale_intensity(image_float, in_range=(0, 255))
|
||||
array([0.2, 0.4, 0.6])
|
||||
|
||||
If the min/max value of `in_range` is more/less than the min/max image
|
||||
intensity, then the intensity levels are clipped:
|
||||
|
||||
>>> rescale_intensity(image_float, in_range=(0, 102))
|
||||
array([0.5, 1. , 1. ])
|
||||
|
||||
If you have an image with signed integers but want to rescale the image to
|
||||
just the positive range, use the `out_range` parameter. In that case, the
|
||||
output dtype will be float:
|
||||
|
||||
>>> image = np.array([-10, 0, 10], dtype=np.int8)
|
||||
>>> rescale_intensity(image, out_range=(0, 127))
|
||||
array([ 0. , 63.5, 127. ])
|
||||
|
||||
To get the desired range with a specific dtype, use ``.astype()``:
|
||||
|
||||
>>> rescale_intensity(image, out_range=(0, 127)).astype(np.int8)
|
||||
array([ 0, 63, 127], dtype=int8)
|
||||
|
||||
If the input image is constant, the output will be clipped directly to the
|
||||
output range:
|
||||
>>> image = np.array([130, 130, 130], dtype=np.int32)
|
||||
>>> rescale_intensity(image, out_range=(0, 127)).astype(np.int32)
|
||||
array([127, 127, 127], dtype=int32)
|
||||
"""
|
||||
if out_range in ['dtype', 'image']:
|
||||
out_dtype = _output_dtype(image.dtype.type)
|
||||
else:
|
||||
out_dtype = _output_dtype(out_range)
|
||||
|
||||
imin, imax = map(float, intensity_range(image, in_range))
|
||||
omin, omax = map(float, intensity_range(image, out_range,
|
||||
clip_negative=(imin >= 0)))
|
||||
|
||||
if np.any(np.isnan([imin, imax, omin, omax])):
|
||||
warn(
|
||||
"One or more intensity levels are NaN. Rescaling will broadcast "
|
||||
"NaN to the full image. Provide intensity levels yourself to "
|
||||
"avoid this. E.g. with np.nanmin(image), np.nanmax(image).",
|
||||
stacklevel=2
|
||||
)
|
||||
|
||||
image = np.clip(image, imin, imax)
|
||||
|
||||
if imin != imax:
|
||||
image = (image - imin) / (imax - imin)
|
||||
return np.asarray(image * (omax - omin) + omin, dtype=out_dtype)
|
||||
else:
|
||||
return np.clip(image, omin, omax).astype(out_dtype)
|
||||
|
||||
|
||||
def _assert_non_negative(image):
|
||||
|
||||
if np.any(image < 0):
|
||||
raise ValueError('Image Correction methods work correctly only on '
|
||||
'images with non-negative values. Use '
|
||||
'skimage.exposure.rescale_intensity.')
|
||||
|
||||
|
||||
def adjust_gamma(image, gamma=1, gain=1):
|
||||
"""Performs Gamma Correction on the input image.
|
||||
|
||||
Also known as Power Law Transform.
|
||||
This function transforms the input image pixelwise according to the
|
||||
equation ``O = I**gamma`` after scaling each pixel to the range 0 to 1.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
gamma : float, optional
|
||||
Non negative real number. Default value is 1.
|
||||
gain : float, optional
|
||||
The constant multiplier. Default value is 1.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray
|
||||
Gamma corrected output image.
|
||||
|
||||
See Also
|
||||
--------
|
||||
adjust_log
|
||||
|
||||
Notes
|
||||
-----
|
||||
For gamma greater than 1, the histogram will shift towards the left and
|
||||
the output image will be darker than the input image.
|
||||
|
||||
For gamma less than 1, the histogram will shift towards the right and
|
||||
the output image will be brighter than the input image.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] https://en.wikipedia.org/wiki/Gamma_correction
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage import data, exposure, img_as_float
|
||||
>>> image = img_as_float(data.moon())
|
||||
>>> gamma_corrected = exposure.adjust_gamma(image, 2)
|
||||
>>> # Output is darker for gamma > 1
|
||||
>>> image.mean() > gamma_corrected.mean()
|
||||
True
|
||||
"""
|
||||
_assert_non_negative(image)
|
||||
dtype = image.dtype.type
|
||||
|
||||
if gamma < 0:
|
||||
raise ValueError("Gamma should be a non-negative real number.")
|
||||
|
||||
scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])
|
||||
|
||||
out = ((image / scale) ** gamma) * scale * gain
|
||||
return out.astype(dtype)
|
||||
|
||||
|
||||
def adjust_log(image, gain=1, inv=False):
|
||||
"""Performs Logarithmic correction on the input image.
|
||||
|
||||
This function transforms the input image pixelwise according to the
|
||||
equation ``O = gain*log(1 + I)`` after scaling each pixel to the range 0 to 1.
|
||||
For inverse logarithmic correction, the equation is ``O = gain*(2**I - 1)``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
gain : float, optional
|
||||
The constant multiplier. Default value is 1.
|
||||
inv : float, optional
|
||||
If True, it performs inverse logarithmic correction,
|
||||
else correction will be logarithmic. Defaults to False.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray
|
||||
Logarithm corrected output image.
|
||||
|
||||
See Also
|
||||
--------
|
||||
adjust_gamma
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] http://www.ece.ucsb.edu/Faculty/Manjunath/courses/ece178W03/EnhancePart1.pdf
|
||||
|
||||
"""
|
||||
_assert_non_negative(image)
|
||||
dtype = image.dtype.type
|
||||
scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])
|
||||
|
||||
if inv:
|
||||
out = (2 ** (image / scale) - 1) * scale * gain
|
||||
return dtype(out)
|
||||
|
||||
out = np.log2(1 + image / scale) * scale * gain
|
||||
return out.astype(dtype)
|
||||
|
||||
|
||||
def adjust_sigmoid(image, cutoff=0.5, gain=10, inv=False):
|
||||
"""Performs Sigmoid Correction on the input image.
|
||||
|
||||
Also known as Contrast Adjustment.
|
||||
This function transforms the input image pixelwise according to the
|
||||
equation ``O = 1/(1 + exp(gain*(cutoff - I)))`` after scaling each pixel
|
||||
to the range 0 to 1.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
cutoff : float, optional
|
||||
Cutoff of the sigmoid function that shifts the characteristic curve
|
||||
in horizontal direction. Default value is 0.5.
|
||||
gain : float, optional
|
||||
The constant multiplier in exponential's power of sigmoid function.
|
||||
Default value is 10.
|
||||
inv : bool, optional
|
||||
If True, returns the negative sigmoid correction. Defaults to False.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray
|
||||
Sigmoid corrected output image.
|
||||
|
||||
See Also
|
||||
--------
|
||||
adjust_gamma
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Gustav J. Braun, "Image Lightness Rescaling Using Sigmoidal Contrast
|
||||
Enhancement Functions",
|
||||
http://www.cis.rit.edu/fairchild/PDFs/PAP07.pdf
|
||||
|
||||
"""
|
||||
_assert_non_negative(image)
|
||||
dtype = image.dtype.type
|
||||
scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])
|
||||
|
||||
if inv:
|
||||
out = (1 - 1 / (1 + np.exp(gain * (cutoff - image / scale)))) * scale
|
||||
return dtype(out)
|
||||
|
||||
out = (1 / (1 + np.exp(gain * (cutoff - image / scale)))) * scale
|
||||
return out.astype(dtype)
|
||||
|
||||
|
||||
def is_low_contrast(image, fraction_threshold=0.05, lower_percentile=1,
|
||||
upper_percentile=99, method='linear'):
|
||||
"""Determine if an image is low contrast.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : array-like
|
||||
The image under test.
|
||||
fraction_threshold : float, optional
|
||||
The low contrast fraction threshold. An image is considered low-
|
||||
contrast when its range of brightness spans less than this
|
||||
fraction of its data type's full range. [1]_
|
||||
lower_percentile : float, optional
|
||||
Disregard values below this percentile when computing image contrast.
|
||||
upper_percentile : float, optional
|
||||
Disregard values above this percentile when computing image contrast.
|
||||
method : str, optional
|
||||
The contrast determination method. Right now the only available
|
||||
option is "linear".
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : bool
|
||||
True when the image is determined to be low contrast.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] https://scikit-image.org/docs/dev/user_guide/data_types.html
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> image = np.linspace(0, 0.04, 100)
|
||||
>>> is_low_contrast(image)
|
||||
True
|
||||
>>> image[-1] = 1
|
||||
>>> is_low_contrast(image)
|
||||
True
|
||||
>>> is_low_contrast(image, upper_percentile=100)
|
||||
False
|
||||
"""
|
||||
image = np.asanyarray(image)
|
||||
if image.ndim == 3:
|
||||
if image.shape[2] == 4:
|
||||
image = rgba2rgb(image)
|
||||
if image.shape[2] == 3:
|
||||
image = rgb2gray(image)
|
||||
|
||||
dlimits = dtype_limits(image, clip_negative=False)
|
||||
limits = np.percentile(image, [lower_percentile, upper_percentile])
|
||||
ratio = (limits[1] - limits[0]) / (dlimits[1] - dlimits[0])
|
||||
|
||||
return ratio < fraction_threshold
|
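To tie the main exposure.py entry points together, here is a small sketch; the input array is illustrative and only functions defined above are used:

import numpy as np
from skimage import exposure

img = (np.arange(256, dtype=np.uint8) // 4).reshape(16, 16)  # low-contrast uint8
hist, centers = exposure.histogram(img)            # one bin per integer value
cdf, centers = exposure.cumulative_distribution(img)
stretched = exposure.rescale_intensity(img)        # stretched over the full uint8 range
equalized = exposure.equalize_hist(img)            # float image with ~uniform CDF
brighter = exposure.adjust_gamma(img, gamma=0.5)   # gamma < 1 brightens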
@@ -0,0 +1,70 @@
import numpy as np


def _match_cumulative_cdf(source, template):
    """
    Return modified source array so that the cumulative density function of
    its values matches the cumulative density function of the template.
    """
    src_values, src_unique_indices, src_counts = np.unique(source.ravel(),
                                                           return_inverse=True,
                                                           return_counts=True)
    tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True)

    # calculate normalized quantiles for each array
    src_quantiles = np.cumsum(src_counts) / source.size
    tmpl_quantiles = np.cumsum(tmpl_counts) / template.size

    interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values)
    return interp_a_values[src_unique_indices].reshape(source.shape)


def match_histograms(image, reference, *, multichannel=False):
    """Adjust an image so that its cumulative histogram matches that of another.

    The adjustment is applied separately for each channel.

    Parameters
    ----------
    image : ndarray
        Input image. Can be gray-scale or in color.
    reference : ndarray
        Image to match histogram of. Must have the same number of channels as
        image.
    multichannel : bool, optional
        Apply the matching separately for each channel.

    Returns
    -------
    matched : ndarray
        Transformed input image.

    Raises
    ------
    ValueError
        Thrown when the number of channels in the input image and the reference
        differ.

    References
    ----------
    .. [1] http://paulbourke.net/miscellaneous/equalisation/

    """
    if image.ndim != reference.ndim:
        raise ValueError('Image and reference must have the same number '
                         'of channels.')

    if multichannel:
        if image.shape[-1] != reference.shape[-1]:
            raise ValueError('Number of channels in the input image and '
                             'reference image must match!')

        matched = np.empty(image.shape, dtype=image.dtype)
        for channel in range(image.shape[-1]):
            matched_channel = _match_cumulative_cdf(image[..., channel],
                                                    reference[..., channel])
            matched[..., channel] = matched_channel
    else:
        matched = _match_cumulative_cdf(image, reference)

    return matched
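A minimal sketch of histogram matching with the function above, using two grayscale images from skimage.data purely as stand-ins:

from skimage import data, exposure

source = data.camera()      # uint8 grayscale image to adjust
reference = data.moon()     # uint8 grayscale image whose histogram is the target
matched = exposure.match_histograms(source, reference)  # multichannel defaults to False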
25
venv/Lib/site-packages/skimage/exposure/setup.py
Normal file
@@ -0,0 +1,25 @@
#!/usr/bin/env python

import os

base_path = os.path.abspath(os.path.dirname(__file__))


def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration

    config = Configuration('exposure', parent_package, top_path)

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(maintainer='scikit-image Developers',
          author='scikit-image Developers',
          maintainer_email='scikit-image@python.org',
          description='Exposure corrections',
          url='https://github.com/scikit-image/scikit-image',
          license='SciPy License (BSD Style)',
          **(configuration(top_path='').todict())
          )
@@ -0,0 +1,9 @@
from ..._shared.testing import setup_test, teardown_test


def setup():
    setup_test()


def teardown():
    teardown_test()
Binary file not shown.
Binary file not shown.
Binary file not shown.
762
venv/Lib/site-packages/skimage/exposure/tests/test_exposure.py
Normal file
@@ -0,0 +1,762 @@
import warnings
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
from skimage import util
|
||||
from skimage import data
|
||||
from skimage import exposure
|
||||
from skimage.exposure.exposure import intensity_range
|
||||
from skimage.color import rgb2gray
|
||||
from skimage.util.dtype import dtype_range
|
||||
|
||||
from skimage._shared._warnings import expected_warnings
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import (assert_array_equal,
|
||||
assert_array_almost_equal,
|
||||
assert_equal,
|
||||
assert_almost_equal)
|
||||
|
||||
|
||||
# Test integer histograms
|
||||
# =======================
|
||||
|
||||
def test_wrong_source_range():
|
||||
im = np.array([-1, 100], dtype=np.int8)
|
||||
with testing.raises(ValueError):
|
||||
frequencies, bin_centers = exposure.histogram(im, source_range='foobar')
|
||||
|
||||
|
||||
def test_negative_overflow():
|
||||
im = np.array([-1, 100], dtype=np.int8)
|
||||
frequencies, bin_centers = exposure.histogram(im)
|
||||
assert_array_equal(bin_centers, np.arange(-1, 101))
|
||||
assert frequencies[0] == 1
|
||||
assert frequencies[-1] == 1
|
||||
assert_array_equal(frequencies[1:-1], 0)
|
||||
|
||||
|
||||
def test_all_negative_image():
|
||||
im = np.array([-100, -1], dtype=np.int8)
|
||||
frequencies, bin_centers = exposure.histogram(im)
|
||||
assert_array_equal(bin_centers, np.arange(-100, 0))
|
||||
assert frequencies[0] == 1
|
||||
assert frequencies[-1] == 1
|
||||
assert_array_equal(frequencies[1:-1], 0)
|
||||
|
||||
|
||||
def test_int_range_image():
|
||||
im = np.array([10, 100], dtype=np.int8)
|
||||
frequencies, bin_centers = exposure.histogram(im)
|
||||
assert_equal(len(bin_centers), len(frequencies))
|
||||
assert_equal(bin_centers[0], 10)
|
||||
assert_equal(bin_centers[-1], 100)
|
||||
|
||||
|
||||
def test_peak_uint_range_dtype():
|
||||
im = np.array([10, 100], dtype=np.uint8)
|
||||
frequencies, bin_centers = exposure.histogram(im, source_range='dtype')
|
||||
assert_array_equal(bin_centers, np.arange(0, 256))
|
||||
assert_equal(frequencies[10], 1)
|
||||
assert_equal(frequencies[100], 1)
|
||||
assert_equal(frequencies[101], 0)
|
||||
assert_equal(frequencies.shape, (256,))
|
||||
|
||||
|
||||
def test_peak_int_range_dtype():
|
||||
im = np.array([10, 100], dtype=np.int8)
|
||||
frequencies, bin_centers = exposure.histogram(im, source_range='dtype')
|
||||
assert_array_equal(bin_centers, np.arange(-128, 128))
|
||||
assert_equal(frequencies[128+10], 1)
|
||||
assert_equal(frequencies[128+100], 1)
|
||||
assert_equal(frequencies[128+101], 0)
|
||||
assert_equal(frequencies.shape, (256,))
|
||||
|
||||
|
||||
def test_flat_uint_range_dtype():
|
||||
im = np.linspace(0, 255, 256, dtype=np.uint8)
|
||||
frequencies, bin_centers = exposure.histogram(im, source_range='dtype')
|
||||
assert_array_equal(bin_centers, np.arange(0, 256))
|
||||
assert_equal(frequencies.shape, (256,))
|
||||
|
||||
|
||||
def test_flat_int_range_dtype():
|
||||
im = np.linspace(-128, 128, 256, dtype=np.int8)
|
||||
frequencies, bin_centers = exposure.histogram(im, source_range='dtype')
|
||||
assert_array_equal(bin_centers, np.arange(-128, 128))
|
||||
assert_equal(frequencies.shape, (256,))
|
||||
|
||||
|
||||
def test_peak_float_out_of_range_image():
|
||||
im = np.array([10, 100], dtype=np.float16)
|
||||
frequencies, bin_centers = exposure.histogram(im, nbins=90)
|
||||
# offset values by 0.5 for float...
|
||||
assert_array_equal(bin_centers, np.arange(10, 100) + 0.5)
|
||||
|
||||
|
||||
def test_peak_float_out_of_range_dtype():
|
||||
im = np.array([10, 100], dtype=np.float16)
|
||||
nbins = 10
|
||||
frequencies, bin_centers = exposure.histogram(im, nbins=nbins, source_range='dtype')
|
||||
assert_almost_equal(np.min(bin_centers), -0.9, 3)
|
||||
assert_almost_equal(np.max(bin_centers), 0.9, 3)
|
||||
assert_equal(len(bin_centers), 10)
|
||||
|
||||
|
||||
def test_normalize():
|
||||
im = np.array([0, 255, 255], dtype=np.uint8)
|
||||
frequencies, bin_centers = exposure.histogram(im, source_range='dtype',
|
||||
normalize=False)
|
||||
expected = np.zeros(256)
|
||||
expected[0] = 1
|
||||
expected[-1] = 2
|
||||
assert_equal(frequencies, expected)
|
||||
frequencies, bin_centers = exposure.histogram(im, source_range='dtype',
|
||||
normalize=True)
|
||||
expected /= 3.
|
||||
assert_equal(frequencies, expected)
|
||||
|
||||
|
||||
# Test histogram equalization
|
||||
# ===========================
|
||||
|
||||
np.random.seed(0)
|
||||
|
||||
test_img_int = data.camera()
|
||||
# squeeze image intensities to lower image contrast
|
||||
test_img = util.img_as_float(test_img_int)
|
||||
test_img = exposure.rescale_intensity(test_img / 5. + 100)
|
||||
|
||||
|
||||
def test_equalize_uint8_approx():
|
||||
"""Check integer bins used for uint8 images."""
|
||||
img_eq0 = exposure.equalize_hist(test_img_int)
|
||||
img_eq1 = exposure.equalize_hist(test_img_int, nbins=3)
|
||||
np.testing.assert_allclose(img_eq0, img_eq1)
|
||||
|
||||
|
||||
def test_equalize_ubyte():
|
||||
img = util.img_as_ubyte(test_img)
|
||||
img_eq = exposure.equalize_hist(img)
|
||||
|
||||
cdf, bin_edges = exposure.cumulative_distribution(img_eq)
|
||||
check_cdf_slope(cdf)
|
||||
|
||||
|
||||
def test_equalize_float():
|
||||
img = util.img_as_float(test_img)
|
||||
img_eq = exposure.equalize_hist(img)
|
||||
|
||||
cdf, bin_edges = exposure.cumulative_distribution(img_eq)
|
||||
check_cdf_slope(cdf)
|
||||
|
||||
|
||||
def test_equalize_masked():
|
||||
img = util.img_as_float(test_img)
|
||||
mask = np.zeros(test_img.shape)
|
||||
mask[50:150, 50:250] = 1
|
||||
img_mask_eq = exposure.equalize_hist(img, mask=mask)
|
||||
img_eq = exposure.equalize_hist(img)
|
||||
|
||||
cdf, bin_edges = exposure.cumulative_distribution(img_mask_eq)
|
||||
check_cdf_slope(cdf)
|
||||
|
||||
assert not (img_eq == img_mask_eq).all()
|
||||
|
||||
|
||||
def check_cdf_slope(cdf):
|
||||
"""Slope of cdf which should equal 1 for an equalized histogram."""
|
||||
norm_intensity = np.linspace(0, 1, len(cdf))
|
||||
slope, intercept = np.polyfit(norm_intensity, cdf, 1)
|
||||
assert 0.9 < slope < 1.1
|
||||
|
||||
|
||||
# Test intensity range
|
||||
# ====================
|
||||
|
||||
|
||||
@testing.parametrize("test_input,expected", [
|
||||
('image', [0, 1]),
|
||||
('dtype', [0, 255]),
|
||||
((10, 20), [10, 20])
|
||||
])
|
||||
def test_intensity_range_uint8(test_input, expected):
|
||||
image = np.array([0, 1], dtype=np.uint8)
|
||||
out = intensity_range(image, range_values=test_input)
|
||||
assert_array_equal(out, expected)
|
||||
|
||||
|
||||
@testing.parametrize("test_input,expected", [
|
||||
('image', [0.1, 0.2]),
|
||||
('dtype', [-1, 1]),
|
||||
((0.3, 0.4), [0.3, 0.4])
|
||||
])
|
||||
def test_intensity_range_float(test_input, expected):
|
||||
image = np.array([0.1, 0.2], dtype=np.float64)
|
||||
out = intensity_range(image, range_values=test_input)
|
||||
assert_array_equal(out, expected)
|
||||
|
||||
|
||||
def test_intensity_range_clipped_float():
|
||||
image = np.array([0.1, 0.2], dtype=np.float64)
|
||||
out = intensity_range(image, range_values='dtype', clip_negative=True)
|
||||
assert_array_equal(out, (0, 1))
|
||||
|
||||
|
||||
# Test rescale intensity
|
||||
# ======================
|
||||
|
||||
uint10_max = 2**10 - 1
|
||||
uint12_max = 2**12 - 1
|
||||
uint14_max = 2**14 - 1
|
||||
uint16_max = 2**16 - 1
|
||||
|
||||
|
||||
def test_rescale_stretch():
|
||||
image = np.array([51, 102, 153], dtype=np.uint8)
|
||||
out = exposure.rescale_intensity(image)
|
||||
assert out.dtype == np.uint8
|
||||
assert_array_almost_equal(out, [0, 127, 255])
|
||||
|
||||
|
||||
def test_rescale_shrink():
|
||||
image = np.array([51., 102., 153.])
|
||||
out = exposure.rescale_intensity(image)
|
||||
assert_array_almost_equal(out, [0, 0.5, 1])
|
||||
|
||||
|
||||
def test_rescale_in_range():
|
||||
image = np.array([51., 102., 153.])
|
||||
out = exposure.rescale_intensity(image, in_range=(0, 255))
|
||||
assert_array_almost_equal(out, [0.2, 0.4, 0.6])
|
||||
|
||||
|
||||
def test_rescale_in_range_clip():
|
||||
image = np.array([51., 102., 153.])
|
||||
out = exposure.rescale_intensity(image, in_range=(0, 102))
|
||||
assert_array_almost_equal(out, [0.5, 1, 1])
|
||||
|
||||
|
||||
def test_rescale_out_range():
|
||||
"""Check that output range is correct.
|
||||
|
||||
.. versionchanged:: 0.17
|
||||
This function used to return dtype matching the input dtype. It now
|
||||
matches the output.
|
||||
"""
|
||||
image = np.array([-10, 0, 10], dtype=np.int8)
|
||||
out = exposure.rescale_intensity(image, out_range=(0, 127))
|
||||
assert out.dtype == np.float_
|
||||
assert_array_almost_equal(out, [0, 63.5, 127])
|
||||
|
||||
|
||||
def test_rescale_named_in_range():
|
||||
image = np.array([0, uint10_max, uint10_max + 100], dtype=np.uint16)
|
||||
out = exposure.rescale_intensity(image, in_range='uint10')
|
||||
assert_array_almost_equal(out, [0, uint16_max, uint16_max])
|
||||
|
||||
|
||||
def test_rescale_named_out_range():
|
||||
image = np.array([0, uint16_max], dtype=np.uint16)
|
||||
out = exposure.rescale_intensity(image, out_range='uint10')
|
||||
assert_array_almost_equal(out, [0, uint10_max])
|
||||
|
||||
|
||||
def test_rescale_uint12_limits():
|
||||
image = np.array([0, uint16_max], dtype=np.uint16)
|
||||
out = exposure.rescale_intensity(image, out_range='uint12')
|
||||
assert_array_almost_equal(out, [0, uint12_max])
|
||||
|
||||
|
||||
def test_rescale_uint14_limits():
|
||||
image = np.array([0, uint16_max], dtype=np.uint16)
|
||||
out = exposure.rescale_intensity(image, out_range='uint14')
|
||||
assert_array_almost_equal(out, [0, uint14_max])
|
||||
|
||||
|
||||
def test_rescale_all_zeros():
|
||||
image = np.zeros((2, 2), dtype=np.uint8)
|
||||
out = exposure.rescale_intensity(image)
|
||||
assert ~np.isnan(out).all()
|
||||
assert_array_almost_equal(out, image)
|
||||
|
||||
|
||||
def test_rescale_constant():
|
||||
image = np.array([130, 130], dtype=np.uint16)
|
||||
out = exposure.rescale_intensity(image, out_range=(0, 127))
|
||||
assert_array_almost_equal(out, [127, 127])
|
||||
|
||||
|
||||
def test_rescale_same_values():
|
||||
image = np.ones((2, 2))
|
||||
out = exposure.rescale_intensity(image)
|
||||
assert ~np.isnan(out).all()
|
||||
assert_array_almost_equal(out, image)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"in_range,out_range", [("image", "dtype"),
|
||||
("dtype", "image")]
|
||||
)
|
||||
def test_rescale_nan_warning(in_range, out_range):
|
||||
image = np.arange(12, dtype=float).reshape(3, 4)
|
||||
image[1, 1] = np.nan
|
||||
|
||||
msg = (
|
||||
r"One or more intensity levels are NaN\."
|
||||
r" Rescaling will broadcast NaN to the full image\."
|
||||
)
|
||||
|
||||
# 2019/11/10 Passing NaN to np.clip raises a DeprecationWarning for
|
||||
# versions above 1.17
|
||||
# TODO: Remove once NumPy removes this DeprecationWarning
|
||||
numpy_warning_1_17_plus = (
|
||||
r"Passing `np.nan` to mean no clipping in np.clip "
|
||||
r"has always been unreliable|\A\Z"
|
||||
)
|
||||
# 2019/12/06 Passing NaN to np.min and np.max raises a RuntimeWarning for
|
||||
# NumPy < 1.16
|
||||
# TODO: Remove once minimal required NumPy version is 1.16
|
||||
numpy_warning_smaller_1_16 = r"invalid value encountered in reduce|\A\Z"
|
||||
|
||||
with expected_warnings(
|
||||
[msg, numpy_warning_1_17_plus, numpy_warning_smaller_1_16]
|
||||
):
|
||||
exposure.rescale_intensity(image, in_range, out_range)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"out_range, out_dtype", [
|
||||
('uint8', np.uint8),
|
||||
('uint10', np.uint16),
|
||||
('uint12', np.uint16),
|
||||
('uint16', np.uint16),
|
||||
('float', np.float_),
|
||||
]
|
||||
)
|
||||
def test_rescale_output_dtype(out_range, out_dtype):
|
||||
image = np.array([-128, 0, 127], dtype=np.int8)
|
||||
output_image = exposure.rescale_intensity(image, out_range=out_range)
|
||||
assert output_image.dtype == out_dtype
|
||||
|
||||
|
||||
def test_rescale_no_overflow():
|
||||
image = np.array([-128, 0, 127], dtype=np.int8)
|
||||
output_image = exposure.rescale_intensity(image, out_range=np.uint8)
|
||||
testing.assert_array_equal(output_image, [0, 128, 255])
|
||||
assert output_image.dtype == np.uint8
|
||||
|
||||
|
||||
def test_rescale_float_output():
|
||||
image = np.array([-128, 0, 127], dtype=np.int8)
|
||||
output_image = exposure.rescale_intensity(image, out_range=(0, 255))
|
||||
testing.assert_array_equal(output_image, [0, 128, 255])
|
||||
assert output_image.dtype == np.float_
|
||||
|
||||
|
||||
def test_rescale_raises_on_incorrect_out_range():
|
||||
image = np.array([-128, 0, 127], dtype=np.int8)
|
||||
with testing.raises(ValueError):
|
||||
_ = exposure.rescale_intensity(image, out_range='flat')
|
||||
|
||||
# Test adaptive histogram equalization
|
||||
# ====================================
|
||||
|
||||
def test_adapthist_grayscale():
|
||||
"""Test a grayscale float image
|
||||
"""
|
||||
img = util.img_as_float(data.astronaut())
|
||||
img = rgb2gray(img)
|
||||
img = np.dstack((img, img, img))
|
||||
adapted = exposure.equalize_adapthist(img, kernel_size=(57, 51),
|
||||
clip_limit=0.01, nbins=128)
|
||||
assert img.shape == adapted.shape
|
||||
assert_almost_equal(peak_snr(img, adapted), 100.140, 3)
|
||||
assert_almost_equal(norm_brightness_err(img, adapted), 0.0529, 3)
|
||||
|
||||
|
||||
def test_adapthist_color():
|
||||
"""Test an RGB color uint16 image
|
||||
"""
|
||||
img = util.img_as_uint(data.astronaut())
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
warnings.simplefilter('always')
|
||||
hist, bin_centers = exposure.histogram(img)
|
||||
assert len(w) > 0
|
||||
adapted = exposure.equalize_adapthist(img, clip_limit=0.01)
|
||||
|
||||
assert adapted.min() == 0
|
||||
assert adapted.max() == 1.0
|
||||
assert img.shape == adapted.shape
|
||||
full_scale = exposure.rescale_intensity(img)
|
||||
assert_almost_equal(peak_snr(full_scale, adapted), 109.393, 1)
|
||||
assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.02, 2)
|
||||
return data, adapted
|
||||
|
||||
|
||||
def test_adapthist_alpha():
|
||||
"""Test an RGBA color image
|
||||
"""
|
||||
img = util.img_as_float(data.astronaut())
|
||||
alpha = np.ones((img.shape[0], img.shape[1]), dtype=float)
|
||||
img = np.dstack((img, alpha))
|
||||
adapted = exposure.equalize_adapthist(img)
|
||||
assert adapted.shape != img.shape
|
||||
img = img[:, :, :3]
|
||||
full_scale = exposure.rescale_intensity(img)
|
||||
assert img.shape == adapted.shape
|
||||
assert_almost_equal(peak_snr(full_scale, adapted), 109.393, 2)
|
||||
assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.0248, 3)
|
||||
|
||||
|
||||
def test_adapthist_grayscale_Nd():
|
||||
"""
|
||||
Test for n-dimensional consistency with float images
|
||||
Note: Currently if img.ndim == 3, img.shape[2] > 4 must hold for the image
|
||||
not to be interpreted as a color image by @adapt_rgb
|
||||
"""
|
||||
# take 2d image, subsample and stack it
|
||||
img = util.img_as_float(data.astronaut())
|
||||
img = rgb2gray(img)
|
||||
a = 15
|
||||
img2d = util.img_as_float(img[0:-1:a, 0:-1:a])
|
||||
img3d = np.array([img2d] * (img.shape[0] // a))
|
||||
|
||||
# apply CLAHE
|
||||
adapted2d = exposure.equalize_adapthist(img2d,
|
||||
kernel_size=5,
|
||||
clip_limit=0.05)
|
||||
adapted3d = exposure.equalize_adapthist(img3d,
|
||||
kernel_size=5,
|
||||
clip_limit=0.05)
|
||||
|
||||
# check that dimensions of input and output match
|
||||
assert img2d.shape == adapted2d.shape
|
||||
assert img3d.shape == adapted3d.shape
|
||||
|
||||
# check that the result from the stack of 2d images is similar
|
||||
# to the underlying 2d image
|
||||
assert np.mean(np.abs(adapted2d
|
||||
- adapted3d[adapted3d.shape[0] // 2])) < 0.02
|
||||
|
||||
|
||||
def test_adapthist_constant():
|
||||
"""Test constant image, float and uint
|
||||
"""
|
||||
img = np.zeros((8, 8))
|
||||
img += 2
|
||||
img = img.astype(np.uint16)
|
||||
adapted = exposure.equalize_adapthist(img, 3)
|
||||
assert np.min(adapted) == np.max(adapted)
|
||||
|
||||
img = np.zeros((8, 8))
|
||||
img += 0.1
|
||||
img = img.astype(np.float64)
|
||||
adapted = exposure.equalize_adapthist(img, 3)
|
||||
assert np.min(adapted) == np.max(adapted)
|
||||
|
||||
|
||||
def test_adapthist_borders():
    """Test border processing
    """
    img = rgb2gray(util.img_as_float(data.astronaut()))

    # maximize difference between orig and processed img
    img /= 100.
    img[img.shape[0] // 2, img.shape[1] // 2] = 1.

    # check borders are processed for different kernel sizes
    border_index = -1
    for kernel_size in range(51, 71, 2):
        adapted = exposure.equalize_adapthist(img, kernel_size, clip_limit=0.5)
        # Check last columns are processed
        assert norm_brightness_err(adapted[:, border_index],
                                   img[:, border_index]) > 0.1
        # Check last rows are processed
        assert norm_brightness_err(adapted[border_index, :],
                                   img[border_index, :]) > 0.1


def test_adapthist_clip_limit():
    img_u = data.moon()
    img_f = util.img_as_float(img_u)

    # clip_limit=1 disables contrast limiting, so CLAHE should return the
    # float-converted input unchanged

    # uint8 input
    img_clahe = exposure.equalize_adapthist(img_u, clip_limit=1)
    assert_array_equal(img_f, img_clahe)

    # float64 input
    img_clahe = exposure.equalize_adapthist(img_f, clip_limit=1)
    assert_array_equal(img_f, img_clahe)


def peak_snr(img1, img2):
    """Peak signal to noise ratio of two images

    Parameters
    ----------
    img1 : array-like
    img2 : array-like

    Returns
    -------
    peak_snr : float
        Peak signal to noise ratio
    """
    if img1.ndim == 3:
        img1, img2 = rgb2gray(img1.copy()), rgb2gray(img2.copy())
    img1 = util.img_as_float(img1)
    img2 = util.img_as_float(img2)
    mse = 1. / img1.size * np.square(img1 - img2).sum()
    _, max_ = dtype_range[img1.dtype.type]
    return 20 * np.log(max_ / mse)


def norm_brightness_err(img1, img2):
    """Normalized Absolute Mean Brightness Error between two images

    Parameters
    ----------
    img1 : array-like
    img2 : array-like

    Returns
    -------
    norm_brightness_error : float
        Normalized absolute mean brightness error
    """
    if img1.ndim == 3:
        img1, img2 = rgb2gray(img1), rgb2gray(img2)
    ambe = np.abs(img1.mean() - img2.mean())
    nbe = ambe / dtype_range[img1.dtype.type][1]
    return nbe


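# Note on the two helpers above (descriptive only, not used by the asserts):
# peak_snr uses an ad-hoc regression metric, 20 * ln(max / MSE), rather than
# the textbook PSNR of 10 * log10(max**2 / MSE), so its absolute values (such
# as the 109.393 checked earlier) are only meaningful as recorded reference
# numbers. norm_brightness_err is the absolute difference of the two image
# means normalized by the dtype maximum; e.g. two uint8 images with means 100
# and 151 would give roughly 51 / 255 ~ 0.2.

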
# Test Gamma Correction
# =====================

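# Rough sketch of the relationship the gamma tests below exercise (on
# normalized uint8 intensities, gain left at its default of 1):
#     out = 255 * (in / 255) ** gamma
# e.g. for gamma = 2 an input of 64 maps to about 255 * (64 / 255) ** 2 ~ 16.
# This is an illustration only; exposure.adjust_gamma is the authoritative
# implementation.
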
def test_adjust_gamma_1x1_shape():
    """Check that the shape is maintained"""
    img = np.ones([1, 1])
    result = exposure.adjust_gamma(img, 1.5)
    assert img.shape == result.shape


def test_adjust_gamma_one():
    """Same image should be returned for gamma equal to one"""
    image = np.random.uniform(0, 255, (8, 8))
    result = exposure.adjust_gamma(image, 1)
    assert_array_equal(result, image)


def test_adjust_gamma_zero():
    """White image should be returned for gamma equal to zero"""
    image = np.random.uniform(0, 255, (8, 8))
    result = exposure.adjust_gamma(image, 0)
    dtype = image.dtype.type
    assert_array_equal(result, dtype_range[dtype][1])


def test_adjust_gamma_less_one():
    """Verifying the output with expected results for gamma
    correction with gamma equal to half"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [  0,  31,  45,  55,  63,  71,  78,  84],
        [ 90,  95, 100, 105, 110, 115, 119, 123],
        [127, 131, 135, 139, 142, 146, 149, 153],
        [156, 159, 162, 165, 168, 171, 174, 177],
        [180, 183, 186, 188, 191, 194, 196, 199],
        [201, 204, 206, 209, 211, 214, 216, 218],
        [221, 223, 225, 228, 230, 232, 234, 236],
        [238, 241, 243, 245, 247, 249, 251, 253]], dtype=np.uint8)

    result = exposure.adjust_gamma(image, 0.5)
    assert_array_equal(result, expected)


def test_adjust_gamma_greater_one():
    """Verifying the output with expected results for gamma
    correction with gamma equal to two"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [  0,   0,   0,   0,   1,   1,   2,   3],
        [  4,   5,   6,   7,   9,  10,  12,  14],
        [ 16,  18,  20,  22,  25,  27,  30,  33],
        [ 36,  39,  42,  45,  49,  52,  56,  60],
        [ 64,  68,  72,  76,  81,  85,  90,  95],
        [100, 105, 110, 116, 121, 127, 132, 138],
        [144, 150, 156, 163, 169, 176, 182, 189],
        [196, 203, 211, 218, 225, 233, 241, 249]], dtype=np.uint8)

    result = exposure.adjust_gamma(image, 2)
    assert_array_equal(result, expected)


def test_adjust_gamma_negative():
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    with testing.raises(ValueError):
        exposure.adjust_gamma(image, -1)


# Test Logarithmic Correction
# ===========================

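# Rough sketch of the relationship the log tests below exercise (on normalized
# uint8 intensities, gain = 1):
#     out = 255 * log2(1 + in / 255)         forward (adjust_log)
#     out = 255 * (2 ** (in / 255) - 1)      inverse (adjust_log(..., inv=True))
# e.g. an input of 128 maps forward to about 255 * log2(1.5) ~ 149.
# Illustration only; see exposure.adjust_log for the exact implementation.
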
def test_adjust_log_1x1_shape():
    """Check that the shape is maintained"""
    img = np.ones([1, 1])
    result = exposure.adjust_log(img, 1)
    assert img.shape == result.shape


def test_adjust_log():
    """Verifying the output with expected results for logarithmic
    correction with multiplier constant equal to unity"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [  0,   5,  11,  16,  22,  27,  33,  38],
        [ 43,  48,  53,  58,  63,  68,  73,  77],
        [ 82,  86,  91,  95, 100, 104, 109, 113],
        [117, 121, 125, 129, 133, 137, 141, 145],
        [149, 153, 157, 160, 164, 168, 172, 175],
        [179, 182, 186, 189, 193, 196, 199, 203],
        [206, 209, 213, 216, 219, 222, 225, 228],
        [231, 234, 238, 241, 244, 246, 249, 252]], dtype=np.uint8)

    result = exposure.adjust_log(image, 1)
    assert_array_equal(result, expected)


def test_adjust_inv_log():
    """Verifying the output with expected results for inverse logarithmic
    correction with multiplier constant equal to unity"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [  0,   2,   5,   8,  11,  14,  17,  20],
        [ 23,  26,  29,  32,  35,  38,  41,  45],
        [ 48,  51,  55,  58,  61,  65,  68,  72],
        [ 76,  79,  83,  87,  90,  94,  98, 102],
        [106, 110, 114, 118, 122, 126, 130, 134],
        [138, 143, 147, 151, 156, 160, 165, 170],
        [174, 179, 184, 188, 193, 198, 203, 208],
        [213, 218, 224, 229, 234, 239, 245, 250]], dtype=np.uint8)

    result = exposure.adjust_log(image, 1, True)
    assert_array_equal(result, expected)


# Test Sigmoid Correction
# =======================

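# Rough sketch of the relationship the sigmoid tests below exercise (on
# normalized uint8 intensities):
#     out = 255 / (1 + exp(gain * (cutoff - in / 255)))
# e.g. with cutoff = 0.5 and gain = 10, an input of 128 maps to about
# 255 / (1 + exp(-0.02)) ~ 128; the `inv` variant flips the curve.
# Illustration only; see exposure.adjust_sigmoid for the exact implementation.
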
def test_adjust_sigmoid_1x1_shape():
    """Check that the shape is maintained"""
    img = np.ones([1, 1])
    result = exposure.adjust_sigmoid(img, 1, 5)
    assert img.shape == result.shape


def test_adjust_sigmoid_cutoff_one():
    """Verifying the output with expected results for sigmoid correction
    with cutoff equal to one and gain of 5"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [  1,   1,   1,   2,   2,   2,   2,   2],
        [  3,   3,   3,   4,   4,   4,   5,   5],
        [  5,   6,   6,   7,   7,   8,   9,  10],
        [ 10,  11,  12,  13,  14,  15,  16,  18],
        [ 19,  20,  22,  24,  25,  27,  29,  32],
        [ 34,  36,  39,  41,  44,  47,  50,  54],
        [ 57,  61,  64,  68,  72,  76,  80,  85],
        [ 89,  94,  99, 104, 108, 113, 118, 123]], dtype=np.uint8)

    result = exposure.adjust_sigmoid(image, 1, 5)
    assert_array_equal(result, expected)


def test_adjust_sigmoid_cutoff_zero():
    """Verifying the output with expected results for sigmoid correction
    with cutoff equal to zero and gain of 10"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [127, 137, 147, 156, 166, 175, 183, 191],
        [198, 205, 211, 216, 221, 225, 229, 232],
        [235, 238, 240, 242, 244, 245, 247, 248],
        [249, 250, 250, 251, 251, 252, 252, 253],
        [253, 253, 253, 253, 254, 254, 254, 254],
        [254, 254, 254, 254, 254, 254, 254, 254],
        [254, 254, 254, 254, 254, 254, 254, 254],
        [254, 254, 254, 254, 254, 254, 254, 254]], dtype=np.uint8)

    result = exposure.adjust_sigmoid(image, 0, 10)
    assert_array_equal(result, expected)


def test_adjust_sigmoid_cutoff_half():
    """Verifying the output with expected results for sigmoid correction
    with cutoff equal to half and gain of 10"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [  1,   1,   2,   2,   3,   3,   4,   5],
        [  5,   6,   7,   9,  10,  12,  14,  16],
        [ 19,  22,  25,  29,  34,  39,  44,  50],
        [ 57,  64,  72,  80,  89,  99, 108, 118],
        [128, 138, 148, 158, 167, 176, 184, 192],
        [199, 205, 211, 217, 221, 226, 229, 233],
        [236, 238, 240, 242, 244, 246, 247, 248],
        [249, 250, 250, 251, 251, 252, 252, 253]], dtype=np.uint8)

    result = exposure.adjust_sigmoid(image, 0.5, 10)
    assert_array_equal(result, expected)


def test_adjust_inv_sigmoid_cutoff_half():
    """Verifying the output with expected results for inverse sigmoid
    correction with cutoff equal to half and gain of 10"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([
        [253, 253, 252, 252, 251, 251, 250, 249],
        [249, 248, 247, 245, 244, 242, 240, 238],
        [235, 232, 229, 225, 220, 215, 210, 204],
        [197, 190, 182, 174, 165, 155, 146, 136],
        [126, 116, 106,  96,  87,  78,  70,  62],
        [ 55,  49,  43,  37,  33,  28,  25,  21],
        [ 18,  16,  14,  12,  10,   8,   7,   6],
        [  5,   4,   4,   3,   3,   2,   2,   1]], dtype=np.uint8)

    result = exposure.adjust_sigmoid(image, 0.5, 10, True)
    assert_array_equal(result, expected)


def test_negative():
    image = np.arange(-10, 245, 4).reshape((8, 8)).astype(np.double)
    with testing.raises(ValueError):
        exposure.adjust_gamma(image)


def test_is_low_contrast():
    image = np.linspace(0, 0.04, 100)
    assert exposure.is_low_contrast(image)
    image[-1] = 1
    assert exposure.is_low_contrast(image)
    assert not exposure.is_low_contrast(image, upper_percentile=100)

    image = (image * 255).astype(np.uint8)
    assert exposure.is_low_contrast(image)
    assert not exposure.is_low_contrast(image, upper_percentile=100)

    image = (image.astype(np.uint16)) * 2**8
    assert exposure.is_low_contrast(image)
    assert not exposure.is_low_contrast(image, upper_percentile=100)


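# Note on the values exercised above: is_low_contrast compares the intensity
# spread between (by default) the 1st and 99th percentiles against a small
# fraction of the full data-type range (the default threshold appears to be
# 0.05). A ramp from 0 to 0.04 is therefore reported as low contrast, and the
# single bright outlier only changes the verdict once upper_percentile=100
# pulls it into the measured spread.

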
# Test Dask Compatibility
# =======================

def test_dask_histogram():
    pytest.importorskip('dask', reason="dask python library is not installed")
    import dask.array as da
    dask_array = da.from_array(np.array([[0, 1], [1, 2]]), chunks=(1, 2))
    output_hist, output_bins = exposure.histogram(dask_array)
    expected_bins = [0, 1, 2]
    expected_hist = [1, 2, 1]
    assert np.allclose(expected_bins, output_bins)
    assert np.allclose(expected_hist, output_hist)

@@ -0,0 +1,83 @@
import numpy as np

from skimage.exposure import histogram_matching
from skimage import exposure
from skimage import data

from skimage._shared.testing import assert_array_almost_equal, \
    assert_almost_equal

import pytest


@pytest.mark.parametrize('array, template, expected_array', [
    (np.arange(10), np.arange(100), np.arange(9, 100, 10)),
    (np.random.rand(4), np.ones(3), np.ones(4))
])
def test_match_array_values(array, template, expected_array):
    # when
    matched = histogram_matching._match_cumulative_cdf(array, template)

    # then
    assert_array_almost_equal(matched, expected_array)


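# Sketch of what the parametrized cases above check (illustrative reading of
# _match_cumulative_cdf): each source value is replaced by the template value
# at the nearest empirical CDF quantile. For array = np.arange(10) the
# elements sit at quantiles 0.1, 0.2, ..., 1.0, and in template =
# np.arange(100) those quantiles correspond to the values 9, 19, ..., 99,
# i.e. np.arange(9, 100, 10); matching against a constant template maps every
# value to that constant.

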
class TestMatchHistogram:

    image_rgb = data.chelsea()
    template_rgb = data.astronaut()

    @pytest.mark.parametrize('image, reference, multichannel', [
        (image_rgb, template_rgb, True),
        (image_rgb[:, :, 0], template_rgb[:, :, 0], False)
    ])
    def test_match_histograms(self, image, reference, multichannel):
        """Assert that pdf of matched image is close to the reference's pdf for
        all channels and all values of matched"""

        # when
        matched = exposure.match_histograms(image, reference,
                                            multichannel=multichannel)

        matched_pdf = self._calculate_image_empirical_pdf(matched)
        reference_pdf = self._calculate_image_empirical_pdf(reference)

        # then
        for channel in range(len(matched_pdf)):
            reference_values, reference_quantiles = reference_pdf[channel]
            matched_values, matched_quantiles = matched_pdf[channel]

            for i, matched_value in enumerate(matched_values):
                closest_id = (
                    np.abs(reference_values - matched_value)
                ).argmin()
                assert_almost_equal(matched_quantiles[i],
                                    reference_quantiles[closest_id],
                                    decimal=1)

    @pytest.mark.parametrize('image, reference', [
        (image_rgb, template_rgb[:, :, 0]),
        (image_rgb[:, :, 0], template_rgb)
    ])
    def test_raises_value_error_on_channels_mismatch(self, image, reference):
        with pytest.raises(ValueError):
            exposure.match_histograms(image, reference)

    @classmethod
    def _calculate_image_empirical_pdf(cls, image):
        """Helper function for calculating empirical probability density
        function of a given image for all channels"""

        if image.ndim > 2:
            image = image.transpose(2, 0, 1)
        channels = np.array(image, copy=False, ndmin=3)

        channels_pdf = []
        for channel in channels:
            channel_values, counts = np.unique(channel, return_counts=True)
            channel_quantiles = np.cumsum(counts).astype(np.float64)
            channel_quantiles /= channel_quantiles[-1]

            channels_pdf.append((channel_values, channel_quantiles))

        return np.asarray(channels_pdf)