Fixed database typo and removed unnecessary class identifier.
This commit is contained in:
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions
49
venv/Lib/site-packages/skimage/util/__init__.py
Normal file
@@ -0,0 +1,49 @@
import functools
import warnings
import numpy as np

from .dtype import (img_as_float32, img_as_float64, img_as_float,
                    img_as_int, img_as_uint, img_as_ubyte,
                    img_as_bool, dtype_limits)
from .shape import view_as_blocks, view_as_windows
from .noise import random_noise
from .apply_parallel import apply_parallel

from .arraycrop import crop
from .compare import compare_images
from ._regular_grid import regular_grid, regular_seeds
from .unique import unique_rows
from ._invert import invert
from ._montage import montage
from ._map_array import map_array


@functools.wraps(np.pad)
def pad(*args, **kwargs):
    warnings.warn("skimage.util.pad is deprecated and will be removed in "
                  "version 0.19. Please use numpy.pad instead.",
                  FutureWarning, stacklevel=2)
    return np.pad(*args, **kwargs)


__all__ = ['img_as_float32',
           'img_as_float64',
           'img_as_float',
           'img_as_int',
           'img_as_uint',
           'img_as_ubyte',
           'img_as_bool',
           'dtype_limits',
           'view_as_blocks',
           'view_as_windows',
           'pad',
           'crop',
           'compare_images',
           'map_array',
           'montage',
           'random_noise',
           'regular_grid',
           'regular_seeds',
           'apply_parallel',
           'invert',
           'unique_rows',
           ]
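The `pad` wrapper above simply forwards to `numpy.pad` while emitting a deprecation warning. A minimal sketch of that behaviour (illustrative only, not part of this commit; assumes this package tree is importable as `skimage`):

import warnings

import numpy as np
from skimage.util import pad  # deprecated alias for numpy.pad

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    padded = pad(np.ones((2, 2)), 1, mode='constant')

assert padded.shape == (4, 4)                # same result as np.pad
assert caught[0].category is FutureWarning   # deprecation is signalled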
Binary files not shown.
74
venv/Lib/site-packages/skimage/util/_invert.py
Normal file
@@ -0,0 +1,74 @@
import numpy as np
|
||||
from .dtype import dtype_limits
|
||||
|
||||
|
||||
def invert(image, signed_float=False):
|
||||
"""Invert an image.
|
||||
|
||||
Invert the intensity range of the input image, so that the dtype maximum
|
||||
is now the dtype minimum, and vice-versa. This operation is
|
||||
slightly different depending on the input dtype:
|
||||
|
||||
- unsigned integers: subtract the image from the dtype maximum
|
||||
- signed integers: subtract the image from -1 (see Notes)
|
||||
- floats: subtract the image from 1 (if signed_float is False, so we
|
||||
assume the image is unsigned), or from 0 (if signed_float is True).
|
||||
|
||||
See the examples for clarification.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
signed_float : bool, optional
|
||||
If True and the image is of type float, the range is assumed to
|
||||
be [-1, 1]. If False and the image is of type float, the range is
|
||||
assumed to be [0, 1].
|
||||
|
||||
Returns
|
||||
-------
|
||||
inverted : ndarray
|
||||
Inverted image.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Ideally, for signed integers we would simply multiply by -1. However,
|
||||
signed integer ranges are asymmetric. For example, for np.int8, the range
|
||||
of possible values is [-128, 127], so that -128 * -1 equals -128! By
|
||||
subtracting from -1, we correctly map the maximum dtype value to the
|
||||
minimum.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> img = np.array([[100, 0, 200],
|
||||
... [ 0, 50, 0],
|
||||
... [ 30, 0, 255]], np.uint8)
|
||||
>>> invert(img)
|
||||
array([[155, 255, 55],
|
||||
[255, 205, 255],
|
||||
[225, 255, 0]], dtype=uint8)
|
||||
>>> img2 = np.array([[ -2, 0, -128],
|
||||
... [127, 0, 5]], np.int8)
|
||||
>>> invert(img2)
|
||||
array([[ 1, -1, 127],
|
||||
[-128, -1, -6]], dtype=int8)
|
||||
>>> img3 = np.array([[ 0., 1., 0.5, 0.75]])
|
||||
>>> invert(img3)
|
||||
array([[1. , 0. , 0.5 , 0.25]])
|
||||
>>> img4 = np.array([[ 0., 1., -1., -0.25]])
|
||||
>>> invert(img4, signed_float=True)
|
||||
array([[-0. , -1. , 1. , 0.25]])
|
||||
"""
|
||||
if image.dtype == 'bool':
|
||||
inverted = ~image
|
||||
elif np.issubdtype(image.dtype, np.unsignedinteger):
|
||||
max_val = dtype_limits(image, clip_negative=False)[1]
|
||||
inverted = np.subtract(max_val, image, dtype=image.dtype)
|
||||
elif np.issubdtype(image.dtype, np.signedinteger):
|
||||
inverted = np.subtract(-1, image, dtype=image.dtype)
|
||||
else: # float dtype
|
||||
if signed_float:
|
||||
inverted = -image
|
||||
else:
|
||||
inverted = np.subtract(1, image, dtype=image.dtype)
|
||||
return inverted
|
187
venv/Lib/site-packages/skimage/util/_map_array.py
Normal file
@@ -0,0 +1,187 @@
import numpy as np
|
||||
from ._remap import _map_array
|
||||
|
||||
|
||||
def map_array(input_arr, input_vals, output_vals, out=None):
|
||||
"""Map values from input array from input_vals to output_vals.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
input_arr : array of int, shape (M[, N][, P][, ...])
|
||||
The input label image.
|
||||
input_vals : array of int, shape (N,)
|
||||
The values to map from.
|
||||
output_vals : array, shape (N,)
|
||||
The values to map to.
|
||||
out: array, same shape as `input_arr`
|
||||
The output array. Will be created if not provided. It should
|
||||
have the same dtype as `output_vals`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : array, same shape as `input_arr`
|
||||
The array of mapped values.
|
||||
"""
|
||||
|
||||
if not np.issubdtype(input_arr.dtype, np.integer):
|
||||
raise TypeError(
|
||||
'The dtype of an array to be remapped should be integer.'
|
||||
)
|
||||
# We ravel the input array for simplicity of iteration in Cython:
|
||||
orig_shape = input_arr.shape
|
||||
# NumPy docs for `np.ravel()` say:
|
||||
# "When a view is desired in as many cases as possible,
|
||||
# arr.reshape(-1) may be preferable."
|
||||
input_arr = input_arr.reshape(-1)
|
||||
if out is None:
|
||||
out = np.empty(orig_shape, dtype=output_vals.dtype)
|
||||
elif out.shape != orig_shape:
|
||||
raise ValueError(
|
||||
'If out array is provided, it should have the same shape as '
|
||||
f'the input array. Input array has shape {orig_shape}, provided '
|
||||
f'output array has shape {out.shape}.'
|
||||
)
|
||||
try:
|
||||
out_view = out.view()
|
||||
out_view.shape = (-1,) # no-copy reshape/ravel
|
||||
except AttributeError: # if out strides are not compatible with 0-copy
|
||||
raise ValueError(
|
||||
'If out array is provided, it should be either contiguous '
|
||||
f'or 1-dimensional. Got array with shape {out.shape} and '
|
||||
f'strides {out.strides}.'
|
||||
)
|
||||
|
||||
# ensure all arrays have matching types before sending to Cython
|
||||
input_vals = input_vals.astype(input_arr.dtype, copy=False)
|
||||
output_vals = output_vals.astype(out.dtype, copy=False)
|
||||
_map_array(input_arr, out_view, input_vals, output_vals)
|
||||
return out
|
||||
|
||||
|
||||
class ArrayMap:
|
||||
"""Class designed to mimic mapping by NumPy array indexing.
|
||||
|
||||
This class is designed to replicate the use of NumPy arrays for mapping
|
||||
values with indexing:
|
||||
|
||||
>>> values = np.array([0.25, 0.5, 1.0])
|
||||
>>> indices = np.array([[0, 0, 1], [2, 2, 1]])
|
||||
>>> values[indices]
|
||||
array([[0.25, 0.25, 0.5 ],
|
||||
[1. , 1. , 0.5 ]])
|
||||
|
||||
The issue with this indexing is that you need a very large ``values``
|
||||
array if the values in the ``indices`` array are large.
|
||||
|
||||
>>> values = np.array([0.25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0])
|
||||
>>> indices = np.array([[0, 0, 10], [0, 10, 10]])
|
||||
>>> values[indices]
|
||||
array([[0.25, 0.25, 1. ],
|
||||
[0.25, 1. , 1. ]])
|
||||
|
||||
Using this class, the approach is similar, but there is no need to
|
||||
create a large values array:
|
||||
|
||||
>>> in_indices = np.array([0, 10])
|
||||
>>> out_values = np.array([0.25, 1.0])
|
||||
>>> values = ArrayMap(in_indices, out_values)
|
||||
>>> values
|
||||
ArrayMap(array([ 0, 10]), array([0.25, 1. ]))
|
||||
>>> print(values)
|
||||
ArrayMap:
|
||||
0 → 0.25
|
||||
10 → 1.0
|
||||
>>> indices = np.array([[0, 0, 10], [0, 10, 10]])
|
||||
>>> values[indices]
|
||||
array([[0.25, 0.25, 1. ],
|
||||
[0.25, 1. , 1. ]])
|
||||
|
||||
Parameters
|
||||
----------
|
||||
in_values : array of int, shape (N,)
|
||||
The source values from which to map.
|
||||
out_values : array, shape (N,)
|
||||
The destination values to which to map.
|
||||
"""
|
||||
def __init__(self, in_values, out_values):
|
||||
self.in_values = in_values
|
||||
self.out_values = out_values
|
||||
self._max_str_lines = 4
|
||||
self._array = None
|
||||
|
||||
def __len__(self):
|
||||
"""Return one more than the maximum label value being remapped."""
|
||||
return np.max(self.in_values) + 1
|
||||
|
||||
def __array__(self, dtype=None):
|
||||
"""Return an array that behaves like the arraymap when indexed.
|
||||
|
||||
This array can be very large: it is the size of the largest value
|
||||
in the ``in_vals`` array, plus one.
|
||||
"""
|
||||
if dtype is None:
|
||||
dtype = self.out_values.dtype
|
||||
output = np.zeros(np.max(self.in_values) + 1, dtype=dtype)
|
||||
output[self.in_values] = self.out_values
|
||||
return output
|
||||
|
||||
@property
|
||||
def dtype(self):
|
||||
return self.out_values.dtype
|
||||
|
||||
def __repr__(self):
|
||||
return f'ArrayMap({repr(self.in_values)}, {repr(self.out_values)})'
|
||||
|
||||
def __str__(self):
|
||||
if len(self.in_values) <= self._max_str_lines + 1:
|
||||
rows = range(len(self.in_values))
|
||||
string = '\n'.join(
|
||||
['ArrayMap:'] +
|
||||
[f' {self.in_values[i]} → {self.out_values[i]}' for i in rows]
|
||||
)
|
||||
else:
|
||||
rows0 = list(range(0, self._max_str_lines // 2))
|
||||
rows1 = list(range(-self._max_str_lines // 2, 0))
|
||||
string = '\n'.join(
|
||||
['ArrayMap:'] +
|
||||
[f' {self.in_values[i]} → {self.out_values[i]}'
|
||||
for i in rows0] +
|
||||
[' ...'] +
|
||||
[f' {self.in_values[i]} → {self.out_values[i]}'
|
||||
for i in rows1]
|
||||
)
|
||||
return string
|
||||
|
||||
def __call__(self, arr):
|
||||
return self.__getitem__(arr)
|
||||
|
||||
def __getitem__(self, index):
|
||||
scalar = np.isscalar(index)
|
||||
if scalar:
|
||||
index = np.array([index])
|
||||
elif isinstance(index, slice):
|
||||
start = index.start or 0 # treat None or 0 the same way
|
||||
stop = (index.stop
|
||||
if index.stop is not None
|
||||
else len(self))
|
||||
step = index.step
|
||||
index = np.arange(start, stop, step)
|
||||
if index.dtype == bool:
|
||||
index = np.flatnonzero(index)
|
||||
|
||||
out = map_array(
|
||||
index,
|
||||
self.in_values.astype(index.dtype, copy=False),
|
||||
self.out_values,
|
||||
)
|
||||
|
||||
if scalar:
|
||||
out = out[0]
|
||||
return out
|
||||
|
||||
def __setitem__(self, indices, values):
|
||||
if self._array is None:
|
||||
self._array = self.__array__()
|
||||
self._array[indices] = values
|
||||
self.in_values = np.flatnonzero(self._array)
|
||||
self.out_values = self._array[self.in_values]
|
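Unlike most helpers in this file, `map_array` ships without a doctest. A minimal usage sketch (illustrative, not part of this commit; assumes NumPy and this scikit-image tree are importable):

import numpy as np
from skimage.util import map_array

labels = np.array([[1, 1, 5],
                   [5, 9, 9]])

# Relabel 1 -> 1, 5 -> 2, 9 -> 3; the output dtype follows output_vals.
new_labels = map_array(labels, np.array([1, 5, 9]), np.array([1, 2, 3]))
# new_labels should equal:
# array([[1, 1, 2],
#        [2, 3, 3]])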
142
venv/Lib/site-packages/skimage/util/_montage.py
Normal file
@@ -0,0 +1,142 @@
import numpy as np
|
||||
|
||||
|
||||
__all__ = ['montage']
|
||||
|
||||
|
||||
def montage(arr_in, fill='mean', rescale_intensity=False, grid_shape=None,
|
||||
padding_width=0, multichannel=False):
|
||||
"""Create a montage of several single- or multichannel images.
|
||||
|
||||
Create a rectangular montage from an input array representing an ensemble
|
||||
of equally shaped single- (gray) or multichannel (color) images.
|
||||
|
||||
For example, ``montage(arr_in)`` called with the following `arr_in`
|
||||
|
||||
+---+---+---+
|
||||
| 1 | 2 | 3 |
|
||||
+---+---+---+
|
||||
|
||||
will return
|
||||
|
||||
+---+---+
|
||||
| 1 | 2 |
|
||||
+---+---+
|
||||
| 3 | * |
|
||||
+---+---+
|
||||
|
||||
where the '*' patch will be determined by the `fill` parameter.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
arr_in : (K, M, N[, C]) ndarray
|
||||
An array representing an ensemble of `K` images of equal shape.
|
||||
fill : float or array-like of floats or 'mean', optional
|
||||
Value to fill the padding areas and/or the extra tiles in
|
||||
the output array. Has to be `float` for single channel collections.
|
||||
For multichannel collections, it has to be an array-like whose length is the
|
||||
number of channels. If `mean`, uses the mean value over all images.
|
||||
rescale_intensity : bool, optional
|
||||
Whether to rescale the intensity of each image to [0, 1].
|
||||
grid_shape : tuple, optional
|
||||
The desired grid shape for the montage `(ntiles_row, ntiles_column)`.
|
||||
The default aspect ratio is square.
|
||||
padding_width : int, optional
|
||||
The size of the spacing between the tiles and between the tiles and
|
||||
the borders. If non-zero, makes the boundaries of individual images
|
||||
easier to perceive.
|
||||
multichannel : boolean, optional
|
||||
If True, the last `arr_in` dimension is treated as a color channel,
|
||||
otherwise as spatial.
|
||||
|
||||
Returns
|
||||
-------
|
||||
arr_out : (K*(M+p)+p, K*(N+p)+p[, C]) ndarray
|
||||
Output array with input images glued together (including padding `p`).
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> from skimage.util import montage
|
||||
>>> arr_in = np.arange(3 * 2 * 2).reshape(3, 2, 2)
|
||||
>>> arr_in # doctest: +NORMALIZE_WHITESPACE
|
||||
array([[[ 0, 1],
|
||||
[ 2, 3]],
|
||||
[[ 4, 5],
|
||||
[ 6, 7]],
|
||||
[[ 8, 9],
|
||||
[10, 11]]])
|
||||
>>> arr_out = montage(arr_in)
|
||||
>>> arr_out.shape
|
||||
(4, 4)
|
||||
>>> arr_out
|
||||
array([[ 0, 1, 4, 5],
|
||||
[ 2, 3, 6, 7],
|
||||
[ 8, 9, 5, 5],
|
||||
[10, 11, 5, 5]])
|
||||
>>> arr_in.mean()
|
||||
5.5
|
||||
>>> arr_out_nonsquare = montage(arr_in, grid_shape=(1, 3))
|
||||
>>> arr_out_nonsquare
|
||||
array([[ 0, 1, 4, 5, 8, 9],
|
||||
[ 2, 3, 6, 7, 10, 11]])
|
||||
>>> arr_out_nonsquare.shape
|
||||
(2, 6)
|
||||
"""
|
||||
|
||||
# exposure imports scipy.linalg which is quite expensive.
|
||||
# Since skimage.util is in the critical import path, we lazy import
|
||||
# exposure to improve import time
|
||||
from .. import exposure
|
||||
if multichannel:
|
||||
arr_in = np.asarray(arr_in)
|
||||
else:
|
||||
arr_in = np.asarray(arr_in)[..., np.newaxis]
|
||||
|
||||
if arr_in.ndim != 4:
|
||||
raise ValueError('Input array has to be 3-dimensional for grayscale '
|
||||
'images, or 4-dimensional with `multichannel=True` '
|
||||
'for color images.')
|
||||
|
||||
n_images, n_rows, n_cols, n_chan = arr_in.shape
|
||||
|
||||
if grid_shape:
|
||||
ntiles_row, ntiles_col = [int(s) for s in grid_shape]
|
||||
else:
|
||||
ntiles_row = ntiles_col = int(np.ceil(np.sqrt(n_images)))
|
||||
|
||||
# Rescale intensity if necessary
|
||||
if rescale_intensity:
|
||||
for i in range(n_images):
|
||||
arr_in[i] = exposure.rescale_intensity(arr_in[i])
|
||||
|
||||
# Calculate the fill value
|
||||
if fill == 'mean':
|
||||
fill = arr_in.mean(axis=(0, 1, 2))
|
||||
fill = np.atleast_1d(fill).astype(arr_in.dtype)
|
||||
|
||||
# Pre-allocate an array with padding for montage
|
||||
n_pad = padding_width
|
||||
arr_out = np.empty(((n_rows + n_pad) * ntiles_row + n_pad,
|
||||
(n_cols + n_pad) * ntiles_col + n_pad,
|
||||
n_chan), dtype=arr_in.dtype)
|
||||
for idx_chan in range(n_chan):
|
||||
arr_out[..., idx_chan] = fill[idx_chan]
|
||||
|
||||
slices_row = [slice(n_pad + (n_rows + n_pad) * n,
|
||||
n_pad + (n_rows + n_pad) * n + n_rows)
|
||||
for n in range(ntiles_row)]
|
||||
slices_col = [slice(n_pad + (n_cols + n_pad) * n,
|
||||
n_pad + (n_cols + n_pad) * n + n_cols)
|
||||
for n in range(ntiles_col)]
|
||||
|
||||
# Copy the data to the output array
|
||||
for idx_image, image in enumerate(arr_in):
|
||||
idx_sr = idx_image // ntiles_col
|
||||
idx_sc = idx_image % ntiles_col
|
||||
arr_out[slices_row[idx_sr], slices_col[idx_sc], :] = image
|
||||
|
||||
if multichannel:
|
||||
return arr_out
|
||||
else:
|
||||
return arr_out[..., 0]
|
116
venv/Lib/site-packages/skimage/util/_regular_grid.py
Normal file
@@ -0,0 +1,116 @@
import numpy as np
|
||||
|
||||
|
||||
def regular_grid(ar_shape, n_points):
|
||||
"""Find `n_points` regularly spaced along `ar_shape`.
|
||||
|
||||
The returned points (as slices) should be as close to cubically-spaced as
|
||||
possible. Essentially, the points are spaced by the Nth root of the input
|
||||
array size, where N is the number of dimensions. However, if an array
|
||||
dimension cannot fit a full step size, it is "discarded", and the
|
||||
computation is done for only the remaining dimensions.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ar_shape : array-like of ints
|
||||
The shape of the space embedding the grid. ``len(ar_shape)`` is the
|
||||
number of dimensions.
|
||||
n_points : int
|
||||
The (approximate) number of points to embed in the space.
|
||||
|
||||
Returns
|
||||
-------
|
||||
slices : tuple of slice objects
|
||||
A slice along each dimension of `ar_shape`, such that the intersection
|
||||
of all the slices give the coordinates of regularly spaced points.
|
||||
|
||||
.. versionchanged:: 0.14.1
|
||||
In scikit-image 0.14.1 and 0.15, the return type was changed from a
|
||||
list to a tuple to ensure `compatibility with NumPy 1.15`_ and
|
||||
higher. If your code requires the returned result to be a list, you
|
||||
may convert the output of this function to a list with:
|
||||
|
||||
>>> result = list(regular_grid(ar_shape=(3, 20, 40), n_points=8))
|
||||
|
||||
.. _compatibility with NumPy 1.15: https://github.com/numpy/numpy/blob/master/doc/release/1.15.0-notes.rst#deprecations
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> ar = np.zeros((20, 40))
|
||||
>>> g = regular_grid(ar.shape, 8)
|
||||
>>> g
|
||||
(slice(5, None, 10), slice(5, None, 10))
|
||||
>>> ar[g] = 1
|
||||
>>> ar.sum()
|
||||
8.0
|
||||
>>> ar = np.zeros((20, 40))
|
||||
>>> g = regular_grid(ar.shape, 32)
|
||||
>>> g
|
||||
(slice(2, None, 5), slice(2, None, 5))
|
||||
>>> ar[g] = 1
|
||||
>>> ar.sum()
|
||||
32.0
|
||||
>>> ar = np.zeros((3, 20, 40))
|
||||
>>> g = regular_grid(ar.shape, 8)
|
||||
>>> g
|
||||
(slice(1, None, 3), slice(5, None, 10), slice(5, None, 10))
|
||||
>>> ar[g] = 1
|
||||
>>> ar.sum()
|
||||
8.0
|
||||
"""
|
||||
ar_shape = np.asanyarray(ar_shape)
|
||||
ndim = len(ar_shape)
|
||||
unsort_dim_idxs = np.argsort(np.argsort(ar_shape))
|
||||
sorted_dims = np.sort(ar_shape)
|
||||
space_size = float(np.prod(ar_shape))
|
||||
if space_size <= n_points:
|
||||
return (slice(None), ) * ndim
|
||||
stepsizes = np.full(ndim, (space_size / n_points) ** (1.0 / ndim),
|
||||
dtype='float64')
|
||||
if (sorted_dims < stepsizes).any():
|
||||
for dim in range(ndim):
|
||||
stepsizes[dim] = sorted_dims[dim]
|
||||
space_size = float(np.prod(sorted_dims[dim + 1:]))
|
||||
stepsizes[dim + 1:] = ((space_size / n_points) **
|
||||
(1.0 / (ndim - dim - 1)))
|
||||
if (sorted_dims >= stepsizes).all():
|
||||
break
|
||||
starts = (stepsizes // 2).astype(int)
|
||||
stepsizes = np.round(stepsizes).astype(int)
|
||||
slices = [slice(start, None, step) for
|
||||
start, step in zip(starts, stepsizes)]
|
||||
slices = tuple(slices[i] for i in unsort_dim_idxs)
|
||||
return slices
|
||||
|
||||
|
||||
def regular_seeds(ar_shape, n_points, dtype=int):
|
||||
"""Return an image with ~`n_points` regularly-spaced nonzero pixels.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ar_shape : tuple of int
|
||||
The shape of the desired output image.
|
||||
n_points : int
|
||||
The desired number of nonzero points.
|
||||
dtype : numpy data type, optional
|
||||
The desired data type of the output.
|
||||
|
||||
Returns
|
||||
-------
|
||||
seed_img : array of int or bool
|
||||
The desired image.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> regular_seeds((5, 5), 4)
|
||||
array([[0, 0, 0, 0, 0],
|
||||
[0, 1, 0, 2, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 3, 0, 4, 0],
|
||||
[0, 0, 0, 0, 0]])
|
||||
"""
|
||||
grid = regular_grid(ar_shape, n_points)
|
||||
seed_img = np.zeros(ar_shape, dtype=dtype)
|
||||
seed_img[grid] = 1 + np.reshape(np.arange(seed_img[grid].size),
|
||||
seed_img[grid].shape)
|
||||
return seed_img
|
BIN
venv/Lib/site-packages/skimage/util/_remap.cp36-win32.pyd
Normal file
Binary file not shown.
147
venv/Lib/site-packages/skimage/util/apply_parallel.py
Normal file
@@ -0,0 +1,147 @@
__all__ = ['apply_parallel']
|
||||
|
||||
|
||||
def _get_chunks(shape, ncpu):
|
||||
"""Split the array into equal sized chunks based on the number of
|
||||
available processors. The last chunk in each dimension absorbs the
|
||||
remainder array elements if the number of CPUs does not divide evenly into
|
||||
the number of array elements.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> _get_chunks((4, 4), 4)
|
||||
((2, 2), (2, 2))
|
||||
>>> _get_chunks((4, 4), 2)
|
||||
((2, 2), (4,))
|
||||
>>> _get_chunks((5, 5), 2)
|
||||
((2, 3), (5,))
|
||||
>>> _get_chunks((2, 4), 2)
|
||||
((1, 1), (4,))
|
||||
"""
|
||||
# since apply_parallel is in the critical import path, we lazy import
|
||||
# math just when we need it.
|
||||
from math import ceil
|
||||
|
||||
chunks = []
|
||||
nchunks_per_dim = int(ceil(ncpu ** (1./len(shape))))
|
||||
|
||||
used_chunks = 1
|
||||
for i in shape:
|
||||
if used_chunks < ncpu:
|
||||
regular_chunk = i // nchunks_per_dim
|
||||
remainder_chunk = regular_chunk + (i % nchunks_per_dim)
|
||||
|
||||
if regular_chunk == 0:
|
||||
chunk_lens = (remainder_chunk,)
|
||||
else:
|
||||
chunk_lens = ((regular_chunk,) * (nchunks_per_dim - 1) +
|
||||
(remainder_chunk,))
|
||||
else:
|
||||
chunk_lens = (i,)
|
||||
|
||||
chunks.append(chunk_lens)
|
||||
used_chunks *= nchunks_per_dim
|
||||
return tuple(chunks)
|
||||
|
||||
|
||||
def _ensure_dask_array(array, chunks=None):
|
||||
import dask.array as da
|
||||
if isinstance(array, da.Array):
|
||||
return array
|
||||
|
||||
return da.from_array(array, chunks=chunks)
|
||||
|
||||
|
||||
def apply_parallel(function, array, chunks=None, depth=0, mode=None,
|
||||
extra_arguments=(), extra_keywords={}, *, compute=None):
|
||||
"""Map a function in parallel across an array.
|
||||
|
||||
Split an array into possibly overlapping chunks of a given depth and
|
||||
boundary type, call the given function in parallel on the chunks, combine
|
||||
the chunks and return the resulting array.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
function : function
|
||||
Function to be mapped which takes an array as an argument.
|
||||
array : numpy array or dask array
|
||||
Array which the function will be applied to.
|
||||
chunks : int, tuple, or tuple of tuples, optional
|
||||
A single integer is interpreted as the length of one side of a square
|
||||
chunk that should be tiled across the array. One tuple of length
|
||||
``array.ndim`` represents the shape of a chunk, and it is tiled across
|
||||
the array. A list of tuples of length ``ndim``, where each sub-tuple
|
||||
is a sequence of chunk sizes along the corresponding dimension. If
|
||||
None, the array is broken up into chunks based on the number of
|
||||
available cpus. More information about chunks is in the documentation
|
||||
`here <https://dask.pydata.org/en/latest/array-design.html>`_.
|
||||
depth : int, optional
|
||||
Integer equal to the depth of the added boundary cells. Defaults to
|
||||
zero.
|
||||
mode : {'reflect', 'symmetric', 'periodic', 'wrap', 'nearest', 'edge'}, optional
|
||||
type of external boundary padding.
|
||||
extra_arguments : tuple, optional
|
||||
Tuple of arguments to be passed to the function.
|
||||
extra_keywords : dictionary, optional
|
||||
Dictionary of keyword arguments to be passed to the function.
|
||||
compute : bool, optional
|
||||
If ``True``, compute eagerly returning a NumPy Array.
|
||||
If ``False``, compute lazily returning a Dask Array.
|
||||
If ``None`` (default), compute based on array type provided
|
||||
(eagerly for NumPy Arrays and lazily for Dask Arrays).
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray or dask Array
|
||||
Returns the result of applying the operation.
|
||||
Type is dependent on the ``compute`` argument.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Numpy edge modes 'symmetric', 'wrap', and 'edge' are converted to the
|
||||
equivalent ``dask`` boundary modes 'reflect', 'periodic' and 'nearest',
|
||||
respectively.
|
||||
Setting ``compute=False`` can be useful for chaining later operations.
|
||||
For example, region selection to preview a result, or storing large data
|
||||
to disk instead of loading it in memory.
|
||||
|
||||
"""
|
||||
try:
|
||||
# Importing dask takes time. since apply_parallel is on the
|
||||
# minimum import path of skimage, we lazy attempt to import dask
|
||||
import dask.array as da
|
||||
except ImportError:
|
||||
raise RuntimeError("Could not import 'dask'. Please install "
|
||||
"using 'pip install dask'")
|
||||
|
||||
if compute is None:
|
||||
compute = not isinstance(array, da.Array)
|
||||
|
||||
if chunks is None:
|
||||
shape = array.shape
|
||||
try:
|
||||
# since apply_parallel is in the critical import path, we lazy
|
||||
# import multiprocessing just when we need it.
|
||||
from multiprocessing import cpu_count
|
||||
ncpu = cpu_count()
|
||||
except NotImplementedError:
|
||||
ncpu = 4
|
||||
chunks = _get_chunks(shape, ncpu)
|
||||
|
||||
if mode == 'wrap':
|
||||
mode = 'periodic'
|
||||
elif mode == 'symmetric':
|
||||
mode = 'reflect'
|
||||
elif mode == 'edge':
|
||||
mode = 'nearest'
|
||||
|
||||
def wrapped_func(arr):
|
||||
return function(arr, *extra_arguments, **extra_keywords)
|
||||
|
||||
darr = _ensure_dask_array(array, chunks=chunks)
|
||||
|
||||
res = darr.map_overlap(wrapped_func, depth, boundary=mode)
|
||||
if compute:
|
||||
res = res.compute()
|
||||
|
||||
return res
|
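A short usage sketch for `apply_parallel` (illustrative, not part of this commit; assumes `dask` is installed, which the function requires at call time):

import numpy as np
from skimage.util import apply_parallel

image = np.random.random((512, 512))

# Pointwise functions need no overlap between chunks, so depth=0 is fine;
# filters with a spatial footprint should use depth > 0 to avoid seams.
result = apply_parallel(np.sqrt, image, chunks=(256, 256), depth=0)

assert result.shape == image.shape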
63
venv/Lib/site-packages/skimage/util/arraycrop.py
Normal file
@@ -0,0 +1,63 @@
"""
|
||||
The arraycrop module contains functions to crop values from the edges of an
|
||||
n-dimensional array.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
__all__ = ['crop']
|
||||
|
||||
|
||||
def crop(ar, crop_width, copy=False, order='K'):
|
||||
"""Crop array `ar` by `crop_width` along each dimension.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ar : array-like of rank N
|
||||
Input array.
|
||||
crop_width : {sequence, int}
|
||||
Number of values to remove from the edges of each axis.
|
||||
``((before_1, after_1),`` ... ``(before_N, after_N))`` specifies
|
||||
unique crop widths at the start and end of each axis.
|
||||
``((before, after),)`` specifies a fixed start and end crop
|
||||
for every axis.
|
||||
``(n,)`` or ``n`` for integer ``n`` is a shortcut for
|
||||
before = after = ``n`` for all axes.
|
||||
copy : bool, optional
|
||||
If `True`, ensure the returned array is a contiguous copy. Normally,
|
||||
a crop operation will return a discontiguous view of the underlying
|
||||
input array.
|
||||
order : {'C', 'F', 'A', 'K'}, optional
|
||||
If ``copy==True``, control the memory layout of the copy. See
|
||||
``np.copy``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
cropped : array
|
||||
The cropped array. If ``copy=False`` (default), this is a sliced
|
||||
view of the input array.
|
||||
"""
|
||||
# Since arraycrop is in the critical import path, we lazy import distutils
|
||||
# to check the version of numpy
|
||||
# After numpy 1.15, a new backward-compatible function has been
|
||||
# implemented.
|
||||
# See https://github.com/numpy/numpy/pull/11966
|
||||
from distutils.version import LooseVersion as Version
|
||||
old_numpy = Version(np.__version__) < Version('1.16')
|
||||
if old_numpy:
|
||||
from numpy.lib.arraypad import _validate_lengths
|
||||
else:
|
||||
from numpy.lib.arraypad import _as_pairs
|
||||
|
||||
ar = np.array(ar, copy=False)
|
||||
if old_numpy:
|
||||
crops = _validate_lengths(ar, crop_width)
|
||||
else:
|
||||
crops = _as_pairs(crop_width, ar.ndim, as_index=True)
|
||||
slices = tuple(slice(a, ar.shape[i] - b)
|
||||
for i, (a, b) in enumerate(crops))
|
||||
if copy:
|
||||
cropped = np.array(ar[slices], order=order, copy=True)
|
||||
else:
|
||||
cropped = ar[slices]
|
||||
return cropped
|
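A small usage sketch for `crop` (illustrative, not part of this commit; the values are chosen only to show the two `crop_width` forms):

import numpy as np
from skimage.util import crop

a = np.arange(25).reshape(5, 5)

# A scalar crops the same amount from every edge: (5, 5) -> (3, 3).
inner = crop(a, 1)
assert inner.shape == (3, 3)

# Per-axis (before, after) pairs: drop 1 row at the top, 2 at the bottom.
trimmed = crop(a, ((1, 2), (0, 0)))
assert trimmed.shape == (2, 5)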
60
venv/Lib/site-packages/skimage/util/compare.py
Normal file
@@ -0,0 +1,60 @@
import numpy as np
|
||||
from ..util import img_as_float
|
||||
from itertools import product
|
||||
|
||||
|
||||
def compare_images(image1, image2, method='diff', *, n_tiles=(8, 8)):
|
||||
"""
|
||||
Return an image showing the differences between two images.
|
||||
|
||||
.. versionadded:: 0.16
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image1, image2 : 2-D array
|
||||
Images to process, must be of the same shape.
|
||||
method : string, optional
|
||||
Method used for the comparison.
|
||||
Valid values are {'diff', 'blend', 'checkerboard'}.
|
||||
Details are provided in the note section.
|
||||
n_tiles : tuple, optional
|
||||
Used only for the `checkerboard` method. Specifies the number
|
||||
of tiles (row, column) to divide the image.
|
||||
|
||||
Returns
|
||||
-------
|
||||
comparison : 2-D array
|
||||
Image showing the differences.
|
||||
|
||||
Notes
|
||||
-----
|
||||
``'diff'`` computes the absolute difference between the two images.
|
||||
``'blend'`` computes the mean value.
|
||||
``'checkerboard'`` makes tiles of dimension `n_tiles` that display
|
||||
the first and the second image alternately.
|
||||
"""
|
||||
if image1.shape != image2.shape:
|
||||
raise ValueError('Images must have the same shape.')
|
||||
|
||||
img1 = img_as_float(image1)
|
||||
img2 = img_as_float(image2)
|
||||
|
||||
if method == 'diff':
|
||||
comparison = np.abs(img2 - img1)
|
||||
elif method == 'blend':
|
||||
comparison = 0.5 * (img2 + img1)
|
||||
elif method == 'checkerboard':
|
||||
shapex, shapey = img1.shape
|
||||
mask = np.full((shapex, shapey), False)
|
||||
stepx = int(shapex / n_tiles[0])
|
||||
stepy = int(shapey / n_tiles[1])
|
||||
for i, j in product(range(n_tiles[0]), range(n_tiles[1])):
|
||||
if (i + j) % 2 == 0:
|
||||
mask[i * stepx:(i + 1)*stepx, j * stepy:(j + 1) * stepy] = True
|
||||
comparison = np.zeros_like(img1)
|
||||
comparison[mask] = img1[mask]
|
||||
comparison[~mask] = img2[~mask]
|
||||
else:
|
||||
raise ValueError('Wrong value for `method`. '
|
||||
'Must be either "diff", "blend" or "checkerboard".')
|
||||
return comparison
|
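A usage sketch for `compare_images` (illustrative, not part of this commit; random data stands in for real images):

import numpy as np
from skimage.util import compare_images

rng = np.random.RandomState(0)
img1 = rng.random_sample((64, 64))
img2 = img1.copy()
img2[16:48, 16:48] = 0          # introduce a localized difference

diff = compare_images(img1, img2, method='diff')    # absolute difference
blend = compare_images(img1, img2, method='blend')  # pixelwise mean
checker = compare_images(img1, img2, method='checkerboard', n_tiles=(4, 4))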
549
venv/Lib/site-packages/skimage/util/dtype.py
Normal file
@@ -0,0 +1,549 @@
import numpy as np
|
||||
from warnings import warn
|
||||
|
||||
|
||||
__all__ = ['img_as_float32', 'img_as_float64', 'img_as_float',
|
||||
'img_as_int', 'img_as_uint', 'img_as_ubyte',
|
||||
'img_as_bool', 'dtype_limits']
|
||||
|
||||
# For integers Numpy uses `_integer_types` basis internally, and builds a leaky
|
||||
# `np.XintYY` abstraction on top of it. This leads to situations when, for
|
||||
# example, there are two np.Xint64 dtypes with the same attributes but
|
||||
# different object references. In order to avoid any potential issues,
|
||||
# we use the basis dtypes here. For more information, see:
|
||||
# - https://github.com/scikit-image/scikit-image/issues/3043
|
||||
# For convenience, for these dtypes we indicate also the possible bit depths
|
||||
# (some of them are platform specific). For the details, see:
|
||||
# http://www.unix.org/whitepapers/64bit.html
|
||||
_integer_types = (np.byte, np.ubyte, # 8 bits
|
||||
np.short, np.ushort, # 16 bits
|
||||
np.intc, np.uintc, # 16 or 32 or 64 bits
|
||||
np.int_, np.uint, # 32 or 64 bits
|
||||
np.longlong, np.ulonglong) # 64 bits
|
||||
_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max)
|
||||
for t in _integer_types}
|
||||
dtype_range = {np.bool_: (False, True),
|
||||
np.bool8: (False, True),
|
||||
np.float16: (-1, 1),
|
||||
np.float32: (-1, 1),
|
||||
np.float64: (-1, 1)}
|
||||
dtype_range.update(_integer_ranges)
|
||||
|
||||
_supported_types = list(dtype_range.keys())
|
||||
|
||||
|
||||
def dtype_limits(image, clip_negative=False):
|
||||
"""Return intensity limits, i.e. (min, max) tuple, of the image's dtype.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
clip_negative : bool, optional
|
||||
If True, clip the negative range (i.e. return 0 for min intensity)
|
||||
even if the image dtype allows negative values.
|
||||
|
||||
Returns
|
||||
-------
|
||||
imin, imax : tuple
|
||||
Lower and upper intensity limits.
|
||||
"""
|
||||
imin, imax = dtype_range[image.dtype.type]
|
||||
if clip_negative:
|
||||
imin = 0
|
||||
return imin, imax
|
||||
|
||||
|
||||
def _dtype_itemsize(itemsize, *dtypes):
|
||||
"""Return first of `dtypes` with itemsize greater than `itemsize`
|
||||
|
||||
Parameters
|
||||
----------
|
||||
itemsize: int
|
||||
The data type object element size.
|
||||
|
||||
Other Parameters
|
||||
----------------
|
||||
*dtypes:
|
||||
Any Object accepted by `np.dtype` to be converted to a data
|
||||
type object
|
||||
|
||||
Returns
|
||||
-------
|
||||
dtype: data type object
|
||||
First of `dtypes` with itemsize greater than or equal to `itemsize`.
|
||||
|
||||
"""
|
||||
return next(dt for dt in dtypes if np.dtype(dt).itemsize >= itemsize)
|
||||
|
||||
|
||||
def _dtype_bits(kind, bits, itemsize=1):
|
||||
"""Return dtype of `kind` that can store a `bits` wide unsigned int
|
||||
|
||||
Parameters:
|
||||
kind: str
|
||||
Data type kind.
|
||||
bits: int
|
||||
Desired number of bits.
|
||||
itemsize: int
|
||||
The data type object element size.
|
||||
|
||||
Returns
|
||||
-------
|
||||
dtype: data type object
|
||||
Data type of `kind` that can store a `bits` wide unsigned int
|
||||
|
||||
"""
|
||||
|
||||
s = next(i for i in (itemsize, ) + (2, 4, 8) if
|
||||
bits < (i * 8) or (bits == (i * 8) and kind == 'u'))
|
||||
|
||||
return np.dtype(kind + str(s))
|
||||
|
||||
|
||||
def _scale(a, n, m, copy=True):
|
||||
"""Scale an array of unsigned/positive integers from `n` to `m` bits.
|
||||
|
||||
Numbers can be represented exactly only if `m` is a multiple of `n`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
a : ndarray
|
||||
Input image array.
|
||||
n : int
|
||||
Number of bits currently used to encode the values in `a`.
|
||||
m : int
|
||||
Desired number of bits to encode the values in `out`.
|
||||
copy : bool, optional
|
||||
If True, allocates and returns new array. Otherwise, modifies
|
||||
`a` in place.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : array
|
||||
Output image array. Has the same kind as `a`.
|
||||
"""
|
||||
kind = a.dtype.kind
|
||||
if n > m and a.max() < 2 ** m:
|
||||
mnew = int(np.ceil(m / 2) * 2)
|
||||
if mnew > m:
|
||||
dtype = "int{}".format(mnew)
|
||||
else:
|
||||
dtype = "uint{}".format(mnew)
|
||||
n = int(np.ceil(n / 2) * 2)
|
||||
warn("Downcasting {} to {} without scaling because max "
|
||||
"value {} fits in {}".format(a.dtype, dtype, a.max(), dtype),
|
||||
stacklevel=3)
|
||||
return a.astype(_dtype_bits(kind, m))
|
||||
elif n == m:
|
||||
return a.copy() if copy else a
|
||||
elif n > m:
|
||||
# downscale with precision loss
|
||||
if copy:
|
||||
b = np.empty(a.shape, _dtype_bits(kind, m))
|
||||
np.floor_divide(a, 2**(n - m), out=b, dtype=a.dtype,
|
||||
casting='unsafe')
|
||||
return b
|
||||
else:
|
||||
a //= 2**(n - m)
|
||||
return a
|
||||
elif m % n == 0:
|
||||
# exact upscale to a multiple of `n` bits
|
||||
if copy:
|
||||
b = np.empty(a.shape, _dtype_bits(kind, m))
|
||||
np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)
|
||||
return b
|
||||
else:
|
||||
a = a.astype(_dtype_bits(kind, m, a.dtype.itemsize), copy=False)
|
||||
a *= (2**m - 1) // (2**n - 1)
|
||||
return a
|
||||
else:
|
||||
# upscale to a multiple of `n` bits,
|
||||
# then downscale with precision loss
|
||||
o = (m // n + 1) * n
|
||||
if copy:
|
||||
b = np.empty(a.shape, _dtype_bits(kind, o))
|
||||
np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)
|
||||
b //= 2**(o - m)
|
||||
return b
|
||||
else:
|
||||
a = a.astype(_dtype_bits(kind, o, a.dtype.itemsize), copy=False)
|
||||
a *= (2**o - 1) // (2**n - 1)
|
||||
a //= 2**(o - m)
|
||||
return a
|
||||
|
||||
|
||||
def _convert(image, dtype, force_copy=False, uniform=False):
|
||||
"""
|
||||
Convert an image to the requested data-type.
|
||||
|
||||
Warnings are issued in case of precision loss, or when negative values
|
||||
are clipped during conversion to unsigned integer types (sign loss).
|
||||
|
||||
Floating point values are expected to be normalized and will be clipped
|
||||
to the range [0.0, 1.0] or [-1.0, 1.0] when converting to unsigned or
|
||||
signed integers respectively.
|
||||
|
||||
Numbers are not shifted to the negative side when converting from
|
||||
unsigned to signed integer types. Negative values will be clipped when
|
||||
converting to unsigned integers.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
dtype : dtype
|
||||
Target data-type.
|
||||
force_copy : bool, optional
|
||||
Force a copy of the data, irrespective of its current dtype.
|
||||
uniform : bool, optional
|
||||
Uniformly quantize the floating point range to the integer range.
|
||||
By default (uniform=False) floating point values are scaled and
|
||||
rounded to the nearest integers, which minimizes back and forth
|
||||
conversion errors.
|
||||
|
||||
.. versionchanged :: 0.15
|
||||
``_convert`` no longer warns about possible precision or sign
|
||||
information loss. See discussions on these warnings at:
|
||||
https://github.com/scikit-image/scikit-image/issues/2602
|
||||
https://github.com/scikit-image/scikit-image/issues/543#issuecomment-208202228
|
||||
https://github.com/scikit-image/scikit-image/pull/3575
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] DirectX data conversion rules.
|
||||
https://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx
|
||||
.. [2] Data Conversions. In "OpenGL ES 2.0 Specification v2.0.25",
|
||||
pp 7-8. Khronos Group, 2010.
|
||||
.. [3] Proper treatment of pixels as integers. A.W. Paeth.
|
||||
In "Graphics Gems I", pp 249-256. Morgan Kaufmann, 1990.
|
||||
.. [4] Dirty Pixels. J. Blinn. In "Jim Blinn's corner: Dirty Pixels",
|
||||
pp 47-57. Morgan Kaufmann, 1998.
|
||||
|
||||
"""
|
||||
image = np.asarray(image)
|
||||
dtypeobj_in = image.dtype
|
||||
dtypeobj_out = np.dtype(dtype)
|
||||
dtype_in = dtypeobj_in.type
|
||||
dtype_out = dtypeobj_out.type
|
||||
kind_in = dtypeobj_in.kind
|
||||
kind_out = dtypeobj_out.kind
|
||||
itemsize_in = dtypeobj_in.itemsize
|
||||
itemsize_out = dtypeobj_out.itemsize
|
||||
|
||||
# Below, we do an `issubdtype` check. Its purpose is to find out
|
||||
# whether we can get away without doing any image conversion. This happens
|
||||
# when:
|
||||
#
|
||||
# - the output and input dtypes are the same or
|
||||
# - when the output is specified as a type, and the input dtype
|
||||
# is a subclass of that type (e.g. `np.floating` will allow
|
||||
# `float32` and `float64` arrays through)
|
||||
|
||||
if np.issubdtype(dtype_in, np.obj2sctype(dtype)):
|
||||
if force_copy:
|
||||
image = image.copy()
|
||||
return image
|
||||
|
||||
if not (dtype_in in _supported_types and dtype_out in _supported_types):
|
||||
raise ValueError("Can not convert from {} to {}."
|
||||
.format(dtypeobj_in, dtypeobj_out))
|
||||
|
||||
if kind_in in 'ui':
|
||||
imin_in = np.iinfo(dtype_in).min
|
||||
imax_in = np.iinfo(dtype_in).max
|
||||
if kind_out in 'ui':
|
||||
imin_out = np.iinfo(dtype_out).min
|
||||
imax_out = np.iinfo(dtype_out).max
|
||||
|
||||
# any -> binary
|
||||
if kind_out == 'b':
|
||||
return image > dtype_in(dtype_range[dtype_in][1] / 2)
|
||||
|
||||
# binary -> any
|
||||
if kind_in == 'b':
|
||||
result = image.astype(dtype_out)
|
||||
if kind_out != 'f':
|
||||
result *= dtype_out(dtype_range[dtype_out][1])
|
||||
return result
|
||||
|
||||
# float -> any
|
||||
if kind_in == 'f':
|
||||
if kind_out == 'f':
|
||||
# float -> float
|
||||
return image.astype(dtype_out)
|
||||
|
||||
if np.min(image) < -1.0 or np.max(image) > 1.0:
|
||||
raise ValueError("Images of type float must be between -1 and 1.")
|
||||
# floating point -> integer
|
||||
# use float type that can represent output integer type
|
||||
computation_type = _dtype_itemsize(itemsize_out, dtype_in,
|
||||
np.float32, np.float64)
|
||||
|
||||
if not uniform:
|
||||
if kind_out == 'u':
|
||||
image_out = np.multiply(image, imax_out,
|
||||
dtype=computation_type)
|
||||
else:
|
||||
image_out = np.multiply(image, (imax_out - imin_out) / 2,
|
||||
dtype=computation_type)
|
||||
image_out -= 1.0 / 2.
|
||||
np.rint(image_out, out=image_out)
|
||||
np.clip(image_out, imin_out, imax_out, out=image_out)
|
||||
elif kind_out == 'u':
|
||||
image_out = np.multiply(image, imax_out + 1,
|
||||
dtype=computation_type)
|
||||
np.clip(image_out, 0, imax_out, out=image_out)
|
||||
else:
|
||||
image_out = np.multiply(image, (imax_out - imin_out + 1.0) / 2.0,
|
||||
dtype=computation_type)
|
||||
np.floor(image_out, out=image_out)
|
||||
np.clip(image_out, imin_out, imax_out, out=image_out)
|
||||
return image_out.astype(dtype_out)
|
||||
|
||||
# signed/unsigned int -> float
|
||||
if kind_out == 'f':
|
||||
# use float type that can exactly represent input integers
|
||||
computation_type = _dtype_itemsize(itemsize_in, dtype_out,
|
||||
np.float32, np.float64)
|
||||
|
||||
if kind_in == 'u':
|
||||
# using np.divide or np.multiply doesn't copy the data
|
||||
# until the computation time
|
||||
image = np.multiply(image, 1. / imax_in,
|
||||
dtype=computation_type)
|
||||
# DirectX uses this conversion also for signed ints
|
||||
# if imin_in:
|
||||
# np.maximum(image, -1.0, out=image)
|
||||
else:
|
||||
image = np.add(image, 0.5, dtype=computation_type)
|
||||
image *= 2 / (imax_in - imin_in)
|
||||
|
||||
return np.asarray(image, dtype_out)
|
||||
|
||||
# unsigned int -> signed/unsigned int
|
||||
if kind_in == 'u':
|
||||
if kind_out == 'i':
|
||||
# unsigned int -> signed int
|
||||
image = _scale(image, 8 * itemsize_in, 8 * itemsize_out - 1)
|
||||
return image.view(dtype_out)
|
||||
else:
|
||||
# unsigned int -> unsigned int
|
||||
return _scale(image, 8 * itemsize_in, 8 * itemsize_out)
|
||||
|
||||
# signed int -> unsigned int
|
||||
if kind_out == 'u':
|
||||
image = _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out)
|
||||
result = np.empty(image.shape, dtype_out)
|
||||
np.maximum(image, 0, out=result, dtype=image.dtype, casting='unsafe')
|
||||
return result
|
||||
|
||||
# signed int -> signed int
|
||||
if itemsize_in > itemsize_out:
|
||||
return _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out - 1)
|
||||
|
||||
image = image.astype(_dtype_bits('i', itemsize_out * 8))
|
||||
image -= imin_in
|
||||
image = _scale(image, 8 * itemsize_in, 8 * itemsize_out, copy=False)
|
||||
image += imin_out
|
||||
return image.astype(dtype_out)
|
||||
|
||||
|
||||
def convert(image, dtype, force_copy=False, uniform=False):
|
||||
warn("The use of this function is discouraged as its behavior may change "
|
||||
"dramatically in scikit-image 1.0. This function will be removed"
|
||||
"in scikit-image 1.0.", FutureWarning, stacklevel=2)
|
||||
return _convert(image=image, dtype=dtype,
|
||||
force_copy=force_copy, uniform=uniform)
|
||||
|
||||
|
||||
if _convert.__doc__ is not None:
|
||||
convert.__doc__ = _convert.__doc__ + """
|
||||
|
||||
Warns
|
||||
-----
|
||||
FutureWarning:
|
||||
.. versionadded:: 0.17
|
||||
|
||||
The use of this function is discouraged as its behavior may change
|
||||
dramatically in scikit-image 1.0. This function will be removed
|
||||
in scikit-image 1.0.
|
||||
"""
|
||||
|
||||
|
||||
def img_as_float32(image, force_copy=False):
|
||||
"""Convert an image to single-precision (32-bit) floating point format.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
force_copy : bool, optional
|
||||
Force a copy of the data, irrespective of its current dtype.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray of float32
|
||||
Output image.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The range of a floating point image is [0.0, 1.0] or [-1.0, 1.0] when
|
||||
converting from unsigned or signed datatypes, respectively.
|
||||
If the input image has a float type, intensity values are not modified
|
||||
and can be outside the ranges [0.0, 1.0] or [-1.0, 1.0].
|
||||
|
||||
"""
|
||||
return _convert(image, np.float32, force_copy)
|
||||
|
||||
|
||||
def img_as_float64(image, force_copy=False):
|
||||
"""Convert an image to double-precision (64-bit) floating point format.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
force_copy : bool, optional
|
||||
Force a copy of the data, irrespective of its current dtype.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray of float64
|
||||
Output image.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The range of a floating point image is [0.0, 1.0] or [-1.0, 1.0] when
|
||||
converting from unsigned or signed datatypes, respectively.
|
||||
If the input image has a float type, intensity values are not modified
|
||||
and can be outside the ranges [0.0, 1.0] or [-1.0, 1.0].
|
||||
|
||||
"""
|
||||
return _convert(image, np.float64, force_copy)
|
||||
|
||||
|
||||
def img_as_float(image, force_copy=False):
|
||||
"""Convert an image to floating point format.
|
||||
|
||||
This function is similar to `img_as_float64`, but will not convert
|
||||
lower-precision floating point arrays to `float64`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
force_copy : bool, optional
|
||||
Force a copy of the data, irrespective of its current dtype.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray of float
|
||||
Output image.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The range of a floating point image is [0.0, 1.0] or [-1.0, 1.0] when
|
||||
converting from unsigned or signed datatypes, respectively.
|
||||
If the input image has a float type, intensity values are not modified
|
||||
and can be outside the ranges [0.0, 1.0] or [-1.0, 1.0].
|
||||
|
||||
"""
|
||||
return _convert(image, np.floating, force_copy)
|
||||
|
||||
|
||||
def img_as_uint(image, force_copy=False):
|
||||
"""Convert an image to 16-bit unsigned integer format.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
force_copy : bool, optional
|
||||
Force a copy of the data, irrespective of its current dtype.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray of uint16
|
||||
Output image.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Negative input values will be clipped.
|
||||
Positive values are scaled between 0 and 65535.
|
||||
|
||||
"""
|
||||
return _convert(image, np.uint16, force_copy)
|
||||
|
||||
|
||||
def img_as_int(image, force_copy=False):
|
||||
"""Convert an image to 16-bit signed integer format.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
force_copy : bool, optional
|
||||
Force a copy of the data, irrespective of its current dtype.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray of int16
|
||||
Output image.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The values are scaled between -32768 and 32767.
|
||||
If the input data-type is positive-only (e.g., uint8), then
|
||||
the output image will still only have positive values.
|
||||
|
||||
"""
|
||||
return _convert(image, np.int16, force_copy)
|
||||
|
||||
|
||||
def img_as_ubyte(image, force_copy=False):
|
||||
"""Convert an image to 8-bit unsigned integer format.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
force_copy : bool, optional
|
||||
Force a copy of the data, irrespective of its current dtype.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray of ubyte (uint8)
|
||||
Output image.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Negative input values will be clipped.
|
||||
Positive values are scaled between 0 and 255.
|
||||
|
||||
"""
|
||||
return _convert(image, np.uint8, force_copy)
|
||||
|
||||
|
||||
def img_as_bool(image, force_copy=False):
|
||||
"""Convert an image to boolean format.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
force_copy : bool, optional
|
||||
Force a copy of the data, irrespective of its current dtype.
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray of bool (`bool_`)
|
||||
Output image.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The upper half of the input dtype's positive range is True, and the lower
|
||||
half is False. All negative values (if present) are False.
|
||||
|
||||
"""
|
||||
return _convert(image, np.bool_, force_copy)
|
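A worked sketch of the conversion helpers above (illustrative, not part of this commit; the exact rounded values assume the round-half-to-even behaviour of `np.rint`):

import numpy as np
from skimage.util import img_as_float, img_as_ubyte

f = np.array([0.0, 0.5, 1.0])

u = img_as_ubyte(f)
# Floats in [0, 1] are scaled to the full uint8 range: 0.5 * 255 = 127.5,
# which np.rint rounds to 128, so u should be array([0, 128, 255], dtype=uint8).

back = img_as_float(u)
# uint8 is rescaled to [0, 1]; the round trip is close but not exact,
# since 128 / 255 != 0.5.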
24
venv/Lib/site-packages/skimage/util/lookfor.py
Normal file
@@ -0,0 +1,24 @@
import numpy as np
import sys


def lookfor(what):
    """Do a keyword search on scikit-image docstrings.

    Parameters
    ----------
    what : str
        Words to look for.

    Examples
    --------
    >>> import skimage
    >>> skimage.lookfor('regular_grid')
    Search results for 'regular_grid'
    ---------------------------------
    skimage.lookfor
        Do a keyword search on scikit-image docstrings.
    skimage.util.regular_grid
        Find `n_points` regularly spaced along `ar_shape`.
    """
    return np.lookfor(what, sys.modules[__name__.split('.')[0]])
192
venv/Lib/site-packages/skimage/util/noise.py
Normal file
@@ -0,0 +1,192 @@
import numpy as np
|
||||
from .dtype import img_as_float
|
||||
|
||||
|
||||
__all__ = ['random_noise']
|
||||
|
||||
|
||||
def random_noise(image, mode='gaussian', seed=None, clip=True, **kwargs):
|
||||
"""
|
||||
Function to add random noise of various types to a floating-point image.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image data. Will be converted to float.
|
||||
mode : str, optional
|
||||
One of the following strings, selecting the type of noise to add:
|
||||
|
||||
- 'gaussian' Gaussian-distributed additive noise.
|
||||
- 'localvar' Gaussian-distributed additive noise, with specified
|
||||
local variance at each point of `image`.
|
||||
- 'poisson' Poisson-distributed noise generated from the data.
|
||||
- 'salt' Replaces random pixels with 1.
|
||||
- 'pepper' Replaces random pixels with 0 (for unsigned images) or
|
||||
-1 (for signed images).
|
||||
- 's&p' Replaces random pixels with either 1 or `low_val`, where
|
||||
`low_val` is 0 for unsigned images or -1 for signed
|
||||
images.
|
||||
- 'speckle' Multiplicative noise using out = image + n*image, where
|
||||
n is uniform noise with specified mean & variance.
|
||||
seed : int, optional
|
||||
If provided, this will set the random seed before generating noise,
|
||||
for valid pseudo-random comparisons.
|
||||
clip : bool, optional
|
||||
If True (default), the output will be clipped after noise is applied
|
||||
for modes `'speckle'`, `'poisson'`, and `'gaussian'`. This is
|
||||
needed to maintain the proper image data range. If False, clipping
|
||||
is not applied, and the output may extend beyond the range [-1, 1].
|
||||
mean : float, optional
|
||||
Mean of random distribution. Used in 'gaussian' and 'speckle'.
|
||||
Default : 0.
|
||||
var : float, optional
|
||||
Variance of random distribution. Used in 'gaussian' and 'speckle'.
|
||||
Note: variance = (standard deviation) ** 2. Default : 0.01
|
||||
local_vars : ndarray, optional
|
||||
Array of positive floats, same shape as `image`, defining the local
|
||||
variance at every image point. Used in 'localvar'.
|
||||
amount : float, optional
|
||||
Proportion of image pixels to replace with noise on range [0, 1].
|
||||
Used in 'salt', 'pepper', and 's&p'. Default : 0.05
|
||||
salt_vs_pepper : float, optional
|
||||
Proportion of salt vs. pepper noise for 's&p' on range [0, 1].
|
||||
Higher values represent more salt. Default : 0.5 (equal amounts)
|
||||
|
||||
Returns
|
||||
-------
|
||||
out : ndarray
|
||||
Output floating-point image data on range [0, 1] or [-1, 1] if the
|
||||
input `image` was unsigned or signed, respectively.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Speckle, Poisson, Localvar, and Gaussian noise may generate noise outside
|
||||
the valid image range. The default is to clip (not alias) these values,
|
||||
but they may be preserved by setting `clip=False`. Note that in this case
|
||||
the output may contain values outside the ranges [0, 1] or [-1, 1].
|
||||
Use this option with care.
|
||||
|
||||
Because of the prevalence of exclusively positive floating-point images in
|
||||
intermediate calculations, it is not possible to intuit if an input is
|
||||
signed based on dtype alone. Instead, negative values are explicitly
|
||||
searched for. Only if found does this function assume signed input.
|
||||
Unexpected results only occur in rare, poorly exposed cases (e.g. if all
|
||||
values are above 50 percent gray in a signed `image`). In this event,
|
||||
manually scaling the input to the positive domain will solve the problem.
|
||||
|
||||
The Poisson distribution is only defined for positive integers. To apply
|
||||
this noise type, the number of unique values in the image is found and
|
||||
the next round power of two is used to scale up the floating-point result,
|
||||
after which it is scaled back down to the floating-point image range.
|
||||
|
||||
To generate Poisson noise against a signed image, the signed image is
|
||||
temporarily converted to an unsigned image in the floating point domain,
|
||||
Poisson noise is generated, then it is returned to the original range.
|
||||
|
||||
"""
|
||||
mode = mode.lower()
|
||||
|
||||
# Detect if a signed image was input
|
||||
if image.min() < 0:
|
||||
low_clip = -1.
|
||||
else:
|
||||
low_clip = 0.
|
||||
|
||||
image = img_as_float(image)
|
||||
if seed is not None:
|
||||
np.random.seed(seed=seed)
|
||||
|
||||
allowedtypes = {
|
||||
'gaussian': 'gaussian_values',
|
||||
'localvar': 'localvar_values',
|
||||
'poisson': 'poisson_values',
|
||||
'salt': 'sp_values',
|
||||
'pepper': 'sp_values',
|
||||
's&p': 's&p_values',
|
||||
'speckle': 'gaussian_values'}
|
||||
|
||||
kwdefaults = {
|
||||
'mean': 0.,
|
||||
'var': 0.01,
|
||||
'amount': 0.05,
|
||||
'salt_vs_pepper': 0.5,
|
||||
'local_vars': np.zeros_like(image) + 0.01}
|
||||
|
||||
allowedkwargs = {
|
||||
'gaussian_values': ['mean', 'var'],
|
||||
'localvar_values': ['local_vars'],
|
||||
'sp_values': ['amount'],
|
||||
's&p_values': ['amount', 'salt_vs_pepper'],
|
||||
'poisson_values': []}
|
||||
|
||||
for key in kwargs:
|
||||
if key not in allowedkwargs[allowedtypes[mode]]:
|
||||
raise ValueError('%s keyword not in allowed keywords %s' %
|
||||
(key, allowedkwargs[allowedtypes[mode]]))
|
||||
|
||||
# Set kwarg defaults
|
||||
for kw in allowedkwargs[allowedtypes[mode]]:
|
||||
kwargs.setdefault(kw, kwdefaults[kw])
|
||||
|
||||
if mode == 'gaussian':
|
||||
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
|
||||
image.shape)
|
||||
out = image + noise
|
||||
|
||||
elif mode == 'localvar':
|
||||
# Ensure local variance input is correct
|
||||
if (kwargs['local_vars'] <= 0).any():
|
||||
raise ValueError('All values of `local_vars` must be > 0.')
|
||||
|
||||
# Safe shortcut usage broadcasts kwargs['local_vars'] as a ufunc
|
||||
out = image + np.random.normal(0, kwargs['local_vars'] ** 0.5)
|
||||
|
||||
elif mode == 'poisson':
|
||||
# Determine unique values in image & calculate the next power of two
|
||||
vals = len(np.unique(image))
|
||||
vals = 2 ** np.ceil(np.log2(vals))
|
||||
|
||||
# Ensure image is exclusively positive
|
||||
if low_clip == -1.:
|
||||
old_max = image.max()
|
||||
image = (image + 1.) / (old_max + 1.)
|
||||
|
||||
# Generating noise for each unique value in image.
|
||||
out = np.random.poisson(image * vals) / float(vals)
|
||||
|
||||
# Return image to original range if input was signed
|
||||
if low_clip == -1.:
|
||||
out = out * (old_max + 1.) - 1.
|
||||
|
||||
elif mode == 'salt':
|
||||
# Re-call function with mode='s&p' and salt_vs_pepper=1 (all salt noise)
|
||||
out = random_noise(image, mode='s&p', seed=seed,
|
||||
amount=kwargs['amount'], salt_vs_pepper=1.)
|
||||
|
||||
elif mode == 'pepper':
|
||||
# Re-call function with mode='s&p' and salt_vs_pepper=0 (all pepper noise)
|
||||
out = random_noise(image, mode='s&p', seed=seed,
|
||||
amount=kwargs['amount'], salt_vs_pepper=0.)
|
||||
|
||||
elif mode == 's&p':
|
||||
out = image.copy()
|
||||
p = kwargs['amount']
|
||||
q = kwargs['salt_vs_pepper']
|
||||
flipped = np.random.choice([True, False], size=image.shape,
|
||||
p=[p, 1 - p])
|
||||
salted = np.random.choice([True, False], size=image.shape,
|
||||
p=[q, 1 - q])
|
||||
peppered = ~salted
|
||||
out[flipped & salted] = 1
|
||||
out[flipped & peppered] = low_clip
|
||||
|
||||
elif mode == 'speckle':
|
||||
noise = np.random.normal(kwargs['mean'], kwargs['var'] ** 0.5,
|
||||
image.shape)
|
||||
out = image + image * noise
|
||||
|
||||
# Clip back to original range, if necessary
|
||||
if clip:
|
||||
out = np.clip(out, low_clip, 1.0)
|
||||
|
||||
return out
|
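As a quick illustration of the Poisson scaling rule described in the docstring above (number of unique values rounded up to the next power of two) and of the `clip` behaviour, here is a minimal usage sketch; it is not part of the committed file and assumes only that numpy and skimage are importable:

# Illustrative sketch only -- not part of the diff.
import numpy as np
from skimage.util import random_noise

img = np.linspace(0, 1, 64).reshape(8, 8)             # unsigned float image on [0, 1]
vals = len(np.unique(img))                            # 64 unique intensities
vals = 2 ** np.ceil(np.log2(vals))                    # next power of two used for scaling
noisy = random_noise(img, mode='poisson', seed=42)    # clipped back to [0, 1] by default
unclipped = random_noise(img, mode='poisson', seed=42, clip=False)
print(vals, noisy.max(), unclipped.max())             # unclipped values may exceed 1.0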
32
venv/Lib/site-packages/skimage/util/setup.py
Normal file
|
@ -0,0 +1,32 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
from skimage._build import cython
|
||||
|
||||
base_path = os.path.abspath(os.path.dirname(__file__))
|
||||
|
||||
|
||||
def configuration(parent_package='', top_path=None):
|
||||
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
|
||||
|
||||
config = Configuration('util', parent_package, top_path)
|
||||
|
||||
cython(['_remap.pyx'], working_path=base_path)
|
||||
# note: the extra compiler flag -std=c++0x is needed to access the
|
||||
# std::unordered_map container on some earlier gcc compilers. See:
|
||||
# https://stackoverflow.com/a/3973692/224254
|
||||
config.add_extension('_remap', sources='_remap.cpp',
|
||||
include_dirs=[get_numpy_include_dirs()],
|
||||
language='c++', extra_compile_args=['-std=c++0x'])
|
||||
|
||||
return config
|
||||
|
||||
if __name__ == '__main__':
|
||||
from numpy.distutils.core import setup
|
||||
setup(maintainer='scikit-image Developers',
|
||||
maintainer_email='scikit-image@python.org',
|
||||
description='Segmentation Algorithms',
|
||||
url='https://github.com/scikit-image/scikit-image',
|
||||
license='SciPy License (BSD Style)',
|
||||
**(configuration(top_path='').todict())
|
||||
)
|
248
venv/Lib/site-packages/skimage/util/shape.py
Normal file
|
@ -0,0 +1,248 @@
|
|||
import numbers
|
||||
import numpy as np
|
||||
from numpy.lib.stride_tricks import as_strided
|
||||
from warnings import warn
|
||||
|
||||
__all__ = ['view_as_blocks', 'view_as_windows']
|
||||
|
||||
|
||||
def view_as_blocks(arr_in, block_shape):
|
||||
"""Block view of the input n-dimensional array (using re-striding).
|
||||
|
||||
Blocks are non-overlapping views of the input array.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
arr_in : ndarray
|
||||
N-d input array.
|
||||
block_shape : tuple
|
||||
The shape of the block. Each dimension must divide evenly into the
|
||||
corresponding dimensions of `arr_in`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
arr_out : ndarray
|
||||
Block view of the input array.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> from skimage.util.shape import view_as_blocks
|
||||
>>> A = np.arange(4*4).reshape(4,4)
|
||||
>>> A
|
||||
array([[ 0, 1, 2, 3],
|
||||
[ 4, 5, 6, 7],
|
||||
[ 8, 9, 10, 11],
|
||||
[12, 13, 14, 15]])
|
||||
>>> B = view_as_blocks(A, block_shape=(2, 2))
|
||||
>>> B[0, 0]
|
||||
array([[0, 1],
|
||||
[4, 5]])
|
||||
>>> B[0, 1]
|
||||
array([[2, 3],
|
||||
[6, 7]])
|
||||
>>> B[1, 0, 1, 1]
|
||||
13
|
||||
|
||||
>>> A = np.arange(4*4*6).reshape(4,4,6)
|
||||
>>> A # doctest: +NORMALIZE_WHITESPACE
|
||||
array([[[ 0, 1, 2, 3, 4, 5],
|
||||
[ 6, 7, 8, 9, 10, 11],
|
||||
[12, 13, 14, 15, 16, 17],
|
||||
[18, 19, 20, 21, 22, 23]],
|
||||
[[24, 25, 26, 27, 28, 29],
|
||||
[30, 31, 32, 33, 34, 35],
|
||||
[36, 37, 38, 39, 40, 41],
|
||||
[42, 43, 44, 45, 46, 47]],
|
||||
[[48, 49, 50, 51, 52, 53],
|
||||
[54, 55, 56, 57, 58, 59],
|
||||
[60, 61, 62, 63, 64, 65],
|
||||
[66, 67, 68, 69, 70, 71]],
|
||||
[[72, 73, 74, 75, 76, 77],
|
||||
[78, 79, 80, 81, 82, 83],
|
||||
[84, 85, 86, 87, 88, 89],
|
||||
[90, 91, 92, 93, 94, 95]]])
|
||||
>>> B = view_as_blocks(A, block_shape=(1, 2, 2))
|
||||
>>> B.shape
|
||||
(4, 2, 3, 1, 2, 2)
|
||||
>>> B[2:, 0, 2] # doctest: +NORMALIZE_WHITESPACE
|
||||
array([[[[52, 53],
|
||||
[58, 59]]],
|
||||
[[[76, 77],
|
||||
[82, 83]]]])
|
||||
"""
|
||||
if not isinstance(block_shape, tuple):
|
||||
raise TypeError('block needs to be a tuple')
|
||||
|
||||
block_shape = np.array(block_shape)
|
||||
if (block_shape <= 0).any():
|
||||
raise ValueError("'block_shape' elements must be strictly positive")
|
||||
|
||||
if block_shape.size != arr_in.ndim:
|
||||
raise ValueError("'block_shape' must have the same length "
|
||||
"as 'arr_in.shape'")
|
||||
|
||||
arr_shape = np.array(arr_in.shape)
|
||||
if (arr_shape % block_shape).sum() != 0:
|
||||
raise ValueError("'block_shape' is not compatible with 'arr_in'")
|
||||
|
||||
# -- restride the array to build the block view
|
||||
new_shape = tuple(arr_shape // block_shape) + tuple(block_shape)
|
||||
new_strides = tuple(arr_in.strides * block_shape) + arr_in.strides
|
||||
|
||||
arr_out = as_strided(arr_in, shape=new_shape, strides=new_strides)
|
||||
|
||||
return arr_out
|
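The restriding performed above can be checked directly on a small array; the following sketch is illustrative only and simply re-derives the shape and strides that view_as_blocks builds:

# Illustrative sketch only -- re-deriving the block view's shape and strides.
import numpy as np
from skimage.util import view_as_blocks

A = np.arange(16, dtype=np.int64).reshape(4, 4)   # C-contiguous, strides (32, 8)
B = view_as_blocks(A, (2, 2))
print(B.shape)                  # (2, 2, 2, 2): arr_shape // block_shape + block_shape
print(B.strides)                # (64, 16, 32, 8): arr.strides * block_shape + arr.strides
print(np.shares_memory(A, B))   # True: a view is returned, no data are copied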
||||
|
||||
|
||||
def view_as_windows(arr_in, window_shape, step=1):
|
||||
"""Rolling window view of the input n-dimensional array.
|
||||
|
||||
Windows are overlapping views of the input array, with adjacent windows
|
||||
shifted by a single row or column (or an index of a higher dimension).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
arr_in : ndarray
|
||||
N-d input array.
|
||||
window_shape : integer or tuple of length arr_in.ndim
|
||||
Defines the shape of the elementary n-dimensional orthotope
|
||||
(better known as a hyperrectangle [1]_) of the rolling window view.
|
||||
If an integer is given, the shape will be a hypercube of
|
||||
sidelength given by its value.
|
||||
step : integer or tuple of length arr_in.ndim
|
||||
Indicates step size at which extraction shall be performed.
|
||||
If integer is given, then the step is uniform in all dimensions.
|
||||
|
||||
Returns
|
||||
-------
|
||||
arr_out : ndarray
|
||||
(rolling) window view of the input array.
|
||||
|
||||
Notes
|
||||
-----
|
||||
One should be very careful with rolling views when it comes to
|
||||
memory usage. Indeed, although a 'view' has the same memory
|
||||
footprint as its base array, the actual array that emerges when this
|
||||
'view' is used in a computation is generally a (much) larger array
|
||||
than the original, especially for 2-dimensional arrays and above.
|
||||
|
||||
For example, let us consider a 3 dimensional array of size (100,
|
||||
100, 100) of ``float64``. This array takes about 8*100**3 Bytes for
|
||||
storage which is just 8 MB. If one decides to build a rolling view
|
||||
on this array with a window of (3, 3, 3) the hypothetical size of
|
||||
the rolling view (if one was to reshape the view for example) would
|
||||
be 8*(100-3+1)**3*3**3 which is about 203 MB! The scaling becomes
|
||||
even worse as the dimension of the input array becomes larger.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] https://en.wikipedia.org/wiki/Hyperrectangle
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> from skimage.util.shape import view_as_windows
|
||||
>>> A = np.arange(4*4).reshape(4,4)
|
||||
>>> A
|
||||
array([[ 0, 1, 2, 3],
|
||||
[ 4, 5, 6, 7],
|
||||
[ 8, 9, 10, 11],
|
||||
[12, 13, 14, 15]])
|
||||
>>> window_shape = (2, 2)
|
||||
>>> B = view_as_windows(A, window_shape)
|
||||
>>> B[0, 0]
|
||||
array([[0, 1],
|
||||
[4, 5]])
|
||||
>>> B[0, 1]
|
||||
array([[1, 2],
|
||||
[5, 6]])
|
||||
|
||||
>>> A = np.arange(10)
|
||||
>>> A
|
||||
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
|
||||
>>> window_shape = (3,)
|
||||
>>> B = view_as_windows(A, window_shape)
|
||||
>>> B.shape
|
||||
(8, 3)
|
||||
>>> B
|
||||
array([[0, 1, 2],
|
||||
[1, 2, 3],
|
||||
[2, 3, 4],
|
||||
[3, 4, 5],
|
||||
[4, 5, 6],
|
||||
[5, 6, 7],
|
||||
[6, 7, 8],
|
||||
[7, 8, 9]])
|
||||
|
||||
>>> A = np.arange(5*4).reshape(5, 4)
|
||||
>>> A
|
||||
array([[ 0, 1, 2, 3],
|
||||
[ 4, 5, 6, 7],
|
||||
[ 8, 9, 10, 11],
|
||||
[12, 13, 14, 15],
|
||||
[16, 17, 18, 19]])
|
||||
>>> window_shape = (4, 3)
|
||||
>>> B = view_as_windows(A, window_shape)
|
||||
>>> B.shape
|
||||
(2, 2, 4, 3)
|
||||
>>> B # doctest: +NORMALIZE_WHITESPACE
|
||||
array([[[[ 0, 1, 2],
|
||||
[ 4, 5, 6],
|
||||
[ 8, 9, 10],
|
||||
[12, 13, 14]],
|
||||
[[ 1, 2, 3],
|
||||
[ 5, 6, 7],
|
||||
[ 9, 10, 11],
|
||||
[13, 14, 15]]],
|
||||
[[[ 4, 5, 6],
|
||||
[ 8, 9, 10],
|
||||
[12, 13, 14],
|
||||
[16, 17, 18]],
|
||||
[[ 5, 6, 7],
|
||||
[ 9, 10, 11],
|
||||
[13, 14, 15],
|
||||
[17, 18, 19]]]])
|
||||
"""
|
||||
|
||||
# -- basic checks on arguments
|
||||
if not isinstance(arr_in, np.ndarray):
|
||||
raise TypeError("`arr_in` must be a numpy ndarray")
|
||||
|
||||
ndim = arr_in.ndim
|
||||
|
||||
if isinstance(window_shape, numbers.Number):
|
||||
window_shape = (window_shape,) * ndim
|
||||
if not (len(window_shape) == ndim):
|
||||
raise ValueError("`window_shape` is incompatible with `arr_in.shape`")
|
||||
|
||||
if isinstance(step, numbers.Number):
|
||||
if step < 1:
|
||||
raise ValueError("`step` must be >= 1")
|
||||
step = (step,) * ndim
|
||||
if len(step) != ndim:
|
||||
raise ValueError("`step` is incompatible with `arr_in.shape`")
|
||||
|
||||
arr_shape = np.array(arr_in.shape)
|
||||
window_shape = np.array(window_shape, dtype=arr_shape.dtype)
|
||||
|
||||
if ((arr_shape - window_shape) < 0).any():
|
||||
raise ValueError("`window_shape` is too large")
|
||||
|
||||
if ((window_shape - 1) < 0).any():
|
||||
raise ValueError("`window_shape` is too small")
|
||||
|
||||
# -- build rolling window view
|
||||
slices = tuple(slice(None, None, st) for st in step)
|
||||
window_strides = np.array(arr_in.strides)
|
||||
|
||||
indexing_strides = arr_in[slices].strides
|
||||
|
||||
win_indices_shape = (((np.array(arr_in.shape) - np.array(window_shape))
|
||||
// np.array(step)) + 1)
|
||||
|
||||
new_shape = tuple(list(win_indices_shape) + list(window_shape))
|
||||
strides = tuple(list(indexing_strides) + list(window_strides))
|
||||
|
||||
arr_out = as_strided(arr_in, shape=new_shape, strides=strides)
|
||||
return arr_out
|
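The memory warning and window-count arithmetic above can be reproduced with a short sketch (illustrative only, not part of the diff):

# Illustrative sketch only: window counts follow (shape - window_shape) // step + 1.
import numpy as np
from skimage.util import view_as_windows

a = np.zeros((100, 100, 100), dtype=np.float64)   # ~8 MB of actual storage
w = view_as_windows(a, (3, 3, 3))                  # still a view, nothing is copied
print(w.shape)                                     # (98, 98, 98, 3, 3, 3)
hypothetical = 8 * (100 - 3 + 1) ** 3 * 3 ** 3     # bytes if the view were materialised
print(hypothetical / 1e6)                          # ~203 MB, as noted in the docstring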
9
venv/Lib/site-packages/skimage/util/tests/__init__.py
Normal file
|
@ -0,0 +1,9 @@
|
|||
from ..._shared.testing import setup_test, teardown_test
|
||||
|
||||
|
||||
def setup():
|
||||
setup_test()
|
||||
|
||||
|
||||
def teardown():
|
||||
teardown_test()
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,96 @@
|
|||
import numpy as np
|
||||
|
||||
from skimage._shared.testing import assert_array_almost_equal
|
||||
from skimage.filters import threshold_local, gaussian
|
||||
from skimage.util.apply_parallel import apply_parallel
|
||||
|
||||
import pytest
|
||||
da = pytest.importorskip('dask.array')
|
||||
|
||||
|
||||
def test_apply_parallel():
|
||||
# data
|
||||
a = np.arange(144).reshape(12, 12).astype(float)
|
||||
|
||||
# apply the filter
|
||||
expected1 = threshold_local(a, 3)
|
||||
result1 = apply_parallel(threshold_local, a, chunks=(6, 6), depth=5,
|
||||
extra_arguments=(3,),
|
||||
extra_keywords={'mode': 'reflect'})
|
||||
|
||||
assert_array_almost_equal(result1, expected1)
|
||||
|
||||
def wrapped_gauss(arr):
|
||||
return gaussian(arr, 1, mode='reflect')
|
||||
|
||||
expected2 = gaussian(a, 1, mode='reflect')
|
||||
result2 = apply_parallel(wrapped_gauss, a, chunks=(6, 6), depth=5)
|
||||
|
||||
assert_array_almost_equal(result2, expected2)
|
||||
|
||||
expected3 = gaussian(a, 1, mode='reflect')
|
||||
result3 = apply_parallel(
|
||||
wrapped_gauss, da.from_array(a, chunks=(6, 6)), depth=5, compute=True
|
||||
)
|
||||
|
||||
assert isinstance(result3, np.ndarray)
|
||||
assert_array_almost_equal(result3, expected3)
|
||||
|
||||
|
||||
def test_apply_parallel_lazy():
|
||||
# data
|
||||
a = np.arange(144).reshape(12, 12).astype(float)
|
||||
d = da.from_array(a, chunks=(6, 6))
|
||||
|
||||
# apply the filter
|
||||
expected1 = threshold_local(a, 3)
|
||||
result1 = apply_parallel(threshold_local, a, chunks=(6, 6), depth=5,
|
||||
extra_arguments=(3,),
|
||||
extra_keywords={'mode': 'reflect'},
|
||||
compute=False)
|
||||
|
||||
# apply the filter on a Dask Array
|
||||
result2 = apply_parallel(threshold_local, d, depth=5,
|
||||
extra_arguments=(3,),
|
||||
extra_keywords={'mode': 'reflect'})
|
||||
|
||||
assert isinstance(result1, da.Array)
|
||||
|
||||
assert_array_almost_equal(result1.compute(), expected1)
|
||||
|
||||
assert isinstance(result2, da.Array)
|
||||
|
||||
assert_array_almost_equal(result2.compute(), expected1)
|
||||
|
||||
|
||||
def test_no_chunks():
|
||||
a = np.ones(1 * 4 * 8 * 9).reshape(1, 4, 8, 9)
|
||||
|
||||
def add_42(arr):
|
||||
return arr + 42
|
||||
|
||||
expected = add_42(a)
|
||||
result = apply_parallel(add_42, a)
|
||||
|
||||
assert_array_almost_equal(result, expected)
|
||||
|
||||
|
||||
def test_apply_parallel_wrap():
|
||||
def wrapped(arr):
|
||||
return gaussian(arr, 1, mode='wrap')
|
||||
a = np.arange(144).reshape(12, 12).astype(float)
|
||||
expected = gaussian(a, 1, mode='wrap')
|
||||
result = apply_parallel(wrapped, a, chunks=(6, 6), depth=5, mode='wrap')
|
||||
|
||||
assert_array_almost_equal(result, expected)
|
||||
|
||||
|
||||
def test_apply_parallel_nearest():
|
||||
def wrapped(arr):
|
||||
return gaussian(arr, 1, mode='nearest')
|
||||
a = np.arange(144).reshape(12, 12).astype(float)
|
||||
expected = gaussian(a, 1, mode='nearest')
|
||||
result = apply_parallel(wrapped, a, chunks=(6, 6), depth={0: 5, 1: 5},
|
||||
mode='nearest')
|
||||
|
||||
assert_array_almost_equal(result, expected)
|
48
venv/Lib/site-packages/skimage/util/tests/test_arraycrop.py
Normal file
|
@ -0,0 +1,48 @@
|
|||
|
||||
import numpy as np
|
||||
from skimage.util import crop
|
||||
from skimage._shared.testing import (assert_array_equal, assert_equal)
|
||||
|
||||
|
||||
def test_multi_crop():
|
||||
arr = np.arange(45).reshape(9, 5)
|
||||
out = crop(arr, ((1, 2), (2, 1)))
|
||||
assert_array_equal(out[0], [7, 8])
|
||||
assert_array_equal(out[-1], [32, 33])
|
||||
assert_equal(out.shape, (6, 2))
|
||||
|
||||
|
||||
def test_pair_crop():
|
||||
arr = np.arange(45).reshape(9, 5)
|
||||
out = crop(arr, (1, 2))
|
||||
assert_array_equal(out[0], [6, 7])
|
||||
assert_array_equal(out[-1], [31, 32])
|
||||
assert_equal(out.shape, (6, 2))
|
||||
|
||||
|
||||
def test_int_crop():
|
||||
arr = np.arange(45).reshape(9, 5)
|
||||
out = crop(arr, 1)
|
||||
assert_array_equal(out[0], [6, 7, 8])
|
||||
assert_array_equal(out[-1], [36, 37, 38])
|
||||
assert_equal(out.shape, (7, 3))
|
||||
|
||||
|
||||
def test_copy_crop():
|
||||
arr = np.arange(45).reshape(9, 5)
|
||||
out0 = crop(arr, 1, copy=True)
|
||||
assert out0.flags.c_contiguous
|
||||
out0[0, 0] = 100
|
||||
assert not np.any(arr == 100)
|
||||
assert not np.may_share_memory(arr, out0)
|
||||
|
||||
out1 = crop(arr, 1)
|
||||
out1[0, 0] = 100
|
||||
assert arr[1, 1] == 100
|
||||
assert np.may_share_memory(arr, out1)
|
||||
|
||||
|
||||
def test_zero_crop():
|
||||
arr = np.arange(45).reshape(9, 5)
|
||||
out = crop(arr, 0)
|
||||
assert out.shape == (9, 5)
|
1060
venv/Lib/site-packages/skimage/util/tests/test_arraypad.py
Normal file
File diff suppressed because it is too large
64
venv/Lib/site-packages/skimage/util/tests/test_compare.py
Normal file
|
@ -0,0 +1,64 @@
|
|||
import numpy as np
|
||||
|
||||
from skimage._shared.testing import assert_array_equal
|
||||
from skimage._shared import testing
|
||||
|
||||
from skimage.util.compare import compare_images
|
||||
|
||||
|
||||
def test_compare_images_ValueError_shape():
|
||||
img1 = np.zeros((10, 10), dtype=np.uint8)
|
||||
img2 = np.zeros((10, 1), dtype=np.uint8)
|
||||
with testing.raises(ValueError):
|
||||
compare_images(img1, img2)
|
||||
|
||||
|
||||
def test_compare_images_diff():
|
||||
img1 = np.zeros((10, 10), dtype=np.uint8)
|
||||
img1[3:8, 3:8] = 255
|
||||
img2 = np.zeros_like(img1)
|
||||
img2[3:8, 0:8] = 255
|
||||
expected_result = np.zeros_like(img1, dtype=np.float64)
|
||||
expected_result[3:8, 0:3] = 1
|
||||
result = compare_images(img1, img2, method='diff')
|
||||
assert_array_equal(result, expected_result)
|
||||
|
||||
|
||||
def test_compare_images_blend():
|
||||
img1 = np.zeros((10, 10), dtype=np.uint8)
|
||||
img1[3:8, 3:8] = 255
|
||||
img2 = np.zeros_like(img1)
|
||||
img2[3:8, 0:8] = 255
|
||||
expected_result = np.zeros_like(img1, dtype=np.float64)
|
||||
expected_result[3:8, 3:8] = 1
|
||||
expected_result[3:8, 0:3] = 0.5
|
||||
result = compare_images(img1, img2, method='blend')
|
||||
assert_array_equal(result, expected_result)
|
||||
|
||||
|
||||
def test_compare_images_checkerboard_default():
|
||||
img1 = np.zeros((2**4, 2**4), dtype=np.uint8)
|
||||
img2 = np.full(img1.shape, fill_value=255, dtype=np.uint8)
|
||||
res = compare_images(img1, img2, method='checkerboard')
|
||||
exp_row1 = np.array([0., 0., 1., 1., 0., 0., 1., 1., 0., 0., 1., 1., 0., 0., 1., 1.])
|
||||
exp_row2 = np.array([1., 1., 0., 0., 1., 1., 0., 0., 1., 1., 0., 0., 1., 1., 0., 0.])
|
||||
for i in (0, 1, 4, 5, 8, 9, 12, 13):
|
||||
assert_array_equal(res[i, :], exp_row1)
|
||||
for i in (2, 3, 6, 7, 10, 11, 14, 15):
|
||||
assert_array_equal(res[i, :], exp_row2)
|
||||
|
||||
|
||||
def test_compare_images_checkerboard_tuple():
|
||||
img1 = np.zeros((2**4, 2**4), dtype=np.uint8)
|
||||
img2 = np.full(img1.shape, fill_value=255, dtype=np.uint8)
|
||||
res = compare_images(img1, img2, method='checkerboard', n_tiles=(4, 8))
|
||||
exp_row1 = np.array(
|
||||
[0., 0., 1., 1., 0., 0., 1., 1., 0., 0., 1., 1., 0., 0., 1., 1.]
|
||||
)
|
||||
exp_row2 = np.array(
|
||||
[1., 1., 0., 0., 1., 1., 0., 0., 1., 1., 0., 0., 1., 1., 0., 0.]
|
||||
)
|
||||
for i in (0, 1, 2, 3, 8, 9, 10, 11):
|
||||
assert_array_equal(res[i, :], exp_row1)
|
||||
for i in (4, 5, 6, 7, 12, 13, 14, 15):
|
||||
assert_array_equal(res[i, :], exp_row2)
|
193
venv/Lib/site-packages/skimage/util/tests/test_dtype.py
Normal file
|
@ -0,0 +1,193 @@
|
|||
import warnings
|
||||
|
||||
import numpy as np
|
||||
import itertools
|
||||
from skimage import (img_as_float, img_as_float32, img_as_float64,
|
||||
img_as_int, img_as_uint, img_as_ubyte)
|
||||
from skimage.util.dtype import _convert
|
||||
|
||||
from skimage._shared._warnings import expected_warnings
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import assert_equal, parametrize
|
||||
|
||||
|
||||
dtype_range = {np.uint8: (0, 255),
|
||||
np.uint16: (0, 65535),
|
||||
np.int8: (-128, 127),
|
||||
np.int16: (-32768, 32767),
|
||||
np.float32: (-1.0, 1.0),
|
||||
np.float64: (-1.0, 1.0)}
|
||||
|
||||
|
||||
img_funcs = (img_as_int, img_as_float64, img_as_float32,
|
||||
img_as_uint, img_as_ubyte)
|
||||
dtypes_for_img_funcs = (np.int16, np.float64, np.float32, np.uint16, np.ubyte)
|
||||
img_funcs_and_types = zip(img_funcs, dtypes_for_img_funcs)
|
||||
|
||||
|
||||
def _verify_range(msg, x, vmin, vmax, dtype):
|
||||
assert_equal(x[0], vmin)
|
||||
assert_equal(x[-1], vmax)
|
||||
assert x.dtype == dtype
|
||||
|
||||
|
||||
@parametrize("dtype, f_and_dt",
|
||||
itertools.product(dtype_range, img_funcs_and_types))
|
||||
def test_range(dtype, f_and_dt):
|
||||
imin, imax = dtype_range[dtype]
|
||||
x = np.linspace(imin, imax, 10).astype(dtype)
|
||||
|
||||
f, dt = f_and_dt
|
||||
|
||||
y = f(x)
|
||||
|
||||
omin, omax = dtype_range[dt]
|
||||
|
||||
if imin == 0 or omin == 0:
|
||||
omin = 0
|
||||
imin = 0
|
||||
|
||||
_verify_range("From %s to %s" % (np.dtype(dtype), np.dtype(dt)),
|
||||
y, omin, omax, np.dtype(dt))
|
||||
|
||||
|
||||
# Add non-standard data types that are allowed by the `_convert` function.
|
||||
dtype_range_extra = dtype_range.copy()
|
||||
dtype_range_extra.update({np.int32: (-2147483648, 2147483647),
|
||||
np.uint32: (0, 4294967295)})
|
||||
|
||||
dtype_pairs = [(np.uint8, np.uint32),
|
||||
(np.int8, np.uint32),
|
||||
(np.int8, np.int32),
|
||||
(np.int32, np.int8),
|
||||
(np.float64, np.float32),
|
||||
(np.int32, np.float32)]
|
||||
|
||||
|
||||
@parametrize("dtype_in, dt", dtype_pairs)
|
||||
def test_range_extra_dtypes(dtype_in, dt):
|
||||
"""Test code paths that are not skipped by `test_range`"""
|
||||
|
||||
imin, imax = dtype_range_extra[dtype_in]
|
||||
x = np.linspace(imin, imax, 10).astype(dtype_in)
|
||||
|
||||
y = _convert(x, dt)
|
||||
|
||||
omin, omax = dtype_range_extra[dt]
|
||||
_verify_range("From %s to %s" % (np.dtype(dtype_in), np.dtype(dt)),
|
||||
y, omin, omax, np.dtype(dt))
|
||||
|
||||
|
||||
def test_downcast():
|
||||
x = np.arange(10).astype(np.uint64)
|
||||
with expected_warnings(['Downcasting']):
|
||||
y = img_as_int(x)
|
||||
assert np.allclose(y, x.astype(np.int16))
|
||||
assert y.dtype == np.int16, y.dtype
|
||||
|
||||
|
||||
def test_float_out_of_range():
|
||||
too_high = np.array([2], dtype=np.float32)
|
||||
with testing.raises(ValueError):
|
||||
img_as_int(too_high)
|
||||
too_low = np.array([-2], dtype=np.float32)
|
||||
with testing.raises(ValueError):
|
||||
img_as_int(too_low)
|
||||
|
||||
|
||||
def test_float_float_all_ranges():
|
||||
arr_in = np.array([[-10., 10., 1e20]], dtype=np.float32)
|
||||
np.testing.assert_array_equal(img_as_float(arr_in), arr_in)
|
||||
|
||||
|
||||
def test_copy():
|
||||
x = np.array([1], dtype=np.float64)
|
||||
y = img_as_float(x)
|
||||
z = img_as_float(x, force_copy=True)
|
||||
|
||||
assert y is x
|
||||
assert z is not x
|
||||
|
||||
|
||||
def test_bool():
|
||||
img_ = np.zeros((10, 10), np.bool_)
|
||||
img8 = np.zeros((10, 10), np.bool8)
|
||||
img_[1, 1] = True
|
||||
img8[1, 1] = True
|
||||
for (func, dt) in [(img_as_int, np.int16),
|
||||
(img_as_float, np.float64),
|
||||
(img_as_uint, np.uint16),
|
||||
(img_as_ubyte, np.ubyte)]:
|
||||
converted_ = func(img_)
|
||||
assert np.sum(converted_) == dtype_range[dt][1]
|
||||
converted8 = func(img8)
|
||||
assert np.sum(converted8) == dtype_range[dt][1]
|
||||
|
||||
|
||||
def test_clobber():
|
||||
# The `img_as_*` functions should never modify input arrays.
|
||||
for func_input_type in img_funcs:
|
||||
for func_output_type in img_funcs:
|
||||
img = np.random.rand(5, 5)
|
||||
|
||||
img_in = func_input_type(img)
|
||||
img_in_before = img_in.copy()
|
||||
func_output_type(img_in)
|
||||
|
||||
assert_equal(img_in, img_in_before)
|
||||
|
||||
|
||||
def test_signed_scaling_float32():
|
||||
x = np.array([-128, 127], dtype=np.int8)
|
||||
y = img_as_float32(x)
|
||||
assert_equal(y.max(), 1)
|
||||
|
||||
|
||||
def test_float32_passthrough():
|
||||
x = np.array([-1, 1], dtype=np.float32)
|
||||
y = img_as_float(x)
|
||||
assert_equal(y.dtype, x.dtype)
|
||||
|
||||
|
||||
float_dtype_list = [float, np.float, np.double, np.single, np.float32,
|
||||
np.float64, 'float32', 'float64']
|
||||
|
||||
|
||||
def test_float_conversion_dtype():
|
||||
"""Test any convertion from a float dtype to an other."""
|
||||
x = np.array([-1, 1])
|
||||
|
||||
# Test all combinations of dtype conversions
|
||||
dtype_combin = np.array(np.meshgrid(float_dtype_list,
|
||||
float_dtype_list)).T.reshape(-1, 2)
|
||||
|
||||
for dtype_in, dtype_out in dtype_combin:
|
||||
x = x.astype(dtype_in)
|
||||
y = _convert(x, dtype_out)
|
||||
assert y.dtype == np.dtype(dtype_out)
|
||||
|
||||
|
||||
def test_float_conversion_dtype_warns():
|
||||
"""Test that convert issues a warning when called"""
|
||||
from skimage.util.dtype import convert
|
||||
x = np.array([-1, 1])
|
||||
|
||||
# Test all combinations of dtype conversions
|
||||
dtype_combin = np.array(np.meshgrid(float_dtype_list,
|
||||
float_dtype_list)).T.reshape(-1, 2)
|
||||
|
||||
for dtype_in, dtype_out in dtype_combin:
|
||||
x = x.astype(dtype_in)
|
||||
with expected_warnings(["The use of this function is discouraged"]):
|
||||
y = convert(x, dtype_out)
|
||||
assert y.dtype == np.dtype(dtype_out)
|
||||
|
||||
|
||||
def test_subclass_conversion():
|
||||
"""Check subclass conversion behavior"""
|
||||
x = np.array([-1, 1])
|
||||
|
||||
for dtype in float_dtype_list:
|
||||
x = x.astype(dtype)
|
||||
y = _convert(x, np.floating)
|
||||
assert y.dtype == x.dtype
|
77
venv/Lib/site-packages/skimage/util/tests/test_invert.py
Normal file
|
@ -0,0 +1,77 @@
|
|||
import numpy as np
|
||||
from skimage import dtype_limits
|
||||
from skimage.util.dtype import dtype_range
|
||||
from skimage.util import invert
|
||||
|
||||
from skimage._shared.testing import assert_array_equal
|
||||
|
||||
|
||||
def test_invert_bool():
|
||||
dtype = 'bool'
|
||||
image = np.zeros((3, 3), dtype=dtype)
|
||||
upper_dtype_limit = dtype_limits(image, clip_negative=False)[1]
|
||||
image[1, :] = upper_dtype_limit
|
||||
expected = np.zeros((3, 3), dtype=dtype) + upper_dtype_limit
|
||||
expected[1, :] = 0
|
||||
result = invert(image)
|
||||
assert_array_equal(expected, result)
|
||||
|
||||
|
||||
def test_invert_uint8():
|
||||
dtype = 'uint8'
|
||||
image = np.zeros((3, 3), dtype=dtype)
|
||||
upper_dtype_limit = dtype_limits(image, clip_negative=False)[1]
|
||||
image[1, :] = upper_dtype_limit
|
||||
expected = np.zeros((3, 3), dtype=dtype) + upper_dtype_limit
|
||||
expected[1, :] = 0
|
||||
result = invert(image)
|
||||
assert_array_equal(expected, result)
|
||||
|
||||
|
||||
def test_invert_int8():
|
||||
dtype = 'int8'
|
||||
image = np.zeros((3, 3), dtype=dtype)
|
||||
lower_dtype_limit, upper_dtype_limit = \
|
||||
dtype_limits(image, clip_negative=False)
|
||||
image[1, :] = lower_dtype_limit
|
||||
image[2, :] = upper_dtype_limit
|
||||
expected = np.zeros((3, 3), dtype=dtype)
|
||||
expected[2, :] = lower_dtype_limit
|
||||
expected[1, :] = upper_dtype_limit
|
||||
expected[0, :] = -1
|
||||
result = invert(image)
|
||||
assert_array_equal(expected, result)
|
||||
|
||||
|
||||
def test_invert_float64_signed():
|
||||
dtype = 'float64'
|
||||
image = np.zeros((3, 3), dtype=dtype)
|
||||
lower_dtype_limit, upper_dtype_limit = \
|
||||
dtype_limits(image, clip_negative=False)
|
||||
image[1, :] = lower_dtype_limit
|
||||
image[2, :] = upper_dtype_limit
|
||||
expected = np.zeros((3, 3), dtype=dtype)
|
||||
expected[2, :] = lower_dtype_limit
|
||||
expected[1, :] = upper_dtype_limit
|
||||
result = invert(image, signed_float=True)
|
||||
assert_array_equal(expected, result)
|
||||
|
||||
|
||||
def test_invert_float64_unsigned():
|
||||
dtype = 'float64'
|
||||
image = np.zeros((3, 3), dtype=dtype)
|
||||
lower_dtype_limit, upper_dtype_limit = \
|
||||
dtype_limits(image, clip_negative=True)
|
||||
image[2, :] = upper_dtype_limit
|
||||
expected = np.zeros((3, 3), dtype=dtype)
|
||||
expected[0, :] = upper_dtype_limit
|
||||
expected[1, :] = upper_dtype_limit
|
||||
result = invert(image)
|
||||
assert_array_equal(expected, result)
|
||||
|
||||
|
||||
def test_invert_roundtrip():
|
||||
for t, limits in dtype_range.items():
|
||||
image = np.array(limits, dtype=t)
|
||||
expected = invert(invert(image))
|
||||
assert_array_equal(image, expected)
|
55
venv/Lib/site-packages/skimage/util/tests/test_map_array.py
Normal file
|
@ -0,0 +1,55 @@
|
|||
import numpy as np
|
||||
from skimage.util._map_array import map_array, ArrayMap
|
||||
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import assert_array_equal
|
||||
import pytest
|
||||
|
||||
|
||||
def test_map_array_incorrect_output_shape():
|
||||
labels = np.random.randint(0, 5, size=(24, 25))
|
||||
out = np.empty((24, 24))
|
||||
in_values = np.unique(labels)
|
||||
out_values = np.random.random(in_values.shape).astype(out.dtype)
|
||||
with testing.raises(ValueError):
|
||||
map_array(labels, in_values, out_values, out=out)
|
||||
|
||||
|
||||
def test_map_array_non_contiguous_output_array():
|
||||
labels = np.random.randint(0, 5, size=(24, 25))
|
||||
out = np.empty((24 * 3, 25 * 2))[::3, ::2]
|
||||
in_values = np.unique(labels)
|
||||
out_values = np.random.random(in_values.shape).astype(out.dtype)
|
||||
with testing.raises(ValueError):
|
||||
map_array(labels, in_values, out_values, out=out)
|
||||
|
||||
|
||||
def test_arraymap_long_str():
|
||||
labels = np.random.randint(0, 40, size=(24, 25))
|
||||
in_values = np.unique(labels)
|
||||
out_values = np.random.random(in_values.shape)
|
||||
m = ArrayMap(in_values, out_values)
|
||||
assert len(str(m).split('\n')) == m._max_str_lines + 2
|
||||
|
||||
|
||||
def test_arraymap_update():
|
||||
in_values = np.unique(np.random.randint(0, 200, size=5))
|
||||
out_values = np.random.random(len(in_values))
|
||||
m = ArrayMap(in_values, out_values)
|
||||
image = np.random.randint(1, len(m), size=(512, 512))
|
||||
assert np.all(m[image] < 1) # missing values map to 0.
|
||||
m[1:] += 1
|
||||
assert np.all(m[image] >= 1)
|
||||
|
||||
|
||||
def test_arraymap_bool_index():
|
||||
in_values = np.unique(np.random.randint(0, 200, size=5))
|
||||
out_values = np.random.random(len(in_values))
|
||||
m = ArrayMap(in_values, out_values)
|
||||
image = np.random.randint(1, len(in_values), size=(512, 512))
|
||||
assert np.all(m[image] < 1) # missing values map to 0.
|
||||
positive = np.ones(len(m), dtype=bool)
|
||||
positive[0] = False
|
||||
m[positive] += 1
|
||||
assert np.all(m[image] >= 1)
|
||||
|
145
venv/Lib/site-packages/skimage/util/tests/test_montage.py
Normal file
|
@ -0,0 +1,145 @@
|
|||
from skimage._shared import testing
|
||||
from skimage._shared.testing import assert_equal, assert_array_equal
|
||||
from skimage._shared._warnings import expected_warnings
|
||||
|
||||
import numpy as np
|
||||
from skimage.util import montage
|
||||
|
||||
|
||||
def test_montage_simple_gray():
|
||||
n_images, n_rows, n_cols = 3, 2, 3
|
||||
arr_in = np.arange(n_images * n_rows * n_cols, dtype=np.float)
|
||||
arr_in = arr_in.reshape(n_images, n_rows, n_cols)
|
||||
|
||||
arr_out = montage(arr_in)
|
||||
arr_ref = np.array(
|
||||
[[ 0. , 1. , 2. , 6. , 7. , 8. ],
|
||||
[ 3. , 4. , 5. , 9. , 10. , 11. ],
|
||||
[ 12. , 13. , 14. , 8.5, 8.5, 8.5],
|
||||
[ 15. , 16. , 17. , 8.5, 8.5, 8.5]]
|
||||
)
|
||||
assert_array_equal(arr_out, arr_ref)
|
||||
|
||||
|
||||
def test_montage_simple_rgb():
|
||||
n_images, n_rows, n_cols, n_channels = 2, 2, 2, 2
|
||||
arr_in = np.arange(n_images * n_rows * n_cols * n_channels, dtype=np.float)
|
||||
arr_in = arr_in.reshape(n_images, n_rows, n_cols, n_channels)
|
||||
|
||||
arr_out = montage(arr_in, multichannel=True)
|
||||
arr_ref = np.array(
|
||||
[[[ 0, 1],
|
||||
[ 2, 3],
|
||||
[ 8, 9],
|
||||
[10, 11]],
|
||||
[[ 4, 5],
|
||||
[ 6, 7],
|
||||
[12, 13],
|
||||
[14, 15]],
|
||||
[[ 7, 8],
|
||||
[ 7, 8],
|
||||
[ 7, 8],
|
||||
[ 7, 8]],
|
||||
[[ 7, 8],
|
||||
[ 7, 8],
|
||||
[ 7, 8],
|
||||
[ 7, 8]]]
|
||||
)
|
||||
assert_array_equal(arr_out, arr_ref)
|
||||
|
||||
|
||||
def test_montage_fill_gray():
|
||||
n_images, n_rows, n_cols = 3, 2, 3
|
||||
arr_in = np.arange(n_images*n_rows*n_cols, dtype=np.float)
|
||||
arr_in = arr_in.reshape(n_images, n_rows, n_cols)
|
||||
|
||||
arr_out = montage(arr_in, fill=0)
|
||||
arr_ref = np.array(
|
||||
[[ 0. , 1. , 2. , 6. , 7. , 8. ],
|
||||
[ 3. , 4. , 5. , 9. , 10. , 11. ],
|
||||
[ 12. , 13. , 14. , 0. , 0. , 0. ],
|
||||
[ 15. , 16. , 17. , 0. , 0. , 0. ]]
|
||||
)
|
||||
assert_array_equal(arr_out, arr_ref)
|
||||
|
||||
|
||||
def test_montage_grid_default_gray():
|
||||
n_images, n_rows, n_cols = 15, 11, 7
|
||||
arr_in = np.arange(n_images * n_rows * n_cols, dtype=np.float)
|
||||
arr_in = arr_in.reshape(n_images, n_rows, n_cols)
|
||||
|
||||
n_tiles = int(np.ceil(np.sqrt(n_images)))
|
||||
arr_out = montage(arr_in)
|
||||
assert_equal(arr_out.shape, (n_tiles * n_rows, n_tiles * n_cols))
|
||||
|
||||
|
||||
def test_montage_grid_custom_gray():
|
||||
n_images, n_rows, n_cols = 6, 2, 2
|
||||
arr_in = np.arange(n_images * n_rows * n_cols, dtype=np.float32)
|
||||
arr_in = arr_in.reshape(n_images, n_rows, n_cols)
|
||||
|
||||
arr_out = montage(arr_in, grid_shape=(3, 2))
|
||||
arr_ref = np.array(
|
||||
[[ 0., 1., 4., 5.],
|
||||
[ 2., 3., 6., 7.],
|
||||
[ 8., 9., 12., 13.],
|
||||
[ 10., 11., 14., 15.],
|
||||
[ 16., 17., 20., 21.],
|
||||
[ 18., 19., 22., 23.]]
|
||||
)
|
||||
assert_array_equal(arr_out, arr_ref)
|
||||
|
||||
|
||||
def test_montage_rescale_intensity_gray():
|
||||
n_images, n_rows, n_cols = 4, 3, 3
|
||||
arr_in = np.arange(n_images * n_rows * n_cols, dtype=np.float32)
|
||||
arr_in = arr_in.reshape(n_images, n_rows, n_cols)
|
||||
|
||||
arr_out = montage(arr_in, rescale_intensity=True)
|
||||
arr_ref = np.array(
|
||||
[[ 0. , 0.125, 0.25 , 0. , 0.125, 0.25 ],
|
||||
[ 0.375, 0.5 , 0.625, 0.375, 0.5 , 0.625],
|
||||
[ 0.75 , 0.875, 1. , 0.75 , 0.875, 1. ],
|
||||
[ 0. , 0.125, 0.25 , 0. , 0.125, 0.25 ],
|
||||
[ 0.375, 0.5 , 0.625, 0.375, 0.5 , 0.625],
|
||||
[ 0.75 , 0.875, 1. , 0.75 , 0.875, 1. ]]
|
||||
)
|
||||
assert_equal(arr_out.min(), 0.0)
|
||||
assert_equal(arr_out.max(), 1.0)
|
||||
assert_array_equal(arr_out, arr_ref)
|
||||
|
||||
|
||||
def test_montage_simple_padding_gray():
|
||||
n_images, n_rows, n_cols = 2, 2, 2
|
||||
arr_in = np.arange(n_images * n_rows * n_cols)
|
||||
arr_in = arr_in.reshape(n_images, n_rows, n_cols)
|
||||
|
||||
arr_out = montage(arr_in, padding_width=1)
|
||||
arr_ref = np.array(
|
||||
[[3, 3, 3, 3, 3, 3, 3],
|
||||
[3, 0, 1, 3, 4, 5, 3],
|
||||
[3, 2, 3, 3, 6, 7, 3],
|
||||
[3, 3, 3, 3, 3, 3, 3],
|
||||
[3, 3, 3, 3, 3, 3, 3],
|
||||
[3, 3, 3, 3, 3, 3, 3],
|
||||
[3, 3, 3, 3, 3, 3, 3]]
|
||||
)
|
||||
assert_array_equal(arr_out, arr_ref)
|
||||
|
||||
|
||||
def test_error_ndim():
|
||||
arr_error = np.random.randn(1, 2)
|
||||
with testing.raises(ValueError):
|
||||
montage(arr_error)
|
||||
|
||||
arr_error = np.random.randn(1, 2, 3, 4)
|
||||
with testing.raises(ValueError):
|
||||
montage(arr_error)
|
||||
|
||||
arr_error = np.random.randn(1, 2, 3)
|
||||
with testing.raises(ValueError):
|
||||
montage(arr_error, multichannel=True)
|
||||
|
||||
arr_error = np.random.randn(1, 2, 3, 4, 5)
|
||||
with testing.raises(ValueError):
|
||||
montage(arr_error, multichannel=True)
|
214
venv/Lib/site-packages/skimage/util/tests/test_random_noise.py
Normal file
|
@ -0,0 +1,214 @@
|
|||
from skimage._shared import testing
|
||||
from skimage._shared.testing import assert_array_equal, assert_allclose
|
||||
|
||||
import numpy as np
|
||||
from skimage.data import camera
|
||||
from skimage.util import random_noise, img_as_float
|
||||
|
||||
|
||||
def test_set_seed():
|
||||
seed = 42
|
||||
cam = camera()
|
||||
test = random_noise(cam, seed=seed)
|
||||
assert_array_equal(test, random_noise(cam, seed=seed))
|
||||
|
||||
|
||||
def test_salt():
|
||||
seed = 42
|
||||
cam = img_as_float(camera())
|
||||
cam_noisy = random_noise(cam, seed=seed, mode='salt', amount=0.15)
|
||||
saltmask = cam != cam_noisy
|
||||
|
||||
# Ensure all changes are to 1.0
|
||||
assert_allclose(cam_noisy[saltmask], np.ones(saltmask.sum()))
|
||||
|
||||
# Ensure approximately correct amount of noise was added
|
||||
proportion = float(saltmask.sum()) / (cam.shape[0] * cam.shape[1])
|
||||
assert 0.11 < proportion <= 0.15
|
||||
|
||||
|
||||
def test_salt_p1():
|
||||
image = np.random.rand(2, 3)
|
||||
noisy = random_noise(image, mode='salt', amount=1)
|
||||
assert_array_equal(noisy, [[1, 1, 1], [1, 1, 1]])
|
||||
|
||||
|
||||
def test_singleton_dim():
|
||||
"""Ensure images where size of a given dimension is 1 work correctly."""
|
||||
image = np.random.rand(1, 20)
|
||||
noisy = random_noise(image, mode='salt', amount=0.1, seed=42)
|
||||
assert np.sum(noisy == 1) == 2
|
||||
|
||||
|
||||
def test_pepper():
|
||||
seed = 42
|
||||
cam = img_as_float(camera())
|
||||
data_signed = cam * 2. - 1. # Same image, on range [-1, 1]
|
||||
|
||||
cam_noisy = random_noise(cam, seed=seed, mode='pepper', amount=0.15)
|
||||
peppermask = cam != cam_noisy
|
||||
|
||||
# Ensure all changes are to 0.0
|
||||
assert_allclose(cam_noisy[peppermask], np.zeros(peppermask.sum()))
|
||||
|
||||
# Ensure approximately correct amount of noise was added
|
||||
proportion = float(peppermask.sum()) / (cam.shape[0] * cam.shape[1])
|
||||
assert 0.11 < proportion <= 0.15
|
||||
|
||||
# Check to make sure pepper gets added properly to signed images
|
||||
orig_zeros = (data_signed == -1).sum()
|
||||
cam_noisy_signed = random_noise(data_signed, seed=seed, mode='pepper',
|
||||
amount=.15)
|
||||
|
||||
proportion = (float((cam_noisy_signed == -1).sum() - orig_zeros) /
|
||||
(cam.shape[0] * cam.shape[1]))
|
||||
assert 0.11 < proportion <= 0.15
|
||||
|
||||
|
||||
def test_salt_and_pepper():
|
||||
seed = 42
|
||||
cam = img_as_float(camera())
|
||||
cam_noisy = random_noise(cam, seed=seed, mode='s&p', amount=0.15,
|
||||
salt_vs_pepper=0.25)
|
||||
saltmask = np.logical_and(cam != cam_noisy, cam_noisy == 1.)
|
||||
peppermask = np.logical_and(cam != cam_noisy, cam_noisy == 0.)
|
||||
|
||||
# Ensure all changes are to 0. or 1.
|
||||
assert_allclose(cam_noisy[saltmask], np.ones(saltmask.sum()))
|
||||
assert_allclose(cam_noisy[peppermask], np.zeros(peppermask.sum()))
|
||||
|
||||
# Ensure approximately correct amount of noise was added
|
||||
proportion = float(
|
||||
saltmask.sum() + peppermask.sum()) / (cam.shape[0] * cam.shape[1])
|
||||
assert 0.11 < proportion <= 0.18
|
||||
|
||||
# Verify the relative amount of salt vs. pepper is close to expected
|
||||
assert 0.18 < saltmask.sum() / float(peppermask.sum()) < 0.33
|
||||
|
||||
|
||||
def test_gaussian():
|
||||
seed = 42
|
||||
data = np.zeros((128, 128)) + 0.5
|
||||
data_gaussian = random_noise(data, seed=seed, var=0.01)
|
||||
assert 0.008 < data_gaussian.var() < 0.012
|
||||
|
||||
data_gaussian = random_noise(data, seed=seed, mean=0.3, var=0.015)
|
||||
assert 0.28 < data_gaussian.mean() - 0.5 < 0.32
|
||||
assert 0.012 < data_gaussian.var() < 0.018
|
||||
|
||||
|
||||
def test_localvar():
|
||||
seed = 42
|
||||
data = np.zeros((128, 128)) + 0.5
|
||||
local_vars = np.zeros((128, 128)) + 0.001
|
||||
local_vars[:64, 64:] = 0.1
|
||||
local_vars[64:, :64] = 0.25
|
||||
local_vars[64:, 64:] = 0.45
|
||||
|
||||
data_gaussian = random_noise(data, mode='localvar', seed=seed,
|
||||
local_vars=local_vars, clip=False)
|
||||
assert 0. < data_gaussian[:64, :64].var() < 0.002
|
||||
assert 0.095 < data_gaussian[:64, 64:].var() < 0.105
|
||||
assert 0.245 < data_gaussian[64:, :64].var() < 0.255
|
||||
assert 0.445 < data_gaussian[64:, 64:].var() < 0.455
|
||||
|
||||
# Ensure local variance bounds checking works properly
|
||||
bad_local_vars = np.zeros_like(data)
|
||||
with testing.raises(ValueError):
|
||||
random_noise(data, mode='localvar', seed=seed,
|
||||
local_vars=bad_local_vars)
|
||||
bad_local_vars += 0.1
|
||||
bad_local_vars[0, 0] = -1
|
||||
with testing.raises(ValueError):
|
||||
random_noise(data, mode='localvar', seed=seed,
|
||||
local_vars=bad_local_vars)
|
||||
|
||||
|
||||
def test_speckle():
|
||||
seed = 42
|
||||
data = np.zeros((128, 128)) + 0.1
|
||||
np.random.seed(seed=seed)
|
||||
noise = np.random.normal(0.1, 0.02 ** 0.5, (128, 128))
|
||||
expected = np.clip(data + data * noise, 0, 1)
|
||||
|
||||
data_speckle = random_noise(data, mode='speckle', seed=seed, mean=0.1,
|
||||
var=0.02)
|
||||
assert_allclose(expected, data_speckle)
|
||||
|
||||
|
||||
def test_poisson():
|
||||
seed = 42
|
||||
data = camera() # 512x512 grayscale uint8
|
||||
cam_noisy = random_noise(data, mode='poisson', seed=seed)
|
||||
cam_noisy2 = random_noise(data, mode='poisson', seed=seed, clip=False)
|
||||
|
||||
np.random.seed(seed=seed)
|
||||
expected = np.random.poisson(img_as_float(data) * 256) / 256.
|
||||
assert_allclose(cam_noisy, np.clip(expected, 0., 1.))
|
||||
assert_allclose(cam_noisy2, expected)
|
||||
|
||||
|
||||
def test_clip_poisson():
|
||||
seed = 42
|
||||
data = camera() # 512x512 grayscale uint8
|
||||
data_signed = img_as_float(data) * 2. - 1. # Same image, on range [-1, 1]
|
||||
|
||||
# Signed and unsigned, clipped
|
||||
cam_poisson = random_noise(data, mode='poisson', seed=seed, clip=True)
|
||||
cam_poisson2 = random_noise(data_signed, mode='poisson', seed=seed,
|
||||
clip=True)
|
||||
assert (cam_poisson.max() == 1.) and (cam_poisson.min() == 0.)
|
||||
assert (cam_poisson2.max() == 1.) and (cam_poisson2.min() == -1.)
|
||||
|
||||
# Signed and unsigned, unclipped
|
||||
cam_poisson = random_noise(data, mode='poisson', seed=seed, clip=False)
|
||||
cam_poisson2 = random_noise(data_signed, mode='poisson', seed=seed,
|
||||
clip=False)
|
||||
assert (cam_poisson.max() > 1.15) and (cam_poisson.min() == 0.)
|
||||
assert (cam_poisson2.max() > 1.3) and (cam_poisson2.min() == -1.)
|
||||
|
||||
|
||||
def test_clip_gaussian():
|
||||
seed = 42
|
||||
data = camera() # 512x512 grayscale uint8
|
||||
data_signed = img_as_float(data) * 2. - 1. # Same image, on range [-1, 1]
|
||||
|
||||
# Signed and unsigned, clipped
|
||||
cam_gauss = random_noise(data, mode='gaussian', seed=seed, clip=True)
|
||||
cam_gauss2 = random_noise(data_signed, mode='gaussian', seed=seed,
|
||||
clip=True)
|
||||
assert (cam_gauss.max() == 1.) and (cam_gauss.min() == 0.)
|
||||
assert (cam_gauss2.max() == 1.) and (cam_gauss2.min() == -1.)
|
||||
|
||||
# Signed and unsigned, unclipped
|
||||
cam_gauss = random_noise(data, mode='gaussian', seed=seed, clip=False)
|
||||
cam_gauss2 = random_noise(data_signed, mode='gaussian', seed=seed,
|
||||
clip=False)
|
||||
assert (cam_gauss.max() > 1.22) and (cam_gauss.min() < -0.36)
|
||||
assert (cam_gauss2.max() > 1.219) and (cam_gauss2.min() < -1.337)
|
||||
|
||||
|
||||
def test_clip_speckle():
|
||||
seed = 42
|
||||
data = camera() # 512x512 grayscale uint8
|
||||
data_signed = img_as_float(data) * 2. - 1. # Same image, on range [-1, 1]
|
||||
|
||||
# Signed and unsigned, clipped
|
||||
cam_speckle = random_noise(data, mode='speckle', seed=seed, clip=True)
|
||||
cam_speckle2 = random_noise(data_signed, mode='speckle', seed=seed,
|
||||
clip=True)
|
||||
assert (cam_speckle.max() == 1.) and (cam_speckle.min() == 0.)
|
||||
assert (cam_speckle2.max() == 1.) and (cam_speckle2.min() == -1.)
|
||||
|
||||
# Signed and unsigned, unclipped
|
||||
cam_speckle = random_noise(data, mode='speckle', seed=seed, clip=False)
|
||||
cam_speckle2 = random_noise(data_signed, mode='speckle', seed=seed,
|
||||
clip=False)
|
||||
assert (cam_speckle.max() > 1.219) and (cam_speckle.min() == 0.)
|
||||
assert (cam_speckle2.max() > 1.219) and (cam_speckle2.min() < -1.306)
|
||||
|
||||
|
||||
def test_bad_mode():
|
||||
data = np.zeros((64, 64))
|
||||
with testing.raises(KeyError):
|
||||
random_noise(data, 'perlin')
|
|
@ -0,0 +1,36 @@
|
|||
import numpy as np
|
||||
from skimage.util import regular_grid
|
||||
from skimage._shared.testing import assert_equal
|
||||
|
||||
|
||||
def test_regular_grid_full():
|
||||
ar = np.zeros((2, 2))
|
||||
g = regular_grid(ar, 25)
|
||||
assert_equal(g, [slice(None, None, None), slice(None, None, None)])
|
||||
ar[g] = 1
|
||||
assert_equal(ar.size, ar.sum())
|
||||
|
||||
|
||||
def test_regular_grid_2d_8():
|
||||
ar = np.zeros((20, 40))
|
||||
g = regular_grid(ar.shape, 8)
|
||||
assert_equal(g, [slice(5.0, None, 10.0), slice(5.0, None, 10.0)])
|
||||
ar[g] = 1
|
||||
assert_equal(ar.sum(), 8)
|
||||
|
||||
|
||||
def test_regular_grid_2d_32():
|
||||
ar = np.zeros((20, 40))
|
||||
g = regular_grid(ar.shape, 32)
|
||||
assert_equal(g, [slice(2.0, None, 5.0), slice(2.0, None, 5.0)])
|
||||
ar[g] = 1
|
||||
assert_equal(ar.sum(), 32)
|
||||
|
||||
|
||||
def test_regular_grid_3d_8():
|
||||
ar = np.zeros((3, 20, 40))
|
||||
g = regular_grid(ar.shape, 8)
|
||||
assert_equal(g, [slice(1.0, None, 3.0), slice(5.0, None, 10.0),
|
||||
slice(5.0, None, 10.0)])
|
||||
ar[g] = 1
|
||||
assert_equal(ar.sum(), 8)
|
192
venv/Lib/site-packages/skimage/util/tests/test_shape.py
Normal file
|
@ -0,0 +1,192 @@
|
|||
import numpy as np
|
||||
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import assert_equal, assert_warns
|
||||
from skimage.util.shape import view_as_blocks, view_as_windows
|
||||
|
||||
|
||||
def test_view_as_blocks_block_not_a_tuple():
|
||||
A = np.arange(10)
|
||||
with testing.raises(TypeError):
|
||||
view_as_blocks(A, [5])
|
||||
|
||||
|
||||
def test_view_as_blocks_negative_shape():
|
||||
A = np.arange(10)
|
||||
with testing.raises(ValueError):
|
||||
view_as_blocks(A, (-2,))
|
||||
|
||||
|
||||
def test_view_as_blocks_block_too_large():
|
||||
A = np.arange(10)
|
||||
with testing.raises(ValueError):
|
||||
view_as_blocks(A, (11,))
|
||||
|
||||
|
||||
def test_view_as_blocks_wrong_block_dimension():
|
||||
A = np.arange(10)
|
||||
with testing.raises(ValueError):
|
||||
view_as_blocks(A, (2, 2))
|
||||
|
||||
|
||||
def test_view_as_blocks_1D_array_wrong_block_shape():
|
||||
A = np.arange(10)
|
||||
with testing.raises(ValueError):
|
||||
view_as_blocks(A, (3,))
|
||||
|
||||
|
||||
def test_view_as_blocks_1D_array():
|
||||
A = np.arange(10)
|
||||
B = view_as_blocks(A, (5,))
|
||||
assert_equal(B, np.array([[0, 1, 2, 3, 4],
|
||||
[5, 6, 7, 8, 9]]))
|
||||
|
||||
|
||||
def test_view_as_blocks_2D_array():
|
||||
A = np.arange(4 * 4).reshape(4, 4)
|
||||
B = view_as_blocks(A, (2, 2))
|
||||
assert_equal(B[0, 1], np.array([[2, 3],
|
||||
[6, 7]]))
|
||||
assert_equal(B[1, 0, 1, 1], 13)
|
||||
|
||||
|
||||
def test_view_as_blocks_3D_array():
|
||||
A = np.arange(4 * 4 * 6).reshape(4, 4, 6)
|
||||
B = view_as_blocks(A, (1, 2, 2))
|
||||
assert_equal(B.shape, (4, 2, 3, 1, 2, 2))
|
||||
assert_equal(B[2:, 0, 2], np.array([[[[52, 53],
|
||||
[58, 59]]],
|
||||
[[[76, 77],
|
||||
[82, 83]]]]))
|
||||
|
||||
|
||||
def test_view_as_windows_input_not_array():
|
||||
A = [1, 2, 3, 4, 5]
|
||||
with testing.raises(TypeError):
|
||||
view_as_windows(A, (2,))
|
||||
|
||||
|
||||
def test_view_as_windows_wrong_window_dimension():
|
||||
A = np.arange(10)
|
||||
with testing.raises(ValueError):
|
||||
view_as_windows(A, (2, 2))
|
||||
|
||||
|
||||
def test_view_as_windows_negative_window_length():
|
||||
A = np.arange(10)
|
||||
with testing.raises(ValueError):
|
||||
view_as_windows(A, (-1,))
|
||||
|
||||
|
||||
def test_view_as_windows_window_too_large():
|
||||
A = np.arange(10)
|
||||
with testing.raises(ValueError):
|
||||
view_as_windows(A, (11,))
|
||||
|
||||
|
||||
def test_view_as_windows_step_below_one():
|
||||
A = np.arange(10)
|
||||
with testing.raises(ValueError):
|
||||
view_as_windows(A, (11,), step=0.9)
|
||||
|
||||
|
||||
def test_view_as_windows_1D():
|
||||
A = np.arange(10)
|
||||
window_shape = (3,)
|
||||
B = view_as_windows(A, window_shape)
|
||||
assert_equal(B, np.array([[0, 1, 2],
|
||||
[1, 2, 3],
|
||||
[2, 3, 4],
|
||||
[3, 4, 5],
|
||||
[4, 5, 6],
|
||||
[5, 6, 7],
|
||||
[6, 7, 8],
|
||||
[7, 8, 9]]))
|
||||
|
||||
|
||||
def test_view_as_windows_2D():
|
||||
A = np.arange(5 * 4).reshape(5, 4)
|
||||
window_shape = (4, 3)
|
||||
B = view_as_windows(A, window_shape)
|
||||
assert_equal(B.shape, (2, 2, 4, 3))
|
||||
assert_equal(B, np.array([[[[0, 1, 2],
|
||||
[4, 5, 6],
|
||||
[8, 9, 10],
|
||||
[12, 13, 14]],
|
||||
[[1, 2, 3],
|
||||
[5, 6, 7],
|
||||
[9, 10, 11],
|
||||
[13, 14, 15]]],
|
||||
[[[4, 5, 6],
|
||||
[8, 9, 10],
|
||||
[12, 13, 14],
|
||||
[16, 17, 18]],
|
||||
[[5, 6, 7],
|
||||
[9, 10, 11],
|
||||
[13, 14, 15],
|
||||
[17, 18, 19]]]]))
|
||||
|
||||
|
||||
def test_view_as_windows_with_skip():
|
||||
A = np.arange(20).reshape((5, 4))
|
||||
B = view_as_windows(A, 2, step=2)
|
||||
assert_equal(B, [[[[0, 1],
|
||||
[4, 5]],
|
||||
[[2, 3],
|
||||
[6, 7]]],
|
||||
[[[8, 9],
|
||||
[12, 13]],
|
||||
[[10, 11],
|
||||
[14, 15]]]])
|
||||
|
||||
C = view_as_windows(A, 2, step=4)
|
||||
assert_equal(C.shape, (1, 1, 2, 2))
|
||||
|
||||
|
||||
def test_views_non_contiguous():
|
||||
A = np.arange(16).reshape((4, 4))
|
||||
A = A[::2, :]
|
||||
|
||||
res_b = view_as_blocks(A, (2, 2))
|
||||
res_w = view_as_windows(A, (2, 2))
|
||||
print(res_b)
|
||||
print(res_w)
|
||||
|
||||
expected_b = [[[[0, 1],
|
||||
[8, 9]],
|
||||
[[2, 3],
|
||||
[10, 11]]]]
|
||||
|
||||
expected_w = [[[[ 0, 1],
|
||||
[ 8, 9]],
|
||||
[[ 1, 2],
|
||||
[ 9, 10]],
|
||||
[[ 2, 3],
|
||||
[10, 11]]]]
|
||||
|
||||
assert_equal(res_b, expected_b)
|
||||
assert_equal(res_w, expected_w)
|
||||
|
||||
|
||||
def test_view_as_windows_step_tuple():
|
||||
A = np.arange(24).reshape((6, 4))
|
||||
B = view_as_windows(A, (3, 2), step=3)
|
||||
assert B.shape == (2, 1, 3, 2)
|
||||
assert B.size != A.size
|
||||
|
||||
C = view_as_windows(A, (3, 2), step=(3, 2))
|
||||
assert C.shape == (2, 2, 3, 2)
|
||||
assert C.size == A.size
|
||||
|
||||
assert_equal(C, [[[[0, 1],
|
||||
[4, 5],
|
||||
[8, 9]],
|
||||
[[2, 3],
|
||||
[6, 7],
|
||||
[10, 11]]],
|
||||
[[[12, 13],
|
||||
[16, 17],
|
||||
[20, 21]],
|
||||
[[14, 15],
|
||||
[18, 19],
|
||||
[22, 23]]]])
|
|
@ -0,0 +1,39 @@
|
|||
import numpy as np
|
||||
from skimage.util import unique_rows
|
||||
from skimage._shared import testing
|
||||
from skimage._shared.testing import assert_equal
|
||||
|
||||
|
||||
def test_discontiguous_array():
|
||||
ar = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]], np.uint8)
|
||||
ar = ar[::2]
|
||||
ar_out = unique_rows(ar)
|
||||
desired_ar_out = np.array([[1, 0, 1]], np.uint8)
|
||||
assert_equal(ar_out, desired_ar_out)
|
||||
|
||||
|
||||
def test_uint8_array():
|
||||
ar = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]], np.uint8)
|
||||
ar_out = unique_rows(ar)
|
||||
desired_ar_out = np.array([[0, 1, 0], [1, 0, 1]], np.uint8)
|
||||
assert_equal(ar_out, desired_ar_out)
|
||||
|
||||
|
||||
def test_float_array():
|
||||
ar = np.array([[1.1, 0.0, 1.1], [0.0, 1.1, 0.0], [1.1, 0.0, 1.1]],
|
||||
np.float)
|
||||
ar_out = unique_rows(ar)
|
||||
desired_ar_out = np.array([[0.0, 1.1, 0.0], [1.1, 0.0, 1.1]], np.float)
|
||||
assert_equal(ar_out, desired_ar_out)
|
||||
|
||||
|
||||
def test_1d_array():
|
||||
ar = np.array([1, 0, 1, 1], np.uint8)
|
||||
with testing.raises(ValueError):
|
||||
unique_rows(ar)
|
||||
|
||||
|
||||
def test_3d_array():
|
||||
ar = np.arange(8).reshape((2, 2, 2))
|
||||
with testing.raises(ValueError):
|
||||
unique_rows(ar)
|
50
venv/Lib/site-packages/skimage/util/unique.py
Normal file
|
@ -0,0 +1,50 @@
|
|||
import numpy as np
|
||||
|
||||
|
||||
def unique_rows(ar):
|
||||
"""Remove repeated rows from a 2D array.
|
||||
|
||||
In particular, if given an array of coordinates of shape
|
||||
(Npoints, Ndim), it will remove repeated points.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ar : 2-D ndarray
|
||||
The input array.
|
||||
|
||||
Returns
|
||||
-------
|
||||
ar_out : 2-D ndarray
|
||||
A copy of the input array with repeated rows removed.
|
||||
|
||||
Raises
|
||||
------
|
||||
ValueError : if `ar` is not two-dimensional.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The function will generate a copy of `ar` if it is not
|
||||
C-contiguous, which will negatively affect performance for large
|
||||
input arrays.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> ar = np.array([[1, 0, 1],
|
||||
... [0, 1, 0],
|
||||
... [1, 0, 1]], np.uint8)
|
||||
>>> unique_rows(ar)
|
||||
array([[0, 1, 0],
|
||||
[1, 0, 1]], dtype=uint8)
|
||||
"""
|
||||
if ar.ndim != 2:
|
||||
raise ValueError("unique_rows() only makes sense for 2D arrays, "
|
||||
"got %dd" % ar.ndim)
|
||||
# the view in the next line only works if the array is C-contiguous
|
||||
ar = np.ascontiguousarray(ar)
|
||||
# np.unique() finds identical items in a raveled array. To make it
|
||||
# see each row as a single item, we create a view of each row as a
|
||||
# byte string of length itemsize times number of columns in `ar`
|
||||
ar_row_view = ar.view('|S%d' % (ar.itemsize * ar.shape[1]))
|
||||
_, unique_row_indices = np.unique(ar_row_view, return_index=True)
|
||||
ar_out = ar[unique_row_indices]
|
||||
return ar_out
|
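A small sketch (illustrative only) of the byte-string row view that unique_rows relies on, showing how np.unique ends up comparing whole rows at once:

# Illustrative sketch only: each C-contiguous row viewed as one fixed-length byte string.
import numpy as np

ar = np.array([[1, 0, 1],
               [0, 1, 0],
               [1, 0, 1]], np.uint8)
row_view = np.ascontiguousarray(ar).view('|S%d' % (ar.itemsize * ar.shape[1]))
_, unique_row_indices = np.unique(row_view, return_index=True)
print(ar[unique_row_indices])   # [[0 1 0]
                                #  [1 0 1]]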