Fixed database typo and removed unnecessary class identifier.
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions
venv/Lib/site-packages/skimage/feature/__init__.py (new file, 90 lines)
@@ -0,0 +1,90 @@
from .._shared.utils import deprecated

from ._canny import canny
from ._cascade import Cascade
from ._daisy import daisy
from ._hog import hog
from .texture import (greycomatrix, greycoprops,
                      local_binary_pattern,
                      multiblock_lbp,
                      draw_multiblock_lbp)

from .peak import peak_local_max
from .corner import (corner_kitchen_rosenfeld, corner_harris,
                     corner_shi_tomasi, corner_foerstner, corner_subpix,
                     corner_peaks, corner_fast, structure_tensor,
                     structure_tensor_eigvals, hessian_matrix,
                     hessian_matrix_eigvals, hessian_matrix_det,
                     corner_moravec, corner_orientations,
                     shape_index)
from .template import match_template
from .brief import BRIEF
from .censure import CENSURE
from .orb import ORB
from .match import match_descriptors
from .util import plot_matches
from .blob import blob_dog, blob_log, blob_doh
from .haar import (haar_like_feature, haar_like_feature_coord,
                   draw_haar_like_feature)


@deprecated(alt_func='skimage.registration.phase_cross_correlation',
            removed_version='0.19')
def masked_register_translation(src_image, target_image, src_mask,
                                target_mask=None, overlap_ratio=0.3):
    from ..registration import phase_cross_correlation
    return phase_cross_correlation(src_image, target_image,
                                   reference_mask=src_mask,
                                   moving_mask=target_mask,
                                   overlap_ratio=overlap_ratio)


@deprecated(alt_func='skimage.registration.phase_cross_correlation',
            removed_version='0.19')
def register_translation(src_image, target_image, upsample_factor=1,
                         space="real", return_error=True):
    from ..registration import phase_cross_correlation
    return phase_cross_correlation(src_image, target_image,
                                   upsample_factor=upsample_factor,
                                   space=space, return_error=return_error)


__all__ = ['canny',
           'Cascade',
           'daisy',
           'hog',
           'greycomatrix',
           'greycoprops',
           'local_binary_pattern',
           'multiblock_lbp',
           'draw_multiblock_lbp',
           'peak_local_max',
           'structure_tensor',
           'structure_tensor_eigvals',
           'hessian_matrix',
           'hessian_matrix_det',
           'hessian_matrix_eigvals',
           'shape_index',
           'corner_kitchen_rosenfeld',
           'corner_harris',
           'corner_shi_tomasi',
           'corner_foerstner',
           'corner_subpix',
           'corner_peaks',
           'corner_moravec',
           'corner_fast',
           'corner_orientations',
           'match_template',
           'register_translation',
           'masked_register_translation',
           'BRIEF',
           'CENSURE',
           'ORB',
           'match_descriptors',
           'plot_matches',
           'blob_dog',
           'blob_doh',
           'blob_log',
           'haar_like_feature',
           'haar_like_feature_coord',
           'draw_haar_like_feature']
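The two deprecated wrappers above only forward to the newer registration API. A minimal sketch of the equivalent direct call (not part of the committed file; the random test arrays are illustrative):

    import numpy as np
    from skimage.registration import phase_cross_correlation

    reference = np.random.rand(64, 64)
    moving = np.roll(reference, (3, 5), axis=(0, 1))

    # what the deprecated register_translation shim now forwards to
    shift, error, phasediff = phase_cross_correlation(reference, moving,
                                                      upsample_factor=10)
    print(shift)  # approximately [-3., -5.]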
17 binary files not shown.
venv/Lib/site-packages/skimage/feature/_canny.py (new file, 297 lines)
@@ -0,0 +1,297 @@
"""
canny.py - Canny Edge detector

Reference: Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
    Pattern Analysis and Machine Intelligence, 8:679-714, 1986

Originally part of CellProfiler, code licensed under both GPL and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: Lee Kamentsky
"""

import numpy as np
import scipy.ndimage as ndi
from scipy.ndimage import generate_binary_structure, binary_erosion, label
from ..filters import gaussian
from .. import dtype_limits, img_as_float
from .._shared.utils import check_nD


def smooth_with_function_and_mask(image, function, mask):
    """Smooth an image with a linear function, ignoring masked pixels.

    Parameters
    ----------
    image : array
        Image you want to smooth.
    function : callable
        A function that does image smoothing.
    mask : array
        Mask with 1's for significant pixels, 0's for masked pixels.

    Notes
    -----
    This function calculates the fractional contribution of masked pixels
    by applying the function to the mask (which gets you the fraction of
    the pixel data that's due to significant points). We then mask the image
    and apply the function. The resulting values will be lower by the
    bleed-over fraction, so you can recalibrate by dividing by the function
    on the mask to recover the effect of smoothing from just the significant
    pixels.
    """
    bleed_over = function(mask.astype(float))
    masked_image = np.zeros(image.shape, image.dtype)
    masked_image[mask] = image[mask]
    smoothed_image = function(masked_image)
    output_image = smoothed_image / (bleed_over + np.finfo(float).eps)
    return output_image


def canny(image, sigma=1., low_threshold=None, high_threshold=None, mask=None,
          use_quantiles=False):
    """Edge filter an image using the Canny algorithm.

    Parameters
    ----------
    image : 2D array
        Grayscale input image to detect edges on; can be of any dtype.
    sigma : float, optional
        Standard deviation of the Gaussian filter.
    low_threshold : float, optional
        Lower bound for hysteresis thresholding (linking edges).
        If None, low_threshold is set to 10% of dtype's max.
    high_threshold : float, optional
        Upper bound for hysteresis thresholding (linking edges).
        If None, high_threshold is set to 20% of dtype's max.
    mask : array, dtype=bool, optional
        Mask to limit the application of Canny to a certain area.
    use_quantiles : bool, optional
        If True then treat low_threshold and high_threshold as quantiles of the
        edge magnitude image, rather than absolute edge magnitude values. If True
        then the thresholds must be in the range [0, 1].

    Returns
    -------
    output : 2D array (image)
        The binary edge map.

    See also
    --------
    skimage.sobel

    Notes
    -----
    The steps of the algorithm are as follows:

    * Smooth the image using a Gaussian with ``sigma`` width.

    * Apply the horizontal and vertical Sobel operators to get the gradients
      within the image. The edge strength is the norm of the gradient.

    * Thin potential edges to 1-pixel wide curves. First, find the normal
      to the edge at each point. This is done by looking at the
      signs and the relative magnitude of the X-Sobel and Y-Sobel
      to sort the points into 4 categories: horizontal, vertical,
      diagonal and antidiagonal. Then look in the normal and reverse
      directions to see if the values in either of those directions are
      greater than the point in question. Use interpolation to get a mix of
      points instead of picking the one that's the closest to the normal.

    * Perform a hysteresis thresholding: first label all points above the
      high threshold as edges. Then recursively label any point above the
      low threshold that is 8-connected to a labeled point as an edge.

    References
    ----------
    .. [1] Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
           Pattern Analysis and Machine Intelligence, 8:679-714, 1986
           :DOI:`10.1109/TPAMI.1986.4767851`
    .. [2] William Green's Canny tutorial
           https://en.wikipedia.org/wiki/Canny_edge_detector

    Examples
    --------
    >>> from skimage import feature
    >>> # Generate noisy image of a square
    >>> im = np.zeros((256, 256))
    >>> im[64:-64, 64:-64] = 1
    >>> im += 0.2 * np.random.rand(*im.shape)
    >>> # First trial with the Canny filter, with the default smoothing
    >>> edges1 = feature.canny(im)
    >>> # Increase the smoothing for better results
    >>> edges2 = feature.canny(im, sigma=3)
    """

    #
    # The steps involved:
    #
    # * Smooth using the Gaussian with sigma above.
    #
    # * Apply the horizontal and vertical Sobel operators to get the gradients
    #   within the image. The edge strength is the sum of the magnitudes
    #   of the gradients in each direction.
    #
    # * Find the normal to the edge at each point using the arctangent of the
    #   ratio of the Y sobel over the X sobel - pragmatically, we can
    #   look at the signs of X and Y and the relative magnitude of X vs Y
    #   to sort the points into 4 categories: horizontal, vertical,
    #   diagonal and antidiagonal.
    #
    # * Look in the normal and reverse directions to see if the values
    #   in either of those directions are greater than the point in question.
    #   Use interpolation to get a mix of points instead of picking the one
    #   that's the closest to the normal.
    #
    # * Label all points above the high threshold as edges.
    # * Recursively label any point above the low threshold that is 8-connected
    #   to a labeled point as an edge.
    #
    # Regarding masks, any point touching a masked point will have a gradient
    # that is "infected" by the masked point, so it's enough to erode the
    # mask by one and then mask the output. We also mask out the border points
    # because who knows what lies beyond the edge of the image?
    #
    check_nD(image, 2)
    dtype_max = dtype_limits(image, clip_negative=False)[1]

    if low_threshold is None:
        low_threshold = 0.1
    elif use_quantiles:
        if not(0.0 <= low_threshold <= 1.0):
            raise ValueError("Quantile thresholds must be between 0 and 1.")
    else:
        low_threshold = low_threshold / dtype_max

    if high_threshold is None:
        high_threshold = 0.2
    elif use_quantiles:
        if not(0.0 <= high_threshold <= 1.0):
            raise ValueError("Quantile thresholds must be between 0 and 1.")
    else:
        high_threshold = high_threshold / dtype_max

    if mask is None:
        mask = np.ones(image.shape, dtype=bool)

    def fsmooth(x):
        return img_as_float(gaussian(x, sigma, mode='constant'))

    smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
    jsobel = ndi.sobel(smoothed, axis=1)
    isobel = ndi.sobel(smoothed, axis=0)
    abs_isobel = np.abs(isobel)
    abs_jsobel = np.abs(jsobel)
    magnitude = np.hypot(isobel, jsobel)

    #
    # Make the eroded mask. Setting the border value to zero will wipe
    # out the image edges for us.
    #
    s = generate_binary_structure(2, 2)
    eroded_mask = binary_erosion(mask, s, border_value=0)
    eroded_mask = eroded_mask & (magnitude > 0)
    #
    # --------- Find local maxima --------------
    #
    # Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
    # 90-135 degrees and 135-180 degrees.
    #
    local_maxima = np.zeros(image.shape, bool)
    # ----- 0 to 45 degrees ------
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    # Get the magnitudes shifted left to make a matrix of the points to the
    # right of pts. Similarly, shift left and down to get the points to the
    # top right of pts.
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    # ----- 45 to 90 degrees ------
    # Mix diagonal and vertical
    #
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1 = magnitude[:, 1:][pts[:, :-1]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    # ----- 90 to 135 degrees ------
    # Mix anti-diagonal and vertical
    #
    pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
    pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1a = magnitude[:, 1:][pts[:, :-1]]
    c2a = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = c2a * w + c1a * (1.0 - w) <= m
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = c2 * w + c1 * (1.0 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    # ----- 135 to 180 degrees ------
    # Mix anti-diagonal and anti-horizontal
    #
    pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus

    #
    # ---- If use_quantiles is set then calculate the thresholds to use
    #
    if use_quantiles:
        high_threshold = np.percentile(magnitude, 100.0 * high_threshold)
        low_threshold = np.percentile(magnitude, 100.0 * low_threshold)

    #
    # ---- Create two masks at the two thresholds.
    #
    high_mask = local_maxima & (magnitude >= high_threshold)
    low_mask = local_maxima & (magnitude >= low_threshold)

    #
    # Segment the low-mask, then only keep low-segments that have
    # some high_mask component in them
    #
    strel = np.ones((3, 3), bool)
    labels, count = label(low_mask, strel)
    if count == 0:
        return low_mask

    sums = (np.array(ndi.sum(high_mask, labels,
                             np.arange(count, dtype=np.int32) + 1),
                     copy=False, ndmin=1))
    good_label = np.zeros((count + 1,), bool)
    good_label[1:] = sums > 0
    output_mask = good_label[labels]
    return output_mask
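The bleed-over correction documented in `smooth_with_function_and_mask` above can be sanity-checked with a small sketch (not part of the committed file; the helper name and the constant test image are illustrative):

    import numpy as np
    from scipy.ndimage import gaussian_filter

    image = np.full((32, 32), 5.0)            # constant image
    mask = np.zeros((32, 32), dtype=bool)
    mask[8:24, 8:24] = True                   # only the centre is "significant"

    def smooth(x):
        return gaussian_filter(x, sigma=2)

    # divide the smoothed masked image by the smoothed mask to undo bleed-over
    bleed_over = smooth(mask.astype(float))
    masked = np.where(mask, image, 0.0)
    corrected = smooth(masked) / (bleed_over + np.finfo(float).eps)

    print(corrected[16, 16])                  # approximately 5.0 inside the mask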
venv/Lib/site-packages/skimage/feature/_cascade.cp36-win32.pyd (new binary file, not shown)

venv/Lib/site-packages/skimage/feature/_daisy.py (new file, 222 lines)
@@ -0,0 +1,222 @@
import numpy as np
from numpy import sqrt, pi, arctan2, cos, sin, exp
from scipy.ndimage import gaussian_filter
from .. import img_as_float, draw
from ..color import gray2rgb
from .._shared.utils import check_nD


def daisy(image, step=4, radius=15, rings=3, histograms=8, orientations=8,
          normalization='l1', sigmas=None, ring_radii=None, visualize=False):
    '''Extract DAISY feature descriptors densely for the given image.

    DAISY is a feature descriptor similar to SIFT formulated in a way that
    allows for fast dense extraction. Typically, this is practical for
    bag-of-features image representations.

    The implementation follows Tola et al. [1]_ but deviate on the following
    points:

    * Histogram bin contribution are smoothed with a circular Gaussian
      window over the tonal range (the angular range).
    * The sigma values of the spatial Gaussian smoothing in this code do not
      match the sigma values in the original code by Tola et al. [2]_. In
      their code, spatial smoothing is applied to both the input image and
      the center histogram. However, this smoothing is not documented in [1]_
      and, therefore, it is omitted.

    Parameters
    ----------
    image : (M, N) array
        Input image (grayscale).
    step : int, optional
        Distance between descriptor sampling points.
    radius : int, optional
        Radius (in pixels) of the outermost ring.
    rings : int, optional
        Number of rings.
    histograms : int, optional
        Number of histograms sampled per ring.
    orientations : int, optional
        Number of orientations (bins) per histogram.
    normalization : [ 'l1' | 'l2' | 'daisy' | 'off' ], optional
        How to normalize the descriptors

        * 'l1': L1-normalization of each descriptor.
        * 'l2': L2-normalization of each descriptor.
        * 'daisy': L2-normalization of individual histograms.
        * 'off': Disable normalization.

    sigmas : 1D array of float, optional
        Standard deviation of spatial Gaussian smoothing for the center
        histogram and for each ring of histograms. The array of sigmas should
        be sorted from the center and out. I.e. the first sigma value defines
        the spatial smoothing of the center histogram and the last sigma value
        defines the spatial smoothing of the outermost ring. Specifying sigmas
        overrides the following parameter.

            ``rings = len(sigmas) - 1``

    ring_radii : 1D array of int, optional
        Radius (in pixels) for each ring. Specifying ring_radii overrides the
        following two parameters.

            ``rings = len(ring_radii)``
            ``radius = ring_radii[-1]``

        If both sigmas and ring_radii are given, they must satisfy the
        following predicate since no radius is needed for the center
        histogram.

            ``len(ring_radii) == len(sigmas) + 1``

    visualize : bool, optional
        Generate a visualization of the DAISY descriptors

    Returns
    -------
    descs : array
        Grid of DAISY descriptors for the given image as an array
        dimensionality (P, Q, R) where

            ``P = ceil((M - radius*2) / step)``
            ``Q = ceil((N - radius*2) / step)``
            ``R = (rings * histograms + 1) * orientations``

    descs_img : (M, N, 3) array (only if visualize==True)
        Visualization of the DAISY descriptors.

    References
    ----------
    .. [1] Tola et al. "Daisy: An efficient dense descriptor applied to wide-
           baseline stereo." Pattern Analysis and Machine Intelligence, IEEE
           Transactions on 32.5 (2010): 815-830.
    .. [2] http://cvlab.epfl.ch/software/daisy
    '''

    check_nD(image, 2, 'img')

    image = img_as_float(image)

    # Validate parameters.
    if sigmas is not None and ring_radii is not None \
            and len(sigmas) - 1 != len(ring_radii):
        raise ValueError('`len(sigmas)-1 != len(ring_radii)`')
    if ring_radii is not None:
        rings = len(ring_radii)
        radius = ring_radii[-1]
    if sigmas is not None:
        rings = len(sigmas) - 1
    if sigmas is None:
        sigmas = [radius * (i + 1) / float(2 * rings) for i in range(rings)]
    if ring_radii is None:
        ring_radii = [radius * (i + 1) / float(rings) for i in range(rings)]
    if normalization not in ['l1', 'l2', 'daisy', 'off']:
        raise ValueError('Invalid normalization method.')

    # Compute image derivatives.
    dx = np.zeros(image.shape)
    dy = np.zeros(image.shape)
    dx[:, :-1] = np.diff(image, n=1, axis=1)
    dy[:-1, :] = np.diff(image, n=1, axis=0)

    # Compute gradient orientation and magnitude and their contribution
    # to the histograms.
    grad_mag = sqrt(dx ** 2 + dy ** 2)
    grad_ori = arctan2(dy, dx)
    orientation_kappa = orientations / pi
    orientation_angles = [2 * o * pi / orientations - pi
                          for o in range(orientations)]
    hist = np.empty((orientations,) + image.shape, dtype=float)
    for i, o in enumerate(orientation_angles):
        # Weigh bin contribution by the circular normal distribution
        hist[i, :, :] = exp(orientation_kappa * cos(grad_ori - o))
        # Weigh bin contribution by the gradient magnitude
        hist[i, :, :] = np.multiply(hist[i, :, :], grad_mag)

    # Smooth orientation histograms for the center and all rings.
    sigmas = [sigmas[0]] + sigmas
    hist_smooth = np.empty((rings + 1,) + hist.shape, dtype=float)
    for i in range(rings + 1):
        for j in range(orientations):
            hist_smooth[i, j, :, :] = gaussian_filter(hist[j, :, :],
                                                      sigma=sigmas[i])

    # Assemble descriptor grid.
    theta = [2 * pi * j / histograms for j in range(histograms)]
    desc_dims = (rings * histograms + 1) * orientations
    descs = np.empty((desc_dims, image.shape[0] - 2 * radius,
                      image.shape[1] - 2 * radius))
    descs[:orientations, :, :] = hist_smooth[0, :, radius:-radius,
                                             radius:-radius]
    idx = orientations
    for i in range(rings):
        for j in range(histograms):
            y_min = radius + int(round(ring_radii[i] * sin(theta[j])))
            y_max = descs.shape[1] + y_min
            x_min = radius + int(round(ring_radii[i] * cos(theta[j])))
            x_max = descs.shape[2] + x_min
            descs[idx:idx + orientations, :, :] = hist_smooth[i + 1, :,
                                                              y_min:y_max,
                                                              x_min:x_max]
            idx += orientations
    descs = descs[:, ::step, ::step]
    descs = descs.swapaxes(0, 1).swapaxes(1, 2)

    # Normalize descriptors.
    if normalization != 'off':
        descs += 1e-10
        if normalization == 'l1':
            descs /= np.sum(descs, axis=2)[:, :, np.newaxis]
        elif normalization == 'l2':
            descs /= sqrt(np.sum(descs ** 2, axis=2))[:, :, np.newaxis]
        elif normalization == 'daisy':
            for i in range(0, desc_dims, orientations):
                norms = sqrt(np.sum(descs[:, :, i:i + orientations] ** 2,
                                    axis=2))
                descs[:, :, i:i + orientations] /= norms[:, :, np.newaxis]

    if visualize:
        descs_img = gray2rgb(image)
        for i in range(descs.shape[0]):
            for j in range(descs.shape[1]):
                # Draw center histogram sigma
                color = [1, 0, 0]
                desc_y = i * step + radius
                desc_x = j * step + radius
                rows, cols, val = draw.circle_perimeter_aa(desc_y, desc_x, int(sigmas[0]))
                draw.set_color(descs_img, (rows, cols), color, alpha=val)
                max_bin = np.max(descs[i, j, :])
                for o_num, o in enumerate(orientation_angles):
                    # Draw center histogram bins
                    bin_size = descs[i, j, o_num] / max_bin
                    dy = sigmas[0] * bin_size * sin(o)
                    dx = sigmas[0] * bin_size * cos(o)
                    rows, cols, val = draw.line_aa(desc_y, desc_x, int(desc_y + dy),
                                                   int(desc_x + dx))
                    draw.set_color(descs_img, (rows, cols), color, alpha=val)
                for r_num, r in enumerate(ring_radii):
                    color_offset = float(1 + r_num) / rings
                    color = (1 - color_offset, 1, color_offset)
                    for t_num, t in enumerate(theta):
                        # Draw ring histogram sigmas
                        hist_y = desc_y + int(round(r * sin(t)))
                        hist_x = desc_x + int(round(r * cos(t)))
                        rows, cols, val = draw.circle_perimeter_aa(hist_y, hist_x,
                                                                   int(sigmas[r_num + 1]))
                        draw.set_color(descs_img, (rows, cols), color, alpha=val)
                        for o_num, o in enumerate(orientation_angles):
                            # Draw histogram bins
                            bin_size = descs[i, j, orientations + r_num *
                                             histograms * orientations +
                                             t_num * orientations + o_num]
                            bin_size /= max_bin
                            dy = sigmas[r_num + 1] * bin_size * sin(o)
                            dx = sigmas[r_num + 1] * bin_size * cos(o)
                            rows, cols, val = draw.line_aa(hist_y, hist_x,
                                                           int(hist_y + dy),
                                                           int(hist_x + dx))
                            draw.set_color(descs_img, (rows, cols), color, alpha=val)
        return descs, descs_img
    else:
        return descs
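A minimal call sketch for the dense extractor above (not part of the committed file; the sample image and parameter values are illustrative, and the output shape follows the P, Q, R formula in the docstring):

    from skimage import data
    from skimage.feature import daisy

    img = data.camera()                       # 512 x 512 grayscale image
    descs, descs_img = daisy(img, step=180, radius=58, rings=2,
                             histograms=6, orientations=8, visualize=True)
    # R = (rings * histograms + 1) * orientations = 104 values per descriptor
    print(descs.shape)                        # expected (3, 3, 104)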
venv/Lib/site-packages/skimage/feature/_haar.cp36-win32.pyd (new binary file, not shown)
Binary file not shown.

venv/Lib/site-packages/skimage/feature/_hog.py (new file, 295 lines)
@@ -0,0 +1,295 @@
import numpy as np

from . import _hoghistogram


def _hog_normalize_block(block, method, eps=1e-5):
    if method == 'L1':
        out = block / (np.sum(np.abs(block)) + eps)
    elif method == 'L1-sqrt':
        out = np.sqrt(block / (np.sum(np.abs(block)) + eps))
    elif method == 'L2':
        out = block / np.sqrt(np.sum(block ** 2) + eps ** 2)
    elif method == 'L2-Hys':
        out = block / np.sqrt(np.sum(block ** 2) + eps ** 2)
        out = np.minimum(out, 0.2)
        out = out / np.sqrt(np.sum(out ** 2) + eps ** 2)
    else:
        raise ValueError('Selected block normalization method is invalid.')

    return out


def _hog_channel_gradient(channel):
    """Compute unnormalized gradient image along `row` and `col` axes.

    Parameters
    ----------
    channel : (M, N) ndarray
        Grayscale image or one of image channel.

    Returns
    -------
    g_row, g_col : channel gradient along `row` and `col` axes correspondingly.
    """
    g_row = np.empty(channel.shape, dtype=np.double)
    g_row[0, :] = 0
    g_row[-1, :] = 0
    g_row[1:-1, :] = channel[2:, :] - channel[:-2, :]
    g_col = np.empty(channel.shape, dtype=np.double)
    g_col[:, 0] = 0
    g_col[:, -1] = 0
    g_col[:, 1:-1] = channel[:, 2:] - channel[:, :-2]

    return g_row, g_col


def hog(image, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(3, 3),
        block_norm='L2-Hys', visualize=False, transform_sqrt=False,
        feature_vector=True, multichannel=None):
    """Extract Histogram of Oriented Gradients (HOG) for a given image.

    Compute a Histogram of Oriented Gradients (HOG) by

        1. (optional) global image normalization
        2. computing the gradient image in `row` and `col`
        3. computing gradient histograms
        4. normalizing across blocks
        5. flattening into a feature vector

    Parameters
    ----------
    image : (M, N[, C]) ndarray
        Input image.
    orientations : int, optional
        Number of orientation bins.
    pixels_per_cell : 2-tuple (int, int), optional
        Size (in pixels) of a cell.
    cells_per_block : 2-tuple (int, int), optional
        Number of cells in each block.
    block_norm : str {'L1', 'L1-sqrt', 'L2', 'L2-Hys'}, optional
        Block normalization method:

        ``L1``
           Normalization using L1-norm.
        ``L1-sqrt``
           Normalization using L1-norm, followed by square root.
        ``L2``
           Normalization using L2-norm.
        ``L2-Hys``
           Normalization using L2-norm, followed by limiting the
           maximum values to 0.2 (`Hys` stands for `hysteresis`) and
           renormalization using L2-norm. (default)
           For details, see [3]_, [4]_.

    visualize : bool, optional
        Also return an image of the HOG. For each cell and orientation bin,
        the image contains a line segment that is centered at the cell center,
        is perpendicular to the midpoint of the range of angles spanned by the
        orientation bin, and has intensity proportional to the corresponding
        histogram value.
    transform_sqrt : bool, optional
        Apply power law compression to normalize the image before
        processing. DO NOT use this if the image contains negative
        values. Also see `notes` section below.
    feature_vector : bool, optional
        Return the data as a feature vector by calling .ravel() on the result
        just before returning.
    multichannel : boolean, optional
        If True, the last `image` dimension is considered as a color channel,
        otherwise as spatial.

    Returns
    -------
    out : (n_blocks_row, n_blocks_col, n_cells_row, n_cells_col, n_orient) ndarray
        HOG descriptor for the image. If `feature_vector` is True, a 1D
        (flattened) array is returned.
    hog_image : (M, N) ndarray, optional
        A visualisation of the HOG image. Only provided if `visualize` is True.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Histogram_of_oriented_gradients

    .. [2] Dalal, N and Triggs, B, Histograms of Oriented Gradients for
           Human Detection, IEEE Computer Society Conference on Computer
           Vision and Pattern Recognition 2005 San Diego, CA, USA,
           https://lear.inrialpes.fr/people/triggs/pubs/Dalal-cvpr05.pdf,
           :DOI:`10.1109/CVPR.2005.177`

    .. [3] Lowe, D.G., Distinctive image features from scale-invatiant
           keypoints, International Journal of Computer Vision (2004) 60: 91,
           http://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf,
           :DOI:`10.1023/B:VISI.0000029664.99615.94`

    .. [4] Dalal, N, Finding People in Images and Videos,
           Human-Computer Interaction [cs.HC], Institut National Polytechnique
           de Grenoble - INPG, 2006,
           https://tel.archives-ouvertes.fr/tel-00390303/file/NavneetDalalThesis.pdf

    Notes
    -----
    The presented code implements the HOG extraction method from [2]_ with
    the following changes: (I) blocks of (3, 3) cells are used ((2, 2) in the
    paper); (II) no smoothing within cells (Gaussian spatial window with sigma=8pix
    in the paper); (III) L1 block normalization is used (L2-Hys in the paper).

    Power law compression, also known as Gamma correction, is used to reduce
    the effects of shadowing and illumination variations. The compression makes
    the dark regions lighter. When the kwarg `transform_sqrt` is set to
    ``True``, the function computes the square root of each color channel
    and then applies the hog algorithm to the image.
    """
    image = np.atleast_2d(image)

    if multichannel is None:
        multichannel = (image.ndim == 3)

    ndim_spatial = image.ndim - 1 if multichannel else image.ndim
    if ndim_spatial != 2:
        raise ValueError('Only images with 2 spatial dimensions are '
                         'supported. If using with color/multichannel '
                         'images, specify `multichannel=True`.')

    """
    The first stage applies an optional global image normalization
    equalisation that is designed to reduce the influence of illumination
    effects. In practice we use gamma (power law) compression, either
    computing the square root or the log of each color channel.
    Image texture strength is typically proportional to the local surface
    illumination so this compression helps to reduce the effects of local
    shadowing and illumination variations.
    """

    if transform_sqrt:
        image = np.sqrt(image)

    """
    The second stage computes first order image gradients. These capture
    contour, silhouette and some texture information, while providing
    further resistance to illumination variations. The locally dominant
    color channel is used, which provides color invariance to a large
    extent. Variant methods may also include second order image derivatives,
    which act as primitive bar detectors - a useful feature for capturing,
    e.g. bar like structures in bicycles and limbs in humans.
    """

    if image.dtype.kind == 'u':
        # convert uint image to float
        # to avoid problems with subtracting unsigned numbers
        image = image.astype('float')

    if multichannel:
        g_row_by_ch = np.empty_like(image, dtype=np.double)
        g_col_by_ch = np.empty_like(image, dtype=np.double)
        g_magn = np.empty_like(image, dtype=np.double)

        for idx_ch in range(image.shape[2]):
            g_row_by_ch[:, :, idx_ch], g_col_by_ch[:, :, idx_ch] = \
                _hog_channel_gradient(image[:, :, idx_ch])
            g_magn[:, :, idx_ch] = np.hypot(g_row_by_ch[:, :, idx_ch],
                                            g_col_by_ch[:, :, idx_ch])

        # For each pixel select the channel with the highest gradient magnitude
        idcs_max = g_magn.argmax(axis=2)
        rr, cc = np.meshgrid(np.arange(image.shape[0]),
                             np.arange(image.shape[1]),
                             indexing='ij',
                             sparse=True)
        g_row = g_row_by_ch[rr, cc, idcs_max]
        g_col = g_col_by_ch[rr, cc, idcs_max]
    else:
        g_row, g_col = _hog_channel_gradient(image)

    """
    The third stage aims to produce an encoding that is sensitive to
    local image content while remaining resistant to small changes in
    pose or appearance. The adopted method pools gradient orientation
    information locally in the same way as the SIFT [Lowe 2004]
    feature. The image window is divided into small spatial regions,
    called "cells". For each cell we accumulate a local 1-D histogram
    of gradient or edge orientations over all the pixels in the
    cell. This combined cell-level 1-D histogram forms the basic
    "orientation histogram" representation. Each orientation histogram
    divides the gradient angle range into a fixed number of
    predetermined bins. The gradient magnitudes of the pixels in the
    cell are used to vote into the orientation histogram.
    """

    s_row, s_col = image.shape[:2]
    c_row, c_col = pixels_per_cell
    b_row, b_col = cells_per_block

    n_cells_row = int(s_row // c_row)  # number of cells along row-axis
    n_cells_col = int(s_col // c_col)  # number of cells along col-axis

    # compute orientations integral images
    orientation_histogram = np.zeros((n_cells_row, n_cells_col, orientations))

    _hoghistogram.hog_histograms(g_col, g_row, c_col, c_row, s_col, s_row,
                                 n_cells_col, n_cells_row,
                                 orientations, orientation_histogram)

    # now compute the histogram for each cell
    hog_image = None

    if visualize:
        from .. import draw

        radius = min(c_row, c_col) // 2 - 1
        orientations_arr = np.arange(orientations)
        # set dr_arr, dc_arr to correspond to midpoints of orientation bins
        orientation_bin_midpoints = (
            np.pi * (orientations_arr + .5) / orientations)
        dr_arr = radius * np.sin(orientation_bin_midpoints)
        dc_arr = radius * np.cos(orientation_bin_midpoints)
        hog_image = np.zeros((s_row, s_col), dtype=float)
        for r in range(n_cells_row):
            for c in range(n_cells_col):
                for o, dr, dc in zip(orientations_arr, dr_arr, dc_arr):
                    centre = tuple([r * c_row + c_row // 2,
                                    c * c_col + c_col // 2])
                    rr, cc = draw.line(int(centre[0] - dc),
                                       int(centre[1] + dr),
                                       int(centre[0] + dc),
                                       int(centre[1] - dr))
                    hog_image[rr, cc] += orientation_histogram[r, c, o]

    """
    The fourth stage computes normalization, which takes local groups of
    cells and contrast normalizes their overall responses before passing
    to next stage. Normalization introduces better invariance to illumination,
    shadowing, and edge contrast. It is performed by accumulating a measure
    of local histogram "energy" over local groups of cells that we call
    "blocks". The result is used to normalize each cell in the block.
    Typically each individual cell is shared between several blocks, but
    its normalizations are block dependent and thus different. The cell
    thus appears several times in the final output vector with different
    normalizations. This may seem redundant but it improves the performance.
    We refer to the normalized block descriptors as Histogram of Oriented
    Gradient (HOG) descriptors.
    """

    n_blocks_row = (n_cells_row - b_row) + 1
    n_blocks_col = (n_cells_col - b_col) + 1
    normalized_blocks = np.zeros((n_blocks_row, n_blocks_col,
                                  b_row, b_col, orientations))

    for r in range(n_blocks_row):
        for c in range(n_blocks_col):
            block = orientation_histogram[r:r + b_row, c:c + b_col, :]
            normalized_blocks[r, c, :] = \
                _hog_normalize_block(block, method=block_norm)

    """
    The final step collects the HOG descriptors from all blocks of a dense
    overlapping grid of blocks covering the detection window into a combined
    feature vector for use in the window classifier.
    """

    if feature_vector:
        normalized_blocks = normalized_blocks.ravel()

    if visualize:
        return normalized_blocks, hog_image
    else:
        return normalized_blocks
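A short usage sketch for the `hog` function above (not part of the committed file; the astronaut image is just an example input, and the feature count follows from the cell/block parameters):

    from skimage import data, color
    from skimage.feature import hog

    img = color.rgb2gray(data.astronaut())        # 512 x 512 grayscale
    fd, hog_image = hog(img, orientations=8, pixels_per_cell=(16, 16),
                        cells_per_block=(1, 1), visualize=True)
    # 32 x 32 cells, (1, 1) blocks -> 32 * 32 * 1 * 1 * 8 = 8192 features
    print(fd.shape, hog_image.shape)              # expected (8192,) (512, 512)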
Binary file not shown.

@@ -0,0 +1,11 @@
import os
import numpy as np

# Putting this in cython was giving strange bugs for different versions
# of cython which seemed to indicate troubles with the __file__ variable
# not being defined. Keeping it in pure python makes it more reliable
this_dir = os.path.dirname(__file__)
POS = np.loadtxt(os.path.join(this_dir, "orb_descriptor_positions.txt"),
                 dtype=np.int8)
POS0 = np.ascontiguousarray(POS[:, :2])
POS1 = np.ascontiguousarray(POS[:, 2:])
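The small module above keeps the ORB sampling-position lookup in pure Python so that ``__file__`` resolves reliably; the same pattern works for any packaged data table. A generic sketch (the file name here is hypothetical, not from this commit):

    import os
    import numpy as np

    def load_positions(fname="sampling_positions.txt"):   # hypothetical data file
        # resolve the data file relative to this module, not the working directory
        this_dir = os.path.dirname(os.path.abspath(__file__))
        return np.loadtxt(os.path.join(this_dir, fname), dtype=np.int8)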
BIN
venv/Lib/site-packages/skimage/feature/_texture.cp36-win32.pyd
Normal file
BIN
venv/Lib/site-packages/skimage/feature/_texture.cp36-win32.pyd
Normal file
Binary file not shown.
644
venv/Lib/site-packages/skimage/feature/blob.py
Normal file
644
venv/Lib/site-packages/skimage/feature/blob.py
Normal file
|
@ -0,0 +1,644 @@
|
|||
import numpy as np
|
||||
from scipy.ndimage import gaussian_filter, gaussian_laplace
|
||||
import math
|
||||
from math import sqrt, log
|
||||
from scipy import spatial
|
||||
from ..util import img_as_float
|
||||
from .peak import peak_local_max
|
||||
from ._hessian_det_appx import _hessian_matrix_det
|
||||
from ..transform import integral_image
|
||||
from .._shared.utils import check_nD
|
||||
|
||||
|
||||
# This basic blob detection algorithm is based on:
|
||||
# http://www.cs.utah.edu/~jfishbau/advimproc/project1/ (04.04.2013)
|
||||
# Theory behind: https://en.wikipedia.org/wiki/Blob_detection (04.04.2013)
|
||||
|
||||
|
||||
def _compute_disk_overlap(d, r1, r2):
|
||||
"""
|
||||
Compute fraction of surface overlap between two disks of radii
|
||||
``r1`` and ``r2``, with centers separated by a distance ``d``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
d : float
|
||||
Distance between centers.
|
||||
r1 : float
|
||||
Radius of the first disk.
|
||||
r2 : float
|
||||
Radius of the second disk.
|
||||
|
||||
Returns
|
||||
-------
|
||||
fraction: float
|
||||
Fraction of area of the overlap between the two disks.
|
||||
"""
|
||||
|
||||
ratio1 = (d ** 2 + r1 ** 2 - r2 ** 2) / (2 * d * r1)
|
||||
ratio1 = np.clip(ratio1, -1, 1)
|
||||
acos1 = math.acos(ratio1)
|
||||
|
||||
ratio2 = (d ** 2 + r2 ** 2 - r1 ** 2) / (2 * d * r2)
|
||||
ratio2 = np.clip(ratio2, -1, 1)
|
||||
acos2 = math.acos(ratio2)
|
||||
|
||||
a = -d + r2 + r1
|
||||
b = d - r2 + r1
|
||||
c = d + r2 - r1
|
||||
d = d + r2 + r1
|
||||
area = (r1 ** 2 * acos1 + r2 ** 2 * acos2 -
|
||||
0.5 * sqrt(abs(a * b * c * d)))
|
||||
return area / (math.pi * (min(r1, r2) ** 2))
|
||||
|
||||
|
||||
def _compute_sphere_overlap(d, r1, r2):
|
||||
"""
|
||||
Compute volume overlap fraction between two spheres of radii
|
||||
``r1`` and ``r2``, with centers separated by a distance ``d``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
d : float
|
||||
Distance between centers.
|
||||
r1 : float
|
||||
Radius of the first sphere.
|
||||
r2 : float
|
||||
Radius of the second sphere.
|
||||
|
||||
Returns
|
||||
-------
|
||||
fraction: float
|
||||
Fraction of volume of the overlap between the two spheres.
|
||||
|
||||
Notes
|
||||
-----
|
||||
See for example http://mathworld.wolfram.com/Sphere-SphereIntersection.html
|
||||
for more details.
|
||||
"""
|
||||
vol = (math.pi / (12 * d) * (r1 + r2 - d)**2 *
|
||||
(d**2 + 2 * d * (r1 + r2) - 3 * (r1**2 + r2**2) + 6 * r1 * r2))
|
||||
return vol / (4./3 * math.pi * min(r1, r2) ** 3)
|
||||
|
||||
|
||||
def _blob_overlap(blob1, blob2, *, sigma_dim=1):
|
||||
"""Finds the overlapping area fraction between two blobs.
|
||||
|
||||
Returns a float representing fraction of overlapped area. Note that 0.0
|
||||
is *always* returned for dimension greater than 3.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
blob1 : sequence of arrays
|
||||
A sequence of ``(row, col, sigma)`` or ``(pln, row, col, sigma)``,
|
||||
where ``row, col`` (or ``(pln, row, col)``) are coordinates
|
||||
of blob and ``sigma`` is the standard deviation of the Gaussian kernel
|
||||
which detected the blob.
|
||||
blob2 : sequence of arrays
|
||||
A sequence of ``(row, col, sigma)`` or ``(pln, row, col, sigma)``,
|
||||
where ``row, col`` (or ``(pln, row, col)``) are coordinates
|
||||
of blob and ``sigma`` is the standard deviation of the Gaussian kernel
|
||||
which detected the blob.
|
||||
sigma_dim : int, optional
|
||||
The dimensionality of the sigma value. Can be 1 or the same as the
|
||||
dimensionality of the blob space (2 or 3).
|
||||
|
||||
Returns
|
||||
-------
|
||||
f : float
|
||||
Fraction of overlapped area (or volume in 3D).
|
||||
"""
|
||||
ndim = len(blob1) - sigma_dim
|
||||
if ndim > 3:
|
||||
return 0.0
|
||||
root_ndim = sqrt(ndim)
|
||||
|
||||
# we divide coordinates by sigma * sqrt(ndim) to rescale space to isotropy,
|
||||
# giving spheres of radius = 1 or < 1.
|
||||
if blob1[-1] > blob2[-1]:
|
||||
max_sigma = blob1[-sigma_dim:]
|
||||
r1 = 1
|
||||
r2 = blob2[-1] / blob1[-1]
|
||||
else:
|
||||
max_sigma = blob2[-sigma_dim:]
|
||||
r2 = 1
|
||||
r1 = blob1[-1] / blob2[-1]
|
||||
pos1 = blob1[:ndim] / (max_sigma * root_ndim)
|
||||
pos2 = blob2[:ndim] / (max_sigma * root_ndim)
|
||||
|
||||
d = np.sqrt(np.sum((pos2 - pos1)**2))
|
||||
if d > r1 + r2: # centers farther than sum of radii, so no overlap
|
||||
return 0.0
|
||||
|
||||
# one blob is inside the other
|
||||
if d <= abs(r1 - r2):
|
||||
return 1.0
|
||||
|
||||
if ndim == 2:
|
||||
return _compute_disk_overlap(d, r1, r2)
|
||||
|
||||
else: # ndim=3 http://mathworld.wolfram.com/Sphere-SphereIntersection.html
|
||||
return _compute_sphere_overlap(d, r1, r2)
|
||||
|
||||
|
||||
def _prune_blobs(blobs_array, overlap, *, sigma_dim=1):
|
||||
"""Eliminated blobs with area overlap.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
blobs_array : ndarray
|
||||
A 2d array with each row representing 3 (or 4) values,
|
||||
``(row, col, sigma)`` or ``(pln, row, col, sigma)`` in 3D,
|
||||
where ``(row, col)`` (``(pln, row, col)``) are coordinates of the blob
|
||||
and ``sigma`` is the standard deviation of the Gaussian kernel which
|
||||
detected the blob.
|
||||
This array must not have a dimension of size 0.
|
||||
overlap : float
|
||||
A value between 0 and 1. If the fraction of area overlapping for 2
|
||||
blobs is greater than `overlap` the smaller blob is eliminated.
|
||||
sigma_dim : int, optional
|
||||
The number of columns in ``blobs_array`` corresponding to sigmas rather
|
||||
than positions.
|
||||
|
||||
Returns
|
||||
-------
|
||||
A : ndarray
|
||||
`array` with overlapping blobs removed.
|
||||
"""
|
||||
sigma = blobs_array[:, -sigma_dim:].max()
|
||||
distance = 2 * sigma * sqrt(blobs_array.shape[1] - sigma_dim)
|
||||
tree = spatial.cKDTree(blobs_array[:, :-sigma_dim])
|
||||
pairs = np.array(list(tree.query_pairs(distance)))
|
||||
if len(pairs) == 0:
|
||||
return blobs_array
|
||||
else:
|
||||
for (i, j) in pairs:
|
||||
blob1, blob2 = blobs_array[i], blobs_array[j]
|
||||
if _blob_overlap(blob1, blob2, sigma_dim=sigma_dim) > overlap:
|
||||
# note: this test works even in the anisotropic case because
|
||||
# all sigmas increase together.
|
||||
if blob1[-1] > blob2[-1]:
|
||||
blob2[-1] = 0
|
||||
else:
|
||||
blob1[-1] = 0
|
||||
|
||||
return np.array([b for b in blobs_array if b[-1] > 0])
|
||||
|
||||
|
||||
def _format_exclude_border(img_ndim, exclude_border):
|
||||
"""Format an ``exclude_border`` argument as a tuple of ints for calling
|
||||
``peak_local_max``.
|
||||
"""
|
||||
if isinstance(exclude_border, tuple):
|
||||
if len(exclude_border) != img_ndim:
|
||||
raise ValueError(
|
||||
"`exclude_border` should have the same length as the "
|
||||
"dimensionality of the image.")
|
||||
for exclude in exclude_border:
|
||||
if not isinstance(exclude, int):
|
||||
raise ValueError(
|
||||
"exclude border, when expressed as a tuple, must only "
|
||||
"contain ints.")
|
||||
return exclude_border
|
||||
elif isinstance(exclude_border, int):
|
||||
return (exclude_border,) * img_ndim + (0,)
|
||||
elif exclude_border is True:
|
||||
raise ValueError("exclude_border cannot be True")
|
||||
elif exclude_border is False:
|
||||
return (0,) * (img_ndim + 1)
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Unsupported value ({exclude_border}) for exclude_border"
|
||||
)
|
||||
|
||||
|
||||
def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0,
|
||||
overlap=.5, *, exclude_border=False):
|
||||
r"""Finds blobs in the given grayscale image.
|
||||
|
||||
Blobs are found using the Difference of Gaussian (DoG) method [1]_.
|
||||
For each blob found, the method returns its coordinates and the standard
|
||||
deviation of the Gaussian kernel that detected the blob.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : 2D or 3D ndarray
|
||||
Input grayscale image, blobs are assumed to be light on dark
|
||||
background (white on black).
|
||||
min_sigma : scalar or sequence of scalars, optional
|
||||
The minimum standard deviation for Gaussian kernel. Keep this low to
|
||||
detect smaller blobs. The standard deviations of the Gaussian filter
|
||||
are given for each axis as a sequence, or as a single number, in
|
||||
which case it is equal for all axes.
|
||||
max_sigma : scalar or sequence of scalars, optional
|
||||
The maximum standard deviation for Gaussian kernel. Keep this high to
|
||||
detect larger blobs. The standard deviations of the Gaussian filter
|
||||
are given for each axis as a sequence, or as a single number, in
|
||||
which case it is equal for all axes.
|
||||
sigma_ratio : float, optional
|
||||
The ratio between the standard deviation of Gaussian Kernels used for
|
||||
computing the Difference of Gaussians
|
||||
threshold : float, optional.
|
||||
The absolute lower bound for scale space maxima. Local maxima smaller
|
||||
than thresh are ignored. Reduce this to detect blobs with less
|
||||
intensities.
|
||||
overlap : float, optional
|
||||
A value between 0 and 1. If the area of two blobs overlaps by a
|
||||
fraction greater than `threshold`, the smaller blob is eliminated.
|
||||
exclude_border : tuple of ints, int, or False, optional
|
||||
If tuple of ints, the length of the tuple must match the input array's
|
||||
dimensionality. Each element of the tuple will exclude peaks from
|
||||
within `exclude_border`-pixels of the border of the image along that
|
||||
dimension.
|
||||
If nonzero int, `exclude_border` excludes peaks from within
|
||||
`exclude_border`-pixels of the border of the image.
|
||||
If zero or False, peaks are identified regardless of their
|
||||
distance from the border.
|
||||
|
||||
Returns
|
||||
-------
|
||||
A : (n, image.ndim + sigma) ndarray
|
||||
A 2d array with each row representing 2 coordinate values for a 2D
|
||||
image, and 3 coordinate values for a 3D image, plus the sigma(s) used.
|
||||
When a single sigma is passed, outputs are:
|
||||
``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or
|
||||
``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard
|
||||
deviation of the Gaussian kernel which detected the blob. When an
|
||||
anisotropic gaussian is used (sigmas per dimension), the detected sigma
|
||||
is returned for each dimension.
|
||||
|
||||
See also
|
||||
--------
|
||||
skimage.filters.difference_of_gaussians
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage import data, feature
|
||||
>>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40)
|
||||
array([[120. , 272. , 16.777216],
|
||||
[193. , 213. , 16.777216],
|
||||
[263. , 245. , 16.777216],
|
||||
[185. , 347. , 16.777216],
|
||||
[128. , 154. , 10.48576 ],
|
||||
[198. , 155. , 10.48576 ],
|
||||
[124. , 337. , 10.48576 ],
|
||||
[ 45. , 336. , 16.777216],
|
||||
[195. , 102. , 16.777216],
|
||||
[125. , 45. , 16.777216],
|
||||
[261. , 173. , 16.777216],
|
||||
[194. , 277. , 16.777216],
|
||||
[127. , 102. , 10.48576 ],
|
||||
[125. , 208. , 10.48576 ],
|
||||
[267. , 115. , 10.48576 ],
|
||||
[263. , 302. , 16.777216],
|
||||
[196. , 43. , 10.48576 ],
|
||||
[260. , 46. , 16.777216],
|
||||
[267. , 359. , 16.777216],
|
||||
[ 54. , 276. , 10.48576 ],
|
||||
[ 58. , 100. , 10.48576 ],
|
||||
[ 52. , 155. , 16.777216],
|
||||
[ 52. , 216. , 16.777216],
|
||||
[ 54. , 42. , 16.777216]])
|
||||
|
||||
Notes
|
||||
-----
|
||||
The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
|
||||
a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
|
||||
"""
|
||||
image = img_as_float(image)
|
||||
|
||||
# if both min and max sigma are scalar, function returns only one sigma
|
||||
scalar_sigma = np.isscalar(max_sigma) and np.isscalar(min_sigma)
|
||||
|
||||
# Gaussian filter requires that sequence-type sigmas have same
|
||||
# dimensionality as image. This broadcasts scalar kernels
|
||||
if np.isscalar(max_sigma):
|
||||
max_sigma = np.full(image.ndim, max_sigma, dtype=float)
|
||||
if np.isscalar(min_sigma):
|
||||
min_sigma = np.full(image.ndim, min_sigma, dtype=float)
|
||||
|
||||
# Convert sequence types to array
|
||||
min_sigma = np.asarray(min_sigma, dtype=float)
|
||||
max_sigma = np.asarray(max_sigma, dtype=float)
|
||||
|
||||
# k such that min_sigma*(sigma_ratio**k) > max_sigma
|
||||
k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1))
|
||||
|
||||
# a geometric progression of standard deviations for gaussian kernels
|
||||
sigma_list = np.array([min_sigma * (sigma_ratio ** i)
|
||||
for i in range(k + 1)])
|
||||
|
||||
gaussian_images = [gaussian_filter(image, s) for s in sigma_list]
|
||||
|
||||
# computing difference between two successive Gaussian blurred images
|
||||
# multiplying with average standard deviation provides scale invariance
|
||||
dog_images = [(gaussian_images[i] - gaussian_images[i + 1])
|
||||
* np.mean(sigma_list[i]) for i in range(k)]
|
||||
|
||||
image_cube = np.stack(dog_images, axis=-1)
|
||||
|
||||
exclude_border = _format_exclude_border(image.ndim, exclude_border)
|
||||
local_maxima = peak_local_max(
|
||||
image_cube,
|
||||
threshold_abs=threshold,
|
||||
footprint=np.ones((3,) * (image.ndim + 1)),
|
||||
threshold_rel=0.0,
|
||||
exclude_border=exclude_border,
|
||||
)
|
||||
|
||||
# Catch no peaks
|
||||
if local_maxima.size == 0:
|
||||
return np.empty((0, 3))
|
||||
|
||||
# Convert local_maxima to float64
|
||||
lm = local_maxima.astype(np.float64)
|
||||
|
||||
# translate final column of lm, which contains the index of the
|
||||
# sigma that produced the maximum intensity value, into the sigma
|
||||
sigmas_of_peaks = sigma_list[local_maxima[:, -1]]
|
||||
|
||||
if scalar_sigma:
|
||||
# select one sigma column, keeping dimension
|
||||
sigmas_of_peaks = sigmas_of_peaks[:, 0:1]
|
||||
|
||||
# Remove sigma index and replace with sigmas
|
||||
lm = np.hstack([lm[:, :-1], sigmas_of_peaks])
|
||||
|
||||
sigma_dim = sigmas_of_peaks.shape[1]
|
||||
|
||||
return _prune_blobs(lm, overlap, sigma_dim=sigma_dim)
|
||||
|
||||
|
||||
def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2,
|
||||
overlap=.5, log_scale=False, *, exclude_border=False):
|
||||
r"""Finds blobs in the given grayscale image.
|
||||
|
||||
Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
|
||||
For each blob found, the method returns its coordinates and the standard
|
||||
deviation of the Gaussian kernel that detected the blob.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : 2D or 3D ndarray
|
||||
Input grayscale image, blobs are assumed to be light on dark
|
||||
background (white on black).
|
||||
min_sigma : scalar or sequence of scalars, optional
|
||||
the minimum standard deviation for Gaussian kernel. Keep this low to
|
||||
detect smaller blobs. The standard deviations of the Gaussian filter
|
||||
are given for each axis as a sequence, or as a single number, in
|
||||
which case it is equal for all axes.
|
||||
max_sigma : scalar or sequence of scalars, optional
|
||||
The maximum standard deviation for Gaussian kernel. Keep this high to
|
||||
detect larger blobs. The standard deviations of the Gaussian filter
|
||||
are given for each axis as a sequence, or as a single number, in
|
||||
which case it is equal for all axes.
|
||||
num_sigma : int, optional
|
||||
The number of intermediate values of standard deviations to consider
|
||||
between `min_sigma` and `max_sigma`.
|
||||
threshold : float, optional.
|
||||
The absolute lower bound for scale space maxima. Local maxima smaller
|
||||
than thresh are ignored. Reduce this to detect blobs with less
|
||||
intensities.
|
||||
overlap : float, optional
|
||||
A value between 0 and 1. If the area of two blobs overlaps by a
|
||||
fraction greater than `threshold`, the smaller blob is eliminated.
|
||||
log_scale : bool, optional
|
||||
If set intermediate values of standard deviations are interpolated
|
||||
using a logarithmic scale to the base `10`. If not, linear
|
||||
interpolation is used.
|
||||
exclude_border : tuple of ints, int, or False, optional
|
||||
If tuple of ints, the length of the tuple must match the input array's
|
||||
dimensionality. Each element of the tuple will exclude peaks from
|
||||
within `exclude_border`-pixels of the border of the image along that
|
||||
dimension.
|
||||
If nonzero int, `exclude_border` excludes peaks from within
|
||||
`exclude_border`-pixels of the border of the image.
|
||||
If zero or False, peaks are identified regardless of their
|
||||
distance from the border.
|
||||
|
||||
Returns
|
||||
-------
|
||||
A : (n, image.ndim + sigma) ndarray
|
||||
A 2d array with each row representing 2 coordinate values for a 2D
|
||||
image, and 3 coordinate values for a 3D image, plus the sigma(s) used.
|
||||
When a single sigma is passed, outputs are:
|
||||
``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or
|
||||
``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard
|
||||
deviation of the Gaussian kernel which detected the blob. When an
|
||||
anisotropic gaussian is used (sigmas per dimension), the detected sigma
|
||||
is returned for each dimension.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] https://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage import data, feature, exposure
|
||||
>>> img = data.coins()
|
||||
>>> img = exposure.equalize_hist(img) # improves detection
|
||||
>>> feature.blob_log(img, threshold = .3)
|
||||
array([[124. , 336. , 11.88888889],
|
||||
[198. , 155. , 11.88888889],
|
||||
[194. , 213. , 17.33333333],
|
||||
[121. , 272. , 17.33333333],
|
||||
[263. , 244. , 17.33333333],
|
||||
[194. , 276. , 17.33333333],
|
||||
[266. , 115. , 11.88888889],
|
||||
[128. , 154. , 11.88888889],
|
||||
[260. , 174. , 17.33333333],
|
||||
[198. , 103. , 11.88888889],
|
||||
[126. , 208. , 11.88888889],
|
||||
[127. , 102. , 11.88888889],
|
||||
[263. , 302. , 17.33333333],
|
||||
[197. , 44. , 11.88888889],
|
||||
[185. , 344. , 17.33333333],
|
||||
[126. , 46. , 11.88888889],
|
||||
[113. , 323. , 1. ]])
|
||||
|
||||
Notes
|
||||
-----
|
||||
The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
|
||||
a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
|
||||
"""
|
||||
image = img_as_float(image)
|
||||
|
||||
# if both min and max sigma are scalar, function returns only one sigma
|
||||
scalar_sigma = np.isscalar(max_sigma) and np.isscalar(min_sigma)
|
||||
|
||||
# Gaussian filter requires that sequence-type sigmas have same
|
||||
# dimensionality as image. This broadcasts scalar kernels
|
||||
if np.isscalar(max_sigma):
|
||||
max_sigma = np.full(image.ndim, max_sigma, dtype=float)
|
||||
if np.isscalar(min_sigma):
|
||||
min_sigma = np.full(image.ndim, min_sigma, dtype=float)
|
||||
|
||||
# Convert sequence types to array
|
||||
min_sigma = np.asarray(min_sigma, dtype=float)
|
||||
max_sigma = np.asarray(max_sigma, dtype=float)
|
||||
|
||||
if log_scale:
|
||||
# for anisotropic data, we use the "highest resolution/variance" axis
|
||||
standard_axis = np.argmax(min_sigma)
|
||||
start = np.log10(min_sigma[standard_axis])
|
||||
stop = np.log10(max_sigma[standard_axis])
|
||||
scale = np.logspace(start, stop, num_sigma)[:, np.newaxis]
|
||||
sigma_list = scale * min_sigma / np.max(min_sigma)
|
||||
else:
|
||||
scale = np.linspace(0, 1, num_sigma)[:, np.newaxis]
|
||||
sigma_list = scale * (max_sigma - min_sigma) + min_sigma
|
||||
|
||||
# computing gaussian laplace
|
||||
# average s**2 provides scale invariance
|
||||
gl_images = [-gaussian_laplace(image, s) * np.mean(s) ** 2
|
||||
for s in sigma_list]
|
||||
|
||||
image_cube = np.stack(gl_images, axis=-1)
|
||||
|
||||
exclude_border = _format_exclude_border(image.ndim, exclude_border)
|
||||
local_maxima = peak_local_max(
|
||||
image_cube,
|
||||
threshold_abs=threshold,
|
||||
footprint=np.ones((3,) * (image.ndim + 1)),
|
||||
threshold_rel=0.0,
|
||||
exclude_border=exclude_border,
|
||||
)
|
||||
|
||||
# Catch no peaks
|
||||
if local_maxima.size == 0:
|
||||
return np.empty((0, 3))
|
||||
|
||||
# Convert local_maxima to float64
|
||||
lm = local_maxima.astype(np.float64)
|
||||
|
||||
# translate final column of lm, which contains the index of the
|
||||
# sigma that produced the maximum intensity value, into the sigma
|
||||
sigmas_of_peaks = sigma_list[local_maxima[:, -1]]
|
||||
|
||||
if scalar_sigma:
|
||||
# select one sigma column, keeping dimension
|
||||
sigmas_of_peaks = sigmas_of_peaks[:, 0:1]
|
||||
|
||||
# Remove sigma index and replace with sigmas
|
||||
lm = np.hstack([lm[:, :-1], sigmas_of_peaks])
|
||||
|
||||
sigma_dim = sigmas_of_peaks.shape[1]
|
||||
|
||||
return _prune_blobs(lm, overlap, sigma_dim=sigma_dim)
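# Illustrative sketch (not part of the original module): converting the sigma
# column returned by ``blob_log`` into approximate blob radii, following the
# Notes above (radius ~= sqrt(2) * sigma for 2-D, sqrt(3) * sigma for 3-D).
# The helper name ``_blob_log_radii_demo`` is hypothetical.
def _blob_log_radii_demo():
    from skimage import data, exposure
    img = exposure.equalize_hist(data.coins())  # 2-D example image
    blobs = blob_log(img, threshold=0.3)
    radii = blobs[:, -1] * np.sqrt(img.ndim)    # last column holds sigma for 2-D input
    return np.column_stack([blobs[:, :2], radii])  # rows of (row, col, radius)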
|
||||
|
||||
|
||||
def blob_doh(image, min_sigma=1, max_sigma=30, num_sigma=10, threshold=0.01,
|
||||
overlap=.5, log_scale=False):
|
||||
"""Finds blobs in the given grayscale image.
|
||||
|
||||
Blobs are found using the Determinant of Hessian method [1]_. For each blob
|
||||
found, the method returns its coordinates and the standard deviation
|
||||
of the Gaussian Kernel used for the Hessian matrix whose determinant
|
||||
detected the blob. Determinant of Hessians is approximated using [2]_.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : 2D ndarray
|
||||
Input grayscale image. Blobs can either be light on dark or vice versa.
|
||||
min_sigma : float, optional
|
||||
The minimum standard deviation for Gaussian Kernel used to compute
|
||||
Hessian matrix. Keep this low to detect smaller blobs.
|
||||
max_sigma : float, optional
|
||||
The maximum standard deviation for Gaussian Kernel used to compute
|
||||
Hessian matrix. Keep this high to detect larger blobs.
|
||||
num_sigma : int, optional
|
||||
The number of intermediate values of standard deviations to consider
|
||||
between `min_sigma` and `max_sigma`.
|
||||
threshold : float, optional
The absolute lower bound for scale space maxima. Local maxima smaller
than `threshold` are ignored. Reduce this to detect less prominent blobs.
|
||||
overlap : float, optional
|
||||
A value between 0 and 1. If the area of two blobs overlaps by a
fraction greater than `overlap`, the smaller blob is eliminated.
|
||||
log_scale : bool, optional
|
||||
If set, intermediate values of standard deviations are interpolated
|
||||
using a logarithmic scale to the base `10`. If not, linear
|
||||
interpolation is used.
|
||||
|
||||
Returns
|
||||
-------
|
||||
A : (n, 3) ndarray
|
||||
A 2d array with each row representing 3 values, ``(y,x,sigma)``
|
||||
where ``(y,x)`` are coordinates of the blob and ``sigma`` is the
|
||||
standard deviation of the Gaussian kernel of the Hessian Matrix whose
|
||||
determinant detected the blob.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] https://en.wikipedia.org/wiki/Blob_detection#The_determinant_of_the_Hessian
|
||||
|
||||
.. [2] Herbert Bay, Andreas Ess, Tinne Tuytelaars, Luc Van Gool,
|
||||
"SURF: Speeded Up Robust Features"
|
||||
ftp://ftp.vision.ee.ethz.ch/publications/articles/eth_biwi_00517.pdf
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage import data, feature
|
||||
>>> img = data.coins()
|
||||
>>> feature.blob_doh(img)
|
||||
array([[197. , 153. , 20.33333333],
|
||||
[124. , 336. , 20.33333333],
|
||||
[126. , 153. , 20.33333333],
|
||||
[195. , 100. , 23.55555556],
|
||||
[192. , 212. , 23.55555556],
|
||||
[121. , 271. , 30. ],
|
||||
[126. , 101. , 20.33333333],
|
||||
[193. , 275. , 23.55555556],
|
||||
[123. , 205. , 20.33333333],
|
||||
[270. , 363. , 30. ],
|
||||
[265. , 113. , 23.55555556],
|
||||
[262. , 243. , 23.55555556],
|
||||
[185. , 348. , 30. ],
|
||||
[156. , 302. , 30. ],
|
||||
[123. , 44. , 23.55555556],
|
||||
[260. , 173. , 30. ],
|
||||
[197. , 44. , 20.33333333]])
|
||||
|
||||
Notes
|
||||
-----
|
||||
The radius of each blob is approximately `sigma`.
|
||||
Computation of Determinant of Hessians is independent of the standard
|
||||
deviation. Therefore detecting larger blobs won't take more time. In
|
||||
methods like :py:meth:`blob_dog` and :py:meth:`blob_log`, the computation
|
||||
of Gaussians for larger `sigma` takes more time. The downside is that
|
||||
this method can't be used for detecting blobs of radius less than `3px`
|
||||
due to the box filters used in the approximation of Hessian Determinant.
|
||||
"""
|
||||
check_nD(image, 2)
|
||||
|
||||
image = img_as_float(image)
|
||||
image = integral_image(image)
|
||||
|
||||
if log_scale:
|
||||
start, stop = log(min_sigma, 10), log(max_sigma, 10)
|
||||
sigma_list = np.logspace(start, stop, num_sigma)
|
||||
else:
|
||||
sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)
|
||||
|
||||
hessian_images = [_hessian_matrix_det(image, s) for s in sigma_list]
|
||||
image_cube = np.dstack(hessian_images)
|
||||
|
||||
local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
|
||||
footprint=np.ones((3,) * image_cube.ndim),
|
||||
threshold_rel=0.0,
|
||||
exclude_border=False)
|
||||
|
||||
# Catch no peaks
|
||||
if local_maxima.size == 0:
|
||||
return np.empty((0, 3))
|
||||
# Convert local_maxima to float64
|
||||
lm = local_maxima.astype(np.float64)
|
||||
# Convert the last index to its corresponding scale value
|
||||
lm[:, -1] = sigma_list[local_maxima[:, -1]]
|
||||
return _prune_blobs(lm, overlap)
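# Illustrative sketch (not part of the original module): per the Notes above,
# each radius of a ``blob_doh`` detection is approximately the sigma in the
# last column, so circles can be drawn directly without rescaling. The helper
# name ``_blob_doh_overlay_demo`` is hypothetical.
def _blob_doh_overlay_demo():
    from skimage import data
    from ..draw import circle_perimeter
    img = data.coins()
    overlay = np.zeros_like(img)
    for r, c, sigma in blob_doh(img):
        rr, cc = circle_perimeter(int(r), int(c), int(sigma), shape=img.shape)
        overlay[rr, cc] = 255  # mark the approximate blob boundary
    return overlay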
|
186
venv/Lib/site-packages/skimage/feature/brief.py
Normal file
@@ -0,0 +1,186 @@
import numpy as np
|
||||
from scipy.ndimage import gaussian_filter
|
||||
|
||||
from .util import (DescriptorExtractor, _mask_border_keypoints,
|
||||
_prepare_grayscale_input_2D)
|
||||
|
||||
from .brief_cy import _brief_loop
|
||||
from .._shared.utils import check_nD
|
||||
|
||||
|
||||
class BRIEF(DescriptorExtractor):
|
||||
|
||||
"""BRIEF binary descriptor extractor.
|
||||
|
||||
BRIEF (Binary Robust Independent Elementary Features) is an efficient
|
||||
feature point descriptor. It is highly discriminative even when using
|
||||
relatively few bits and is computed using simple intensity difference
|
||||
tests.
|
||||
|
||||
For each keypoint, intensity comparisons are carried out for a specifically
|
||||
distributed number N of pixel-pairs resulting in a binary descriptor of
|
||||
length N. For binary descriptors the Hamming distance can be used for
|
||||
feature matching, which leads to lower computational cost in comparison to
|
||||
the L2 norm.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
descriptor_size : int, optional
|
||||
Size of BRIEF descriptor for each keypoint. Sizes 128, 256 and 512
|
||||
recommended by the authors. Default is 256.
|
||||
patch_size : int, optional
|
||||
Length of the two dimensional square patch sampling region around
|
||||
the keypoints. Default is 49.
|
||||
mode : {'normal', 'uniform'}, optional
|
||||
Probability distribution for sampling location of decision pixel-pairs
|
||||
around keypoints.
|
||||
sample_seed : int, optional
|
||||
Seed for the random sampling of the decision pixel-pairs. From a square
|
||||
window with length `patch_size`, pixel pairs are sampled using the
|
||||
`mode` parameter to build the descriptors using intensity comparison.
|
||||
The value of `sample_seed` must be the same for the images to be
|
||||
matched while building the descriptors.
|
||||
sigma : float, optional
|
||||
Standard deviation of the Gaussian low-pass filter applied to the image
|
||||
to alleviate noise sensitivity, which is strongly recommended to obtain
|
||||
discriminative and good descriptors.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
descriptors : (Q, `descriptor_size`) array of dtype bool
|
||||
2D ndarray of binary descriptors of size `descriptor_size` for Q
|
||||
keypoints after filtering out border keypoints with value at an
|
||||
index ``(i, j)`` either being ``True`` or ``False`` representing
|
||||
the outcome of the intensity comparison for i-th keypoint on j-th
|
||||
decision pixel-pair. It is ``Q == np.sum(mask)``.
|
||||
mask : (N, ) array of dtype bool
|
||||
Mask indicating whether a keypoint has been filtered out
|
||||
(``False``) or is described in the `descriptors` array (``True``).
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage.feature import (corner_harris, corner_peaks, BRIEF,
|
||||
... match_descriptors)
|
||||
>>> import numpy as np
|
||||
>>> square1 = np.zeros((8, 8), dtype=np.int32)
|
||||
>>> square1[2:6, 2:6] = 1
|
||||
>>> square1
|
||||
array([[0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)
|
||||
>>> square2 = np.zeros((9, 9), dtype=np.int32)
|
||||
>>> square2[2:7, 2:7] = 1
|
||||
>>> square2
|
||||
array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 1, 1, 1, 1, 1, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)
|
||||
>>> keypoints1 = corner_peaks(corner_harris(square1), min_distance=1,
|
||||
... threshold_rel=0)
|
||||
>>> keypoints2 = corner_peaks(corner_harris(square2), min_distance=1,
|
||||
... threshold_rel=0)
|
||||
>>> extractor = BRIEF(patch_size=5)
|
||||
>>> extractor.extract(square1, keypoints1)
|
||||
>>> descriptors1 = extractor.descriptors
|
||||
>>> extractor.extract(square2, keypoints2)
|
||||
>>> descriptors2 = extractor.descriptors
|
||||
>>> matches = match_descriptors(descriptors1, descriptors2)
|
||||
>>> matches
|
||||
array([[0, 0],
|
||||
[1, 1],
|
||||
[2, 2],
|
||||
[3, 3]])
|
||||
>>> keypoints1[matches[:, 0]]
|
||||
array([[2, 2],
|
||||
[2, 5],
|
||||
[5, 2],
|
||||
[5, 5]])
|
||||
>>> keypoints2[matches[:, 1]]
|
||||
array([[2, 2],
|
||||
[2, 6],
|
||||
[6, 2],
|
||||
[6, 6]])
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, descriptor_size=256, patch_size=49,
|
||||
mode='normal', sigma=1, sample_seed=1):
|
||||
|
||||
mode = mode.lower()
|
||||
if mode not in ('normal', 'uniform'):
|
||||
raise ValueError("`mode` must be 'normal' or 'uniform'.")
|
||||
|
||||
self.descriptor_size = descriptor_size
|
||||
self.patch_size = patch_size
|
||||
self.mode = mode
|
||||
self.sigma = sigma
|
||||
self.sample_seed = sample_seed
|
||||
|
||||
self.descriptors = None
|
||||
self.mask = None
|
||||
|
||||
def extract(self, image, keypoints):
|
||||
"""Extract BRIEF binary descriptors for given keypoints in image.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : 2D array
|
||||
Input image.
|
||||
keypoints : (N, 2) array
|
||||
Keypoint coordinates as ``(row, col)``.
|
||||
|
||||
"""
|
||||
check_nD(image, 2)
|
||||
|
||||
random = np.random.RandomState()
|
||||
random.seed(self.sample_seed)
|
||||
|
||||
image = _prepare_grayscale_input_2D(image)
|
||||
|
||||
# Gaussian low-pass filtering to alleviate noise sensitivity
|
||||
image = np.ascontiguousarray(gaussian_filter(image, self.sigma))
|
||||
|
||||
# Sampling pairs of decision pixels in patch_size x patch_size window
|
||||
desc_size = self.descriptor_size
|
||||
patch_size = self.patch_size
|
||||
if self.mode == 'normal':
|
||||
samples = (patch_size / 5.0) * random.randn(desc_size * 8)
|
||||
samples = np.array(samples, dtype=np.int32)
|
||||
samples = samples[(samples < (patch_size // 2))
|
||||
& (samples > - (patch_size - 2) // 2)]
|
||||
|
||||
pos1 = samples[:desc_size * 2].reshape(desc_size, 2)
|
||||
pos2 = samples[desc_size * 2:desc_size * 4].reshape(desc_size, 2)
|
||||
elif self.mode == 'uniform':
|
||||
samples = random.randint(-(patch_size - 2) // 2,
|
||||
(patch_size // 2) + 1,
|
||||
(desc_size * 2, 2))
|
||||
samples = np.array(samples, dtype=np.int32)
|
||||
pos1, pos2 = np.split(samples, 2)
|
||||
|
||||
pos1 = np.ascontiguousarray(pos1)
|
||||
pos2 = np.ascontiguousarray(pos2)
|
||||
|
||||
# Removing keypoints that are within (patch_size / 2) distance from the
|
||||
# image border
|
||||
self.mask = _mask_border_keypoints(image.shape, keypoints,
|
||||
patch_size // 2)
|
||||
|
||||
keypoints = np.array(keypoints[self.mask, :], dtype=np.intp,
|
||||
order='C', copy=False)
|
||||
|
||||
self.descriptors = np.zeros((keypoints.shape[0], desc_size),
|
||||
dtype=bool, order='C')
|
||||
|
||||
_brief_loop(image, self.descriptors.view(np.uint8), keypoints,
|
||||
pos1, pos2)
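# Illustrative sketch (not part of the original module): because the BRIEF
# descriptors are boolean vectors, the Hamming distance mentioned in the class
# docstring is just the number of differing bits. The helper name is
# hypothetical; ``d1`` and ``d2`` are rows of ``BRIEF.descriptors``.
def _hamming_distance_demo(d1, d2):
    hamming = np.count_nonzero(d1 != d2)  # number of differing bits
    return hamming / d1.size              # normalized distance in [0, 1]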
|
BIN
venv/Lib/site-packages/skimage/feature/brief_cy.cp36-win32.pyd
Normal file
Binary file not shown.
296
venv/Lib/site-packages/skimage/feature/censure.py
Normal file
@@ -0,0 +1,296 @@
import numpy as np
|
||||
from scipy.ndimage import maximum_filter, minimum_filter, convolve
|
||||
|
||||
from ..transform import integral_image
|
||||
from ..feature import structure_tensor
|
||||
from ..morphology import octagon, star
|
||||
from ..feature.censure_cy import _censure_dob_loop
|
||||
from ..feature.util import (FeatureDetector, _prepare_grayscale_input_2D,
|
||||
_mask_border_keypoints)
|
||||
from .._shared.utils import check_nD
|
||||
|
||||
# The paper (Reference [1]) mentions the sizes of the Octagon-shaped filter
|
||||
# kernel for the first seven scales only. The sizes of the later scales
|
||||
# have been extrapolated based on the following statement in the paper.
|
||||
# "These octagons scale linearly and were experimentally chosen to correspond
|
||||
# to the seven DOBs described in the previous section."
|
||||
OCTAGON_OUTER_SHAPE = [(5, 2), (5, 3), (7, 3), (9, 4), (9, 7), (13, 7),
|
||||
(15, 10), (15, 11), (15, 12), (17, 13), (17, 14)]
|
||||
OCTAGON_INNER_SHAPE = [(3, 0), (3, 1), (3, 2), (5, 2), (5, 3), (5, 4), (5, 5),
|
||||
(7, 5), (7, 6), (9, 6), (9, 7)]
|
||||
|
||||
# The sizes for the STAR shaped filter kernel for different scales have been
|
||||
# taken from the OpenCV implementation.
|
||||
STAR_SHAPE = [1, 2, 3, 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128]
|
||||
STAR_FILTER_SHAPE = [(1, 0), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5),
|
||||
(9, 6), (11, 8), (13, 10), (14, 11), (15, 12), (16, 14)]
|
||||
|
||||
|
||||
def _filter_image(image, min_scale, max_scale, mode):
|
||||
|
||||
response = np.zeros((image.shape[0], image.shape[1],
|
||||
max_scale - min_scale + 1), dtype=np.double)
|
||||
|
||||
if mode == 'dob':
|
||||
|
||||
# make response[:, :, i] contiguous memory block
|
||||
item_size = response.itemsize
|
||||
response.strides = (item_size * response.shape[1], item_size,
|
||||
item_size * response.shape[0] * response.shape[1])
|
||||
|
||||
integral_img = integral_image(image)
|
||||
|
||||
for i in range(max_scale - min_scale + 1):
|
||||
n = min_scale + i
|
||||
|
||||
# Constant multipliers for the outer region and the inner region
|
||||
# of the bi-level filters with the constraint of keeping the
|
||||
# DC bias 0.
|
||||
inner_weight = (1.0 / (2 * n + 1) ** 2)
|
||||
outer_weight = (1.0 / (12 * n ** 2 + 4 * n))
|
||||
|
||||
_censure_dob_loop(n, integral_img, response[:, :, i],
|
||||
inner_weight, outer_weight)
|
||||
|
||||
# NOTE : For the Octagon shaped filter, we implemented and evaluated the
|
||||
# slanted integral image based image filtering but the performance was
|
||||
# more or less equal to image filtering using
|
||||
# scipy.ndimage.filters.convolve(). Hence we have decided to use the
|
||||
# latter for a much cleaner implementation.
|
||||
elif mode == 'octagon':
|
||||
# TODO : Decide the shapes of Octagon filters for scales > 7
|
||||
|
||||
for i in range(max_scale - min_scale + 1):
|
||||
mo, no = OCTAGON_OUTER_SHAPE[min_scale + i - 1]
|
||||
mi, ni = OCTAGON_INNER_SHAPE[min_scale + i - 1]
|
||||
response[:, :, i] = convolve(image,
|
||||
_octagon_kernel(mo, no, mi, ni))
|
||||
|
||||
elif mode == 'star':
|
||||
|
||||
for i in range(max_scale - min_scale + 1):
|
||||
m = STAR_SHAPE[STAR_FILTER_SHAPE[min_scale + i - 1][0]]
|
||||
n = STAR_SHAPE[STAR_FILTER_SHAPE[min_scale + i - 1][1]]
|
||||
response[:, :, i] = convolve(image, _star_kernel(m, n))
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def _octagon_kernel(mo, no, mi, ni):
|
||||
outer = (mo + 2 * no) ** 2 - 2 * no * (no + 1)
|
||||
inner = (mi + 2 * ni) ** 2 - 2 * ni * (ni + 1)
|
||||
outer_weight = 1.0 / (outer - inner)
|
||||
inner_weight = 1.0 / inner
|
||||
c = ((mo + 2 * no) - (mi + 2 * ni)) // 2
|
||||
outer_oct = octagon(mo, no)
|
||||
inner_oct = np.zeros((mo + 2 * no, mo + 2 * no))
|
||||
inner_oct[c: -c, c: -c] = octagon(mi, ni)
|
||||
bfilter = (outer_weight * outer_oct -
|
||||
(outer_weight + inner_weight) * inner_oct)
|
||||
return bfilter
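# Illustrative sketch (not part of the original module): the weights above are
# chosen so that the bi-level octagon filter has zero DC bias, i.e. its
# coefficients sum to (approximately) zero. The helper name and the default
# shape parameters (the first scale from the tables above) are illustrative.
def _check_octagon_kernel_dc_bias(mo=5, no=2, mi=3, ni=0):
    kernel = _octagon_kernel(mo, no, mi, ni)
    assert abs(kernel.sum()) < 1e-7  # zero mean up to floating point error
    return kernel.sum()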
|
||||
|
||||
|
||||
def _star_kernel(m, n):
|
||||
c = m + m // 2 - n - n // 2
|
||||
outer_star = star(m)
|
||||
inner_star = np.zeros_like(outer_star)
|
||||
inner_star[c: -c, c: -c] = star(n)
|
||||
outer_weight = 1.0 / (np.sum(outer_star - inner_star))
|
||||
inner_weight = 1.0 / np.sum(inner_star)
|
||||
bfilter = (outer_weight * outer_star -
|
||||
(outer_weight + inner_weight) * inner_star)
|
||||
return bfilter
|
||||
|
||||
|
||||
def _suppress_lines(feature_mask, image, sigma, line_threshold):
|
||||
Axx, Axy, Ayy = structure_tensor(image, sigma)
|
||||
feature_mask[(Axx + Ayy) ** 2
|
||||
> line_threshold * (Axx * Ayy - Axy ** 2)] = False
|
||||
|
||||
|
||||
class CENSURE(FeatureDetector):
|
||||
|
||||
"""CENSURE keypoint detector.
|
||||
|
||||
Parameters
----------
min_scale : int, optional
|
||||
Minimum scale to extract keypoints from.
|
||||
max_scale : int, optional
|
||||
Maximum scale to extract keypoints from. The keypoints will be
|
||||
extracted from all the scales except the first and the last i.e.
|
||||
from the scales in the range [min_scale + 1, max_scale - 1]. The filter
|
||||
sizes for different scales are such that two adjacent scales
comprise an octave.
|
||||
mode : {'DoB', 'Octagon', 'STAR'}, optional
|
||||
Type of bi-level filter used to get the scales of the input image.
|
||||
Possible values are 'DoB', 'Octagon' and 'STAR'. The three modes
|
||||
represent the shape of the bi-level filters i.e. box(square), octagon
|
||||
and star respectively. For instance, a bi-level octagon filter consists
|
||||
of a smaller inner octagon and a larger outer octagon with the filter
|
||||
weights being uniformly negative in the inner octagon and
uniformly positive in the difference region. Use STAR and Octagon for
|
||||
better features and DoB for better performance.
|
||||
non_max_threshold : float, optional
|
||||
Threshold value used to suppress maxima and minima with a weak
magnitude response obtained after non-maximum suppression.
|
||||
line_threshold : float, optional
|
||||
Threshold for rejecting interest points which have ratio of principal
|
||||
curvatures greater than this value.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
keypoints : (N, 2) array
|
||||
Keypoint coordinates as ``(row, col)``.
|
||||
scales : (N, ) array
|
||||
Corresponding scales.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Motilal Agrawal, Kurt Konolige and Morten Rufus Blas
|
||||
"CENSURE: Center Surround Extremas for Realtime Feature
|
||||
Detection and Matching",
|
||||
https://link.springer.com/chapter/10.1007/978-3-540-88693-8_8
|
||||
:DOI:`10.1007/978-3-540-88693-8_8`
|
||||
|
||||
.. [2] Adam Schmidt, Marek Kraft, Michal Fularz and Zuzanna Domagala
|
||||
"Comparative Assessment of Point Feature Detectors and
|
||||
Descriptors in the Context of Robot Navigation"
|
||||
http://yadda.icm.edu.pl/yadda/element/bwmeta1.element.baztech-268aaf28-0faf-4872-a4df-7e2e61cb364c/c/Schmidt_comparative.pdf
|
||||
:DOI:`10.1.1.465.1117`
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage.data import astronaut
|
||||
>>> from skimage.color import rgb2gray
|
||||
>>> from skimage.feature import CENSURE
|
||||
>>> img = rgb2gray(astronaut()[100:300, 100:300])
|
||||
>>> censure = CENSURE()
|
||||
>>> censure.detect(img)
|
||||
>>> censure.keypoints
|
||||
array([[ 4, 148],
|
||||
[ 12, 73],
|
||||
[ 21, 176],
|
||||
[ 91, 22],
|
||||
[ 93, 56],
|
||||
[ 94, 22],
|
||||
[ 95, 54],
|
||||
[100, 51],
|
||||
[103, 51],
|
||||
[106, 67],
|
||||
[108, 15],
|
||||
[117, 20],
|
||||
[122, 60],
|
||||
[125, 37],
|
||||
[129, 37],
|
||||
[133, 76],
|
||||
[145, 44],
|
||||
[146, 94],
|
||||
[150, 114],
|
||||
[153, 33],
|
||||
[154, 156],
|
||||
[155, 151],
|
||||
[184, 63]])
|
||||
>>> censure.scales
|
||||
array([2, 6, 6, 2, 4, 3, 2, 3, 2, 6, 3, 2, 2, 3, 2, 2, 2, 3, 2, 2, 4, 2,
|
||||
2])
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, min_scale=1, max_scale=7, mode='DoB',
|
||||
non_max_threshold=0.15, line_threshold=10):
|
||||
|
||||
mode = mode.lower()
|
||||
if mode not in ('dob', 'octagon', 'star'):
|
||||
raise ValueError("`mode` must be one of 'DoB', 'Octagon', 'STAR'.")
|
||||
|
||||
if min_scale < 1 or max_scale < 1 or max_scale - min_scale < 2:
|
||||
raise ValueError('The scales must be >= 1 and the number of '
|
||||
'scales should be >= 3.')
|
||||
|
||||
self.min_scale = min_scale
|
||||
self.max_scale = max_scale
|
||||
self.mode = mode
|
||||
self.non_max_threshold = non_max_threshold
|
||||
self.line_threshold = line_threshold
|
||||
|
||||
self.keypoints = None
|
||||
self.scales = None
|
||||
|
||||
def detect(self, image):
|
||||
"""Detect CENSURE keypoints along with the corresponding scale.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : 2D ndarray
|
||||
Input image.
|
||||
|
||||
"""
|
||||
|
||||
# (1) First we generate the required scales on the input grayscale
|
||||
# image using a bi-level filter and stack them up in `filter_response`.
|
||||
|
||||
# (2) We then perform Non-Maximal suppression in 3 x 3 x 3 window on
|
||||
# the filter_response to suppress points that are neither minima nor
|
||||
# maxima in 3 x 3 x 3 neighbourhood. We obtain a boolean ndarray
|
||||
# `feature_mask` containing all the minimas and maximas in
|
||||
# `filter_response` as True.
|
||||
# (3) Then we suppress all the points in the `feature_mask` for which
|
||||
# the corresponding point in the image at a particular scale has the
|
||||
# ratio of principal curvatures greater than `line_threshold`.
|
||||
# (4) Finally, we remove the border keypoints and return the keypoints
|
||||
# along with its corresponding scale.
|
||||
|
||||
check_nD(image, 2)
|
||||
|
||||
num_scales = self.max_scale - self.min_scale
|
||||
|
||||
image = np.ascontiguousarray(_prepare_grayscale_input_2D(image))
|
||||
|
||||
# Generating all the scales
|
||||
filter_response = _filter_image(image, self.min_scale, self.max_scale,
|
||||
self.mode)
|
||||
|
||||
# Suppressing points that are neither minima nor maxima in their
|
||||
# 3 x 3 x 3 neighborhood to zero
|
||||
minimas = minimum_filter(filter_response, (3, 3, 3)) == filter_response
|
||||
maximas = maximum_filter(filter_response, (3, 3, 3)) == filter_response
|
||||
|
||||
feature_mask = minimas | maximas
|
||||
feature_mask[filter_response < self.non_max_threshold] = False
|
||||
|
||||
for i in range(1, num_scales):
|
||||
# sigma = (window_size - 1) / 6.0, so the window covers > 99% of
|
||||
# the kernel's distribution
|
||||
# window_size = 7 + 2 * (min_scale - 1 + i)
|
||||
# Hence sigma = 1 + (min_scale - 1 + i)/ 3.0
|
||||
_suppress_lines(feature_mask[:, :, i], image,
|
||||
(1 + (self.min_scale + i - 1) / 3.0),
|
||||
self.line_threshold)
|
||||
|
||||
rows, cols, scales = np.nonzero(feature_mask[..., 1:num_scales])
|
||||
keypoints = np.column_stack([rows, cols])
|
||||
scales = scales + self.min_scale + 1
|
||||
|
||||
if self.mode == 'dob':
|
||||
self.keypoints = keypoints
|
||||
self.scales = scales
|
||||
return
|
||||
|
||||
cumulative_mask = np.zeros(keypoints.shape[0], dtype=bool)
|
||||
|
||||
if self.mode == 'octagon':
|
||||
for i in range(self.min_scale + 1, self.max_scale):
|
||||
c = (OCTAGON_OUTER_SHAPE[i - 1][0] - 1) // 2 \
|
||||
+ OCTAGON_OUTER_SHAPE[i - 1][1]
|
||||
cumulative_mask |= (
|
||||
_mask_border_keypoints(image.shape, keypoints, c)
|
||||
& (scales == i))
|
||||
elif self.mode == 'star':
|
||||
for i in range(self.min_scale + 1, self.max_scale):
|
||||
c = STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] \
|
||||
+ STAR_SHAPE[STAR_FILTER_SHAPE[i - 1][0]] // 2
|
||||
cumulative_mask |= (
|
||||
_mask_border_keypoints(image.shape, keypoints, c)
|
||||
& (scales == i))
|
||||
|
||||
self.keypoints = keypoints[cumulative_mask]
|
||||
self.scales = scales[cumulative_mask]
|
BIN
venv/Lib/site-packages/skimage/feature/censure_cy.cp36-win32.pyd
Normal file
Binary file not shown.
1139
venv/Lib/site-packages/skimage/feature/corner.py
Normal file
File diff suppressed because it is too large
BIN
venv/Lib/site-packages/skimage/feature/corner_cy.cp36-win32.pyd
Normal file
Binary file not shown.
321
venv/Lib/site-packages/skimage/feature/haar.py
Normal file
@@ -0,0 +1,321 @@
from itertools import chain
|
||||
from operator import add
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ._haar import haar_like_feature_coord_wrapper
|
||||
from ._haar import haar_like_feature_wrapper
|
||||
from ..color import gray2rgb
|
||||
from ..draw import rectangle
|
||||
from .._shared.utils import check_random_state
|
||||
from ..util import img_as_float
|
||||
|
||||
FEATURE_TYPE = ('type-2-x', 'type-2-y',
|
||||
'type-3-x', 'type-3-y',
|
||||
'type-4')
|
||||
|
||||
|
||||
def _validate_feature_type(feature_type):
|
||||
"""Transform feature type to an iterable and check that it exists."""
|
||||
if feature_type is None:
|
||||
feature_type_ = FEATURE_TYPE
|
||||
else:
|
||||
if isinstance(feature_type, str):
|
||||
feature_type_ = [feature_type]
|
||||
else:
|
||||
feature_type_ = feature_type
|
||||
for feat_t in feature_type_:
|
||||
if feat_t not in FEATURE_TYPE:
|
||||
raise ValueError(
|
||||
'The given feature type is unknown. Got {} instead of one'
|
||||
' of {}.'.format(feat_t, FEATURE_TYPE))
|
||||
return feature_type_
|
||||
|
||||
|
||||
def haar_like_feature_coord(width, height, feature_type=None):
|
||||
"""Compute the coordinates of Haar-like features.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
width : int
|
||||
Width of the detection window.
|
||||
height : int
|
||||
Height of the detection window.
|
||||
feature_type : str or list of str or None, optional
|
||||
The type of feature to consider:
|
||||
|
||||
- 'type-2-x': 2 rectangles varying along the x axis;
|
||||
- 'type-2-y': 2 rectangles varying along the y axis;
|
||||
- 'type-3-x': 3 rectangles varying along the x axis;
|
||||
- 'type-3-y': 3 rectangles varying along the y axis;
|
||||
- 'type-4': 4 rectangles varying along x and y axis.
|
||||
|
||||
By default all features are extracted.
|
||||
|
||||
Returns
|
||||
-------
|
||||
feature_coord : (n_features, n_rectangles, 2, 2), ndarray of list of \
|
||||
tuple coord
|
||||
Coordinates of the rectangles for each feature.
|
||||
feature_type : (n_features,), ndarray of str
|
||||
The corresponding type for each feature.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> from skimage.transform import integral_image
|
||||
>>> from skimage.feature import haar_like_feature_coord
|
||||
>>> feat_coord, feat_type = haar_like_feature_coord(2, 2, 'type-4')
|
||||
>>> feat_coord # doctest: +SKIP
|
||||
array([ list([[(0, 0), (0, 0)], [(0, 1), (0, 1)],
|
||||
[(1, 1), (1, 1)], [(1, 0), (1, 0)]])], dtype=object)
|
||||
>>> feat_type
|
||||
array(['type-4'], dtype=object)
|
||||
|
||||
"""
|
||||
feature_type_ = _validate_feature_type(feature_type)
|
||||
|
||||
feat_coord, feat_type = zip(*[haar_like_feature_coord_wrapper(width,
|
||||
height,
|
||||
feat_t)
|
||||
for feat_t in feature_type_])
|
||||
|
||||
return np.concatenate(feat_coord), np.hstack(feat_type)
|
||||
|
||||
|
||||
def haar_like_feature(int_image, r, c, width, height, feature_type=None,
|
||||
feature_coord=None):
|
||||
"""Compute the Haar-like features for a region of interest (ROI) of an
|
||||
integral image.
|
||||
|
||||
Haar-like features have been successfully used for image classification and
|
||||
object detection [1]_. They have been used in the real-time face detection
algorithm proposed in [2]_.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
int_image : (M, N) ndarray
|
||||
Integral image for which the features need to be computed.
|
||||
r : int
|
||||
Row-coordinate of top left corner of the detection window.
|
||||
c : int
|
||||
Column-coordinate of top left corner of the detection window.
|
||||
width : int
|
||||
Width of the detection window.
|
||||
height : int
|
||||
Height of the detection window.
|
||||
feature_type : str or list of str or None, optional
|
||||
The type of feature to consider:
|
||||
|
||||
- 'type-2-x': 2 rectangles varying along the x axis;
|
||||
- 'type-2-y': 2 rectangles varying along the y axis;
|
||||
- 'type-3-x': 3 rectangles varying along the x axis;
|
||||
- 'type-3-y': 3 rectangles varying along the y axis;
|
||||
- 'type-4': 4 rectangles varying along x and y axis.
|
||||
|
||||
By default all features are extracted.
|
||||
|
||||
If using with `feature_coord`, it should correspond to the feature
|
||||
type of each associated coordinate feature.
|
||||
feature_coord : ndarray of list of tuples or None, optional
|
||||
The array of coordinates to be extracted. This is useful when you want
|
||||
to recompute only a subset of features. In this case `feature_type`
|
||||
needs to be an array containing the type of each feature, as returned
|
||||
by :func:`haar_like_feature_coord`. By default, all coordinates are
|
||||
computed.
|
||||
|
||||
Returns
|
||||
-------
|
||||
haar_features : (n_features,) ndarray of int or float
|
||||
Resulting Haar-like features. Each value is equal to the subtraction of
|
||||
sums of the positive and negative rectangles. The data type depends on
|
||||
the data type of `int_image`: `int` when the data type of `int_image`
|
||||
is `uint` or `int` and `float` when the data type of `int_image` is
|
||||
`float`.
|
||||
|
||||
Notes
|
||||
-----
|
||||
When extracting those features in parallel, be aware that the choice of the
|
||||
backend (i.e. multiprocessing vs threading) will have an impact on the
|
||||
performance. The rule of thumb is as follows: use multiprocessing when
|
||||
extracting features for all possible ROI in an image; use threading when
|
||||
extracting the feature at specific location for a limited number of ROIs.
|
||||
Refer to the example
|
||||
:ref:`sphx_glr_auto_examples_applications_plot_haar_extraction_selection_classification.py`
|
||||
for more insights.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> from skimage.transform import integral_image
|
||||
>>> from skimage.feature import haar_like_feature
|
||||
>>> img = np.ones((5, 5), dtype=np.uint8)
|
||||
>>> img_ii = integral_image(img)
|
||||
>>> feature = haar_like_feature(img_ii, 0, 0, 5, 5, 'type-3-x')
|
||||
>>> feature
|
||||
array([-1, -2, -3, -4, -1, -2, -3, -4, -1, -2, -3, -4, -1, -2, -3, -4, -1,
|
||||
-2, -3, -4, -1, -2, -3, -4, -1, -2, -3, -1, -2, -3, -1, -2, -3, -1,
|
||||
-2, -1, -2, -1, -2, -1, -1, -1])
|
||||
|
||||
You can compute the feature for some pre-computed coordinates.
|
||||
|
||||
>>> from skimage.feature import haar_like_feature_coord
|
||||
>>> feature_coord, feature_type = zip(
|
||||
... *[haar_like_feature_coord(5, 5, feat_t)
|
||||
... for feat_t in ('type-2-x', 'type-3-x')])
|
||||
>>> # only select one feature over two
|
||||
>>> feature_coord = np.concatenate([x[::2] for x in feature_coord])
|
||||
>>> feature_type = np.concatenate([x[::2] for x in feature_type])
|
||||
>>> feature = haar_like_feature(img_ii, 0, 0, 5, 5,
|
||||
... feature_type=feature_type,
|
||||
... feature_coord=feature_coord)
|
||||
>>> feature
|
||||
array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, -1, -3, -1, -3, -1, -3, -1, -3, -1,
|
||||
-3, -1, -3, -1, -3, -2, -1, -3, -2, -2, -2, -1])
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] https://en.wikipedia.org/wiki/Haar-like_feature
|
||||
.. [2] Oren, M., Papageorgiou, C., Sinha, P., Osuna, E., & Poggio, T.
|
||||
(1997, June). Pedestrian detection using wavelet templates.
|
||||
In Computer Vision and Pattern Recognition, 1997. Proceedings.,
|
||||
1997 IEEE Computer Society Conference on (pp. 193-199). IEEE.
|
||||
http://tinyurl.com/y6ulxfta
|
||||
:DOI:`10.1109/CVPR.1997.609319`
|
||||
.. [3] Viola, Paul, and Michael J. Jones. "Robust real-time face
|
||||
detection." International journal of computer vision 57.2
|
||||
(2004): 137-154.
|
||||
https://www.merl.com/publications/docs/TR2004-043.pdf
|
||||
:DOI:`10.1109/CVPR.2001.990517`
|
||||
|
||||
"""
|
||||
if feature_coord is None:
|
||||
feature_type_ = _validate_feature_type(feature_type)
|
||||
|
||||
return np.hstack(list(chain.from_iterable(
|
||||
haar_like_feature_wrapper(int_image, r, c, width, height, feat_t,
|
||||
feature_coord)
|
||||
for feat_t in feature_type_)))
|
||||
else:
|
||||
if feature_coord.shape[0] != feature_type.shape[0]:
|
||||
raise ValueError("Inconsistent size between feature coordinates"
|
||||
"and feature types.")
|
||||
|
||||
mask_feature = [feature_type == feat_t for feat_t in FEATURE_TYPE]
|
||||
haar_feature_idx, haar_feature = zip(
|
||||
*[(np.flatnonzero(mask),
|
||||
haar_like_feature_wrapper(int_image, r, c, width, height,
|
||||
feat_t, feature_coord[mask]))
|
||||
for mask, feat_t in zip(mask_feature, FEATURE_TYPE)
|
||||
if np.count_nonzero(mask)])
|
||||
|
||||
haar_feature_idx = np.concatenate(haar_feature_idx)
|
||||
haar_feature = np.concatenate(haar_feature)
|
||||
|
||||
haar_feature[haar_feature_idx] = haar_feature.copy()
|
||||
return haar_feature
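# Illustrative sketch (not part of the original module), following the rule of
# thumb in the Notes of ``haar_like_feature``: prefer a thread-based backend
# when computing features for only a few specific ROIs. Assumes ``joblib`` is
# installed; the helper name and the window origins in ``rois`` are made up.
def _parallel_roi_features_demo():
    from joblib import Parallel, delayed
    from ..transform import integral_image
    img_ii = integral_image(np.ones((24, 24)))
    rois = [(0, 0), (6, 6), (12, 12)]  # hypothetical window origins
    return Parallel(n_jobs=2, prefer='threads')(
        delayed(haar_like_feature)(img_ii, r, c, 12, 12, 'type-2-x')
        for r, c in rois)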
|
||||
|
||||
|
||||
def draw_haar_like_feature(image, r, c, width, height,
|
||||
feature_coord,
|
||||
color_positive_block=(1., 0., 0.),
|
||||
color_negative_block=(0., 1., 0.),
|
||||
alpha=0.5, max_n_features=None, random_state=None):
|
||||
"""Visualization of Haar-like features.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : (M, N) ndarray
|
||||
The region of an integral image for which the features need to be
|
||||
computed.
|
||||
r : int
|
||||
Row-coordinate of top left corner of the detection window.
|
||||
c : int
|
||||
Column-coordinate of top left corner of the detection window.
|
||||
width : int
|
||||
Width of the detection window.
|
||||
height : int
|
||||
Height of the detection window.
|
||||
feature_coord : ndarray of list of tuples or None, optional
|
||||
The array of coordinates to be extracted. This is useful when you want
|
||||
to recompute only a subset of features. In this case `feature_type`
|
||||
needs to be an array containing the type of each feature, as returned
|
||||
by :func:`haar_like_feature_coord`. By default, all coordinates are
|
||||
computed.
|
||||
color_positive_block : tuple of 3 floats
Floats specifying the color for the positive block. Corresponding
values define (R, G, B) values. Default value is red (1, 0, 0).
color_negative_block : tuple of 3 floats
Floats specifying the color for the negative block. Corresponding values
define (R, G, B) values. Default value is green (0, 1, 0).
|
||||
alpha : float
|
||||
Value in the range [0, 1] that specifies the opacity of the
visualization: 1 - fully opaque, 0 - fully transparent.
|
||||
max_n_features : int, default=None
|
||||
The maximum number of features to be returned.
|
||||
By default, all features are returned.
|
||||
random_state : int, RandomState instance or None, optional
|
||||
If int, random_state is the seed used by the random number generator;
|
||||
If RandomState instance, random_state is the random number generator;
|
||||
If None, the random number generator is the RandomState instance used
|
||||
by `np.random`. The random state is used when generating a set of
|
||||
features smaller than the total number of available features.
|
||||
|
||||
Returns
|
||||
-------
|
||||
features : (M, N), ndarray
|
||||
An image in which the different features will be added.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> from skimage.feature import haar_like_feature_coord
|
||||
>>> from skimage.feature import draw_haar_like_feature
|
||||
>>> feature_coord, _ = haar_like_feature_coord(2, 2, 'type-4')
|
||||
>>> image = draw_haar_like_feature(np.zeros((2, 2)),
|
||||
... 0, 0, 2, 2,
|
||||
... feature_coord,
|
||||
... max_n_features=1)
|
||||
>>> image
|
||||
array([[[0. , 0.5, 0. ],
|
||||
[0.5, 0. , 0. ]],
|
||||
<BLANKLINE>
|
||||
[[0.5, 0. , 0. ],
|
||||
[0. , 0.5, 0. ]]])
|
||||
|
||||
"""
|
||||
random_state = check_random_state(random_state)
|
||||
color_positive_block = np.asarray(color_positive_block, dtype=np.float64)
|
||||
color_negative_block = np.asarray(color_negative_block, dtype=np.float64)
|
||||
|
||||
if max_n_features is None:
|
||||
feature_coord_ = feature_coord
|
||||
else:
|
||||
feature_coord_ = random_state.choice(feature_coord,
|
||||
size=max_n_features,
|
||||
replace=False)
|
||||
|
||||
output = np.copy(image)
|
||||
if len(image.shape) < 3:
|
||||
output = gray2rgb(image)
|
||||
output = img_as_float(output)
|
||||
|
||||
for coord in feature_coord_:
|
||||
for idx_rect, rect in enumerate(coord):
|
||||
coord_start, coord_end = rect
|
||||
coord_start = tuple(map(add, coord_start, [r, c]))
|
||||
coord_end = tuple(map(add, coord_end, [r, c]))
|
||||
rr, cc = rectangle(coord_start, coord_end)
|
||||
|
||||
if ((idx_rect + 1) % 2) == 0:
|
||||
new_value = ((1 - alpha) *
|
||||
output[rr, cc] + alpha * color_positive_block)
|
||||
else:
|
||||
new_value = ((1 - alpha) *
|
||||
output[rr, cc] + alpha * color_negative_block)
|
||||
output[rr, cc] = new_value
|
||||
|
||||
return output
|
97
venv/Lib/site-packages/skimage/feature/match.py
Normal file
@@ -0,0 +1,97 @@
import numpy as np
|
||||
from scipy.spatial.distance import cdist
|
||||
|
||||
|
||||
def match_descriptors(descriptors1, descriptors2, metric=None, p=2,
|
||||
max_distance=np.inf, cross_check=True, max_ratio=1.0):
|
||||
"""Brute-force matching of descriptors.
|
||||
|
||||
For each descriptor in the first set this matcher finds the closest
|
||||
descriptor in the second set (and vice-versa in the case of enabled
|
||||
cross-checking).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
descriptors1 : (M, P) array
|
||||
Descriptors of size P about M keypoints in the first image.
|
||||
descriptors2 : (N, P) array
|
||||
Descriptors of size P about N keypoints in the second image.
|
||||
metric : {'euclidean', 'cityblock', 'minkowski', 'hamming', ...}, optional
|
||||
The metric to compute the distance between two descriptors. See
|
||||
`scipy.spatial.distance.cdist` for all possible types. The hamming
|
||||
distance should be used for binary descriptors. By default the L2-norm
|
||||
is used for all descriptors of dtype float or double and the Hamming
|
||||
distance is used for binary descriptors automatically.
|
||||
p : int, optional
|
||||
The p-norm to apply for ``metric='minkowski'``.
|
||||
max_distance : float, optional
|
||||
Maximum allowed distance between descriptors of two keypoints
|
||||
in separate images to be regarded as a match.
|
||||
cross_check : bool, optional
|
||||
If True, the matched keypoints are returned after cross checking i.e. a
|
||||
matched pair (keypoint1, keypoint2) is returned if keypoint2 is the
|
||||
best match for keypoint1 in second image and keypoint1 is the best
|
||||
match for keypoint2 in first image.
|
||||
max_ratio : float, optional
|
||||
Maximum ratio of distances between first and second closest descriptor
|
||||
in the second set of descriptors. This threshold is useful to filter
|
||||
ambiguous matches between the two descriptor sets. The choice of this
|
||||
value depends on the statistics of the chosen descriptor, e.g.,
|
||||
for SIFT descriptors a value of 0.8 is usually chosen, see
|
||||
D.G. Lowe, "Distinctive Image Features from Scale-Invariant Keypoints",
|
||||
International Journal of Computer Vision, 2004.
|
||||
|
||||
Returns
|
||||
-------
|
||||
matches : (Q, 2) array
|
||||
Indices of corresponding matches in first and second set of
|
||||
descriptors, where ``matches[:, 0]`` denote the indices in the first
|
||||
and ``matches[:, 1]`` the indices in the second set of descriptors.
|
||||
|
||||
"""
|
||||
|
||||
if descriptors1.shape[1] != descriptors2.shape[1]:
|
||||
raise ValueError("Descriptor length must equal.")
|
||||
|
||||
if metric is None:
|
||||
if np.issubdtype(descriptors1.dtype, np.bool_):
|
||||
metric = 'hamming'
|
||||
else:
|
||||
metric = 'euclidean'
|
||||
|
||||
kwargs = {}
|
||||
# Scipy raises an error if p is passed as an extra argument when it isn't
|
||||
# necessary for the chosen metric.
|
||||
if metric == 'minkowski':
|
||||
kwargs['p'] = p
|
||||
distances = cdist(descriptors1, descriptors2, metric=metric, **kwargs)
|
||||
|
||||
indices1 = np.arange(descriptors1.shape[0])
|
||||
indices2 = np.argmin(distances, axis=1)
|
||||
|
||||
if cross_check:
|
||||
matches1 = np.argmin(distances, axis=0)
|
||||
mask = indices1 == matches1[indices2]
|
||||
indices1 = indices1[mask]
|
||||
indices2 = indices2[mask]
|
||||
|
||||
if max_distance < np.inf:
|
||||
mask = distances[indices1, indices2] < max_distance
|
||||
indices1 = indices1[mask]
|
||||
indices2 = indices2[mask]
|
||||
|
||||
if max_ratio < 1.0:
|
||||
best_distances = distances[indices1, indices2]
|
||||
distances[indices1, indices2] = np.inf
|
||||
second_best_indices2 = np.argmin(distances[indices1], axis=1)
|
||||
second_best_distances = distances[indices1, second_best_indices2]
|
||||
second_best_distances[second_best_distances == 0] \
|
||||
= np.finfo(np.double).eps
|
||||
ratio = best_distances / second_best_distances
|
||||
mask = ratio < max_ratio
|
||||
indices1 = indices1[mask]
|
||||
indices2 = indices2[mask]
|
||||
|
||||
matches = np.column_stack((indices1, indices2))
|
||||
|
||||
return matches
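# Illustrative sketch (not part of the original module): applying the
# Lowe-style ratio test described for ``max_ratio`` above, keeping only
# matches whose best distance is clearly smaller than the second-best one.
# The helper name is hypothetical and the descriptors are random stand-ins.
def _ratio_test_demo():
    rng = np.random.RandomState(0)
    d1 = rng.rand(10, 64)   # 10 float descriptors of length 64
    d2 = rng.rand(12, 64)   # 12 float descriptors of length 64
    return match_descriptors(d1, d2, metric='euclidean',
                             cross_check=True, max_ratio=0.8)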
|
349
venv/Lib/site-packages/skimage/feature/orb.py
Normal file
@@ -0,0 +1,349 @@
import numpy as np
|
||||
|
||||
from ..feature.util import (FeatureDetector, DescriptorExtractor,
|
||||
_mask_border_keypoints,
|
||||
_prepare_grayscale_input_2D)
|
||||
|
||||
from ..feature import (corner_fast, corner_orientations, corner_peaks,
|
||||
corner_harris)
|
||||
from ..transform import pyramid_gaussian
|
||||
from .._shared.utils import check_nD
|
||||
|
||||
from .orb_cy import _orb_loop
|
||||
|
||||
|
||||
OFAST_MASK = np.zeros((31, 31))
|
||||
OFAST_UMAX = [15, 15, 15, 15, 14, 14, 14, 13, 13, 12, 11, 10, 9, 8, 6, 3]
|
||||
for i in range(-15, 16):
|
||||
for j in range(-OFAST_UMAX[abs(i)], OFAST_UMAX[abs(i)] + 1):
|
||||
OFAST_MASK[15 + j, 15 + i] = 1
|
||||
|
||||
|
||||
class ORB(FeatureDetector, DescriptorExtractor):
|
||||
|
||||
"""Oriented FAST and rotated BRIEF feature detector and binary descriptor
|
||||
extractor.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
n_keypoints : int, optional
|
||||
Number of keypoints to be returned. The function will return the best
|
||||
`n_keypoints` according to the Harris corner response if more than
|
||||
`n_keypoints` are detected. If not, then all the detected keypoints
|
||||
are returned.
|
||||
fast_n : int, optional
|
||||
The `n` parameter in `skimage.feature.corner_fast`. Minimum number of
|
||||
consecutive pixels out of 16 pixels on the circle that should all be
|
||||
either brighter or darker w.r.t test-pixel. A point c on the circle is
|
||||
darker w.r.t test pixel p if ``Ic < Ip - threshold`` and brighter if
|
||||
``Ic > Ip + threshold``. Also stands for the n in ``FAST-n`` corner
|
||||
detector.
|
||||
fast_threshold : float, optional
|
||||
The ``threshold`` parameter in ``feature.corner_fast``. Threshold used
|
||||
to decide whether the pixels on the circle are brighter, darker or
|
||||
similar w.r.t. the test pixel. Decrease the threshold when more
|
||||
corners are desired and vice-versa.
|
||||
harris_k : float, optional
|
||||
The `k` parameter in `skimage.feature.corner_harris`. Sensitivity
|
||||
factor to separate corners from edges, typically in range ``[0, 0.2]``.
|
||||
Small values of `k` result in detection of sharp corners.
|
||||
downscale : float, optional
|
||||
Downscale factor for the image pyramid. Default value 1.2 is chosen so
|
||||
that there are more dense scales which enable robust scale invariance
|
||||
for a subsequent feature description.
|
||||
n_scales : int, optional
|
||||
Maximum number of scales from the bottom of the image pyramid to
|
||||
extract the features from.
|
||||
|
||||
Attributes
|
||||
----------
|
||||
keypoints : (N, 2) array
|
||||
Keypoint coordinates as ``(row, col)``.
|
||||
scales : (N, ) array
|
||||
Corresponding scales.
|
||||
orientations : (N, ) array
|
||||
Corresponding orientations in radians.
|
||||
responses : (N, ) array
|
||||
Corresponding Harris corner responses.
|
||||
descriptors : (Q, `descriptor_size`) array of dtype bool
|
||||
2D array of binary descriptors of size `descriptor_size` for Q
|
||||
keypoints after filtering out border keypoints with value at an
|
||||
index ``(i, j)`` either being ``True`` or ``False`` representing
|
||||
the outcome of the intensity comparison for i-th keypoint on j-th
|
||||
decision pixel-pair. It is ``Q == np.sum(mask)``.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Ethan Rublee, Vincent Rabaud, Kurt Konolige and Gary Bradski
|
||||
"ORB: An efficient alternative to SIFT and SURF"
|
||||
http://www.vision.cs.chubu.ac.jp/CV-R/pdf/Rublee_iccv2011.pdf
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from skimage.feature import ORB, match_descriptors
|
||||
>>> img1 = np.zeros((100, 100))
|
||||
>>> img2 = np.zeros_like(img1)
|
||||
>>> np.random.seed(1)
|
||||
>>> square = np.random.rand(20, 20)
|
||||
>>> img1[40:60, 40:60] = square
|
||||
>>> img2[53:73, 53:73] = square
|
||||
>>> detector_extractor1 = ORB(n_keypoints=5)
|
||||
>>> detector_extractor2 = ORB(n_keypoints=5)
|
||||
>>> detector_extractor1.detect_and_extract(img1)
|
||||
>>> detector_extractor2.detect_and_extract(img2)
|
||||
>>> matches = match_descriptors(detector_extractor1.descriptors,
|
||||
... detector_extractor2.descriptors)
|
||||
>>> matches
|
||||
array([[0, 0],
|
||||
[1, 1],
|
||||
[2, 2],
|
||||
[3, 3],
|
||||
[4, 4]])
|
||||
>>> detector_extractor1.keypoints[matches[:, 0]]
|
||||
array([[42., 40.],
|
||||
[47., 58.],
|
||||
[44., 40.],
|
||||
[59., 42.],
|
||||
[45., 44.]])
|
||||
>>> detector_extractor2.keypoints[matches[:, 1]]
|
||||
array([[55., 53.],
|
||||
[60., 71.],
|
||||
[57., 53.],
|
||||
[72., 55.],
|
||||
[58., 57.]])
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, downscale=1.2, n_scales=8,
|
||||
n_keypoints=500, fast_n=9, fast_threshold=0.08,
|
||||
harris_k=0.04):
|
||||
self.downscale = downscale
|
||||
self.n_scales = n_scales
|
||||
self.n_keypoints = n_keypoints
|
||||
self.fast_n = fast_n
|
||||
self.fast_threshold = fast_threshold
|
||||
self.harris_k = harris_k
|
||||
|
||||
self.keypoints = None
|
||||
self.scales = None
|
||||
self.responses = None
|
||||
self.orientations = None
|
||||
self.descriptors = None
|
||||
|
||||
def _build_pyramid(self, image):
|
||||
image = _prepare_grayscale_input_2D(image)
|
||||
return list(pyramid_gaussian(image, self.n_scales - 1,
|
||||
self.downscale, multichannel=False))
|
||||
|
||||
def _detect_octave(self, octave_image):
|
||||
dtype = octave_image.dtype
|
||||
# Extract keypoints for current octave
|
||||
fast_response = corner_fast(octave_image, self.fast_n,
|
||||
self.fast_threshold)
|
||||
keypoints = corner_peaks(fast_response, min_distance=1,
|
||||
threshold_rel=0)
|
||||
|
||||
if len(keypoints) == 0:
|
||||
return (np.zeros((0, 2), dtype=dtype),
|
||||
np.zeros((0, ), dtype=dtype),
|
||||
np.zeros((0, ), dtype=dtype))
|
||||
|
||||
mask = _mask_border_keypoints(octave_image.shape, keypoints,
|
||||
distance=16)
|
||||
keypoints = keypoints[mask]
|
||||
|
||||
orientations = corner_orientations(octave_image, keypoints,
|
||||
OFAST_MASK)
|
||||
|
||||
harris_response = corner_harris(octave_image, method='k',
|
||||
k=self.harris_k)
|
||||
responses = harris_response[keypoints[:, 0], keypoints[:, 1]]
|
||||
|
||||
return keypoints, orientations, responses
|
||||
|
||||
def detect(self, image):
|
||||
"""Detect oriented FAST keypoints along with the corresponding scale.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : 2D array
|
||||
Input image.
|
||||
|
||||
"""
|
||||
check_nD(image, 2)
|
||||
|
||||
pyramid = self._build_pyramid(image)
|
||||
|
||||
keypoints_list = []
|
||||
orientations_list = []
|
||||
scales_list = []
|
||||
responses_list = []
|
||||
|
||||
for octave in range(len(pyramid)):
|
||||
|
||||
octave_image = np.ascontiguousarray(pyramid[octave])
|
||||
|
||||
keypoints, orientations, responses = self._detect_octave(
|
||||
octave_image)
|
||||
|
||||
keypoints_list.append(keypoints * self.downscale ** octave)
|
||||
orientations_list.append(orientations)
|
||||
scales_list.append(np.full(
|
||||
keypoints.shape[0], self.downscale ** octave,
|
||||
dtype=octave_image.dtype))
|
||||
responses_list.append(responses)
|
||||
|
||||
keypoints = np.vstack(keypoints_list)
|
||||
orientations = np.hstack(orientations_list)
|
||||
scales = np.hstack(scales_list)
|
||||
responses = np.hstack(responses_list)
|
||||
|
||||
if keypoints.shape[0] < self.n_keypoints:
|
||||
self.keypoints = keypoints
|
||||
self.scales = scales
|
||||
self.orientations = orientations
|
||||
self.responses = responses
|
||||
else:
|
||||
# Choose best n_keypoints according to Harris corner response
|
||||
best_indices = responses.argsort()[::-1][:self.n_keypoints]
|
||||
self.keypoints = keypoints[best_indices]
|
||||
self.scales = scales[best_indices]
|
||||
self.orientations = orientations[best_indices]
|
||||
self.responses = responses[best_indices]
|
||||
|
||||
def _extract_octave(self, octave_image, keypoints, orientations):
|
||||
mask = _mask_border_keypoints(octave_image.shape, keypoints,
|
||||
distance=20)
|
||||
keypoints = np.array(keypoints[mask], dtype=np.intp, order='C',
|
||||
copy=False)
|
||||
orientations = np.array(orientations[mask], order='C',
|
||||
copy=False)
|
||||
|
||||
descriptors = _orb_loop(octave_image, keypoints, orientations)
|
||||
|
||||
return descriptors, mask
|
||||
|
||||
def extract(self, image, keypoints, scales, orientations):
|
||||
"""Extract rBRIEF binary descriptors for given keypoints in image.
|
||||
|
||||
Note that the keypoints must be extracted using the same `downscale`
|
||||
and `n_scales` parameters. Additionally, if you want to extract both
|
||||
keypoints and descriptors you should use the faster
|
||||
`detect_and_extract`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : 2D array
|
||||
Input image.
|
||||
keypoints : (N, 2) array
|
||||
Keypoint coordinates as ``(row, col)``.
|
||||
scales : (N, ) array
|
||||
Corresponding scales.
|
||||
orientations : (N, ) array
|
||||
Corresponding orientations in radians.
|
||||
|
||||
"""
|
||||
check_nD(image, 2)
|
||||
|
||||
pyramid = self._build_pyramid(image)
|
||||
|
||||
descriptors_list = []
|
||||
mask_list = []
|
||||
|
||||
# Determine octaves from scales
|
||||
octaves = (np.log(scales) / np.log(self.downscale)).astype(np.intp)
|
||||
|
||||
for octave in range(len(pyramid)):
|
||||
|
||||
# Mask for all keypoints in current octave
|
||||
octave_mask = octaves == octave
|
||||
|
||||
if np.sum(octave_mask) > 0:
|
||||
|
||||
octave_image = np.ascontiguousarray(pyramid[octave])
|
||||
|
||||
octave_keypoints = keypoints[octave_mask]
|
||||
octave_keypoints /= self.downscale ** octave
|
||||
octave_orientations = orientations[octave_mask]
|
||||
|
||||
descriptors, mask = self._extract_octave(octave_image,
|
||||
octave_keypoints,
|
||||
octave_orientations)
|
||||
|
||||
descriptors_list.append(descriptors)
|
||||
mask_list.append(mask)
|
||||
|
||||
self.descriptors = np.vstack(descriptors_list).view(bool)
|
||||
self.mask_ = np.hstack(mask_list)
|
||||
|
||||
def detect_and_extract(self, image):
|
||||
"""Detect oriented FAST keypoints and extract rBRIEF descriptors.
|
||||
|
||||
Note that this is faster than first calling `detect` and then
|
||||
`extract`.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : 2D array
|
||||
Input image.
|
||||
|
||||
"""
|
||||
check_nD(image, 2)
|
||||
|
||||
pyramid = self._build_pyramid(image)
|
||||
|
||||
keypoints_list = []
|
||||
responses_list = []
|
||||
scales_list = []
|
||||
orientations_list = []
|
||||
descriptors_list = []
|
||||
|
||||
for octave in range(len(pyramid)):
|
||||
|
||||
octave_image = np.ascontiguousarray(pyramid[octave])
|
||||
|
||||
keypoints, orientations, responses = self._detect_octave(
|
||||
octave_image)
|
||||
|
||||
if len(keypoints) == 0:
|
||||
keypoints_list.append(keypoints)
|
||||
responses_list.append(responses)
|
||||
descriptors_list.append(np.zeros((0, 256), dtype=bool))
|
||||
continue
|
||||
|
||||
descriptors, mask = self._extract_octave(octave_image, keypoints,
|
||||
orientations)
|
||||
|
||||
scaled_keypoints = keypoints[mask] * self.downscale ** octave
|
||||
keypoints_list.append(scaled_keypoints)
|
||||
responses_list.append(responses[mask])
|
||||
orientations_list.append(orientations[mask])
|
||||
scales_list.append(self.downscale ** octave *
|
||||
np.ones(scaled_keypoints.shape[0], dtype=np.intp))
|
||||
descriptors_list.append(descriptors)
|
||||
|
||||
if len(scales_list) == 0:
|
||||
raise RuntimeError(
|
||||
"ORB found no features. Try passing in an image containing "
|
||||
"greater intensity contrasts between adjacent pixels.")
|
||||
|
||||
keypoints = np.vstack(keypoints_list)
|
||||
responses = np.hstack(responses_list)
|
||||
scales = np.hstack(scales_list)
|
||||
orientations = np.hstack(orientations_list)
|
||||
descriptors = np.vstack(descriptors_list).view(np.bool)
|
||||
|
||||
if keypoints.shape[0] < self.n_keypoints:
|
||||
self.keypoints = keypoints
|
||||
self.scales = scales
|
||||
self.orientations = orientations
|
||||
self.responses = responses
|
||||
self.descriptors = descriptors
|
||||
else:
|
||||
# Choose best n_keypoints according to Harris corner response
|
||||
best_indices = responses.argsort()[::-1][:self.n_keypoints]
|
||||
self.keypoints = keypoints[best_indices]
|
||||
self.scales = scales[best_indices]
|
||||
self.orientations = orientations[best_indices]
|
||||
self.responses = responses[best_indices]
|
||||
self.descriptors = descriptors[best_indices]
|
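For readers skimming this vendored module, a minimal usage sketch of the `detect_and_extract` pathway above may help; the synthetic image pair, the chosen `n_keypoints` value and the brute-force matching step are illustrative assumptions, not part of this file:

import numpy as np
from skimage.feature import ORB, match_descriptors

# Two noisy images sharing one high-contrast square, shifted by a few pixels.
rng = np.random.default_rng(0)
img1 = rng.random((200, 200))
img1[60:100, 60:100] += 2.0
img2 = np.roll(img1, (5, 8), axis=(0, 1))

orb = ORB(n_keypoints=100)

orb.detect_and_extract(img1)
kp1, desc1 = orb.keypoints, orb.descriptors

orb.detect_and_extract(img2)
kp2, desc2 = orb.keypoints, orb.descriptors

# Binary descriptors are matched by Hamming distance.
matches = match_descriptors(desc1, desc2, cross_check=True)
print(matches.shape)   # (n_matches, 2) index pairs into kp1 / kp2

The high-contrast square is there deliberately: a nearly flat image would trigger the RuntimeError raised above when no octave yields any features.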
BIN
venv/Lib/site-packages/skimage/feature/orb_cy.cp36-win32.pyd
Normal file
Binary file not shown.
@ -0,0 +1,256 @@
[256 data rows omitted: four coordinates per row (row0, col0, row1, col1), the intensity-comparison sampling pairs shipped as orb_descriptor_positions.txt and registered in setup.py below]
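This coordinate table is what the compiled `_orb_loop` walks for every keypoint: each row names two offsets around the keypoint, and a descriptor bit records which of the two orientation-steered positions is brighter. A rough pure-Python sketch of that idea, assuming integer keypoints well inside the image; the rotation convention and the comparison direction are guesses for illustration, not copied from orb_cy.pyx:

import numpy as np

def rbrief_bits(image, keypoint, orientation, positions):
    # positions: (256, 4) array of (r0, c0, r1, c1) offsets, as in the table above.
    sin_o, cos_o = np.sin(orientation), np.cos(orientation)
    r, c = keypoint
    bits = np.zeros(len(positions), dtype=bool)
    for i, (r0, c0, r1, c1) in enumerate(positions):
        # Steer both sampling points by the keypoint orientation
        # (assumed convention; the Cython loop defines the real one).
        sr0 = int(round(cos_o * r0 - sin_o * c0))
        sc0 = int(round(sin_o * r0 + cos_o * c0))
        sr1 = int(round(cos_o * r1 - sin_o * c1))
        sc1 = int(round(sin_o * r1 + cos_o * c1))
        bits[i] = image[r + sr0, c + sc0] < image[r + sr1, c + sc1]
    return bits

The offsets stay within roughly ±13 pixels, which is why `_extract_octave` above discards keypoints closer than 20 pixels to the image border before calling the compiled loop.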
357
venv/Lib/site-packages/skimage/feature/peak.py
Normal file
@ -0,0 +1,357 @@
|
|||
import numpy as np
|
||||
import scipy.ndimage as ndi
|
||||
from .. import measure
|
||||
from ..filters import rank_order
|
||||
|
||||
|
||||
def _get_high_intensity_peaks(image, mask, num_peaks):
|
||||
"""
|
||||
Return the highest intensity peak coordinates.
|
||||
"""
|
||||
# get coordinates of peaks
|
||||
coord = np.nonzero(mask)
|
||||
intensities = image[coord]
|
||||
# Highest peak first
|
||||
idx_maxsort = np.argsort(-intensities)
|
||||
coord = np.transpose(coord)[idx_maxsort]
|
||||
# select num_peaks peaks
|
||||
if len(coord) > num_peaks:
|
||||
coord = coord[:num_peaks]
|
||||
return coord
|
||||
|
||||
|
||||
def _get_peak_mask(image, min_distance, footprint, threshold_abs,
|
||||
threshold_rel):
|
||||
"""
|
||||
Return the mask containing all peak candidates above thresholds.
|
||||
"""
|
||||
if footprint is not None:
|
||||
image_max = ndi.maximum_filter(image, footprint=footprint,
|
||||
mode='constant')
|
||||
else:
|
||||
size = 2 * min_distance + 1
|
||||
image_max = ndi.maximum_filter(image, size=size, mode='constant')
|
||||
mask = image == image_max
|
||||
if threshold_rel is not None:
|
||||
threshold = max(threshold_abs, threshold_rel * image.max())
|
||||
else:
|
||||
threshold = threshold_abs
|
||||
mask &= image > threshold
|
||||
return mask
|
||||
|
||||
|
||||
def _exclude_border(mask, exclude_border):
|
||||
"""
|
||||
Remove peaks near the borders
|
||||
"""
|
||||
# zero out the image borders
|
||||
for i, excluded in enumerate(exclude_border):
|
||||
if excluded == 0:
|
||||
continue
|
||||
mask[(slice(None),) * i + (slice(None, excluded),)] = False
|
||||
mask[(slice(None),) * i + (slice(-excluded, None),)] = False
|
||||
return mask
|
||||
|
||||
|
||||
def peak_local_max(image, min_distance=1, threshold_abs=None,
|
||||
threshold_rel=None, exclude_border=True, indices=True,
|
||||
num_peaks=np.inf, footprint=None, labels=None,
|
||||
num_peaks_per_label=np.inf):
|
||||
"""Find peaks in an image as coordinate list or boolean mask.
|
||||
|
||||
Peaks are the local maxima in a region of `2 * min_distance + 1`
|
||||
(i.e. peaks are separated by at least `min_distance`).
|
||||
|
||||
If there are multiple local maxima with identical pixel intensities
|
||||
inside the region defined with `min_distance`,
|
||||
the coordinates of all such pixels are returned.
|
||||
|
||||
If both `threshold_abs` and `threshold_rel` are provided, the maximum
|
||||
of the two is chosen as the minimum intensity threshold of peaks.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray
|
||||
Input image.
|
||||
min_distance : int, optional
|
||||
Minimum number of pixels separating peaks in a region of `2 *
|
||||
min_distance + 1` (i.e. peaks are separated by at least
|
||||
`min_distance`).
|
||||
To find the maximum number of peaks, use `min_distance=1`.
|
||||
threshold_abs : float, optional
|
||||
Minimum intensity of peaks. By default, the absolute threshold is
|
||||
the minimum intensity of the image.
|
||||
threshold_rel : float, optional
|
||||
Minimum intensity of peaks, calculated as `max(image) * threshold_rel`.
|
||||
exclude_border : int, tuple of ints, or bool, optional
|
||||
If positive integer, `exclude_border` excludes peaks from within
|
||||
`exclude_border`-pixels of the border of the image.
|
||||
If tuple of non-negative ints, the length of the tuple must match the
|
||||
input array's dimensionality. Each element of the tuple will exclude
|
||||
peaks from within `exclude_border`-pixels of the border of the image
|
||||
along that dimension.
|
||||
If True, takes the `min_distance` parameter as value.
|
||||
If zero or False, peaks are identified regardless of their distance
|
||||
from the border.
|
||||
indices : bool, optional
|
||||
If True, the output will be an array representing peak
|
||||
coordinates. The coordinates are sorted according to peak
values (larger first). If False, the output will be a boolean
|
||||
array shaped as `image.shape` with peaks present at True
|
||||
elements.
|
||||
num_peaks : int, optional
|
||||
Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
|
||||
return `num_peaks` peaks based on highest peak intensity.
|
||||
footprint : ndarray of bools, optional
|
||||
If provided, `footprint == 1` represents the local region within which
|
||||
to search for peaks at every point in `image`. Overrides
|
||||
`min_distance`.
|
||||
labels : ndarray of ints, optional
|
||||
If provided, each unique region `labels == value` represents a unique
|
||||
region to search for peaks. Zero is reserved for background.
|
||||
num_peaks_per_label : int, optional
|
||||
Maximum number of peaks for each label.
|
||||
|
||||
Returns
|
||||
-------
|
||||
output : ndarray or ndarray of bools
|
||||
|
||||
* If `indices = True` : (row, column, ...) coordinates of peaks.
|
||||
* If `indices = False` : Boolean array shaped like `image`, with peaks
|
||||
represented by True values.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The peak local maximum function returns the coordinates of local peaks
|
||||
(maxima) in an image. A maximum filter is used for finding local maxima.
|
||||
This operation dilates the original image. After comparison of the dilated
|
||||
and original image, this function returns the coordinates or a mask of the
|
||||
peaks where the dilated image equals the original image.
|
||||
|
||||
See also
|
||||
--------
|
||||
skimage.feature.corner_peaks
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> img1 = np.zeros((7, 7))
|
||||
>>> img1[3, 4] = 1
|
||||
>>> img1[3, 2] = 1.5
|
||||
>>> img1
|
||||
array([[0. , 0. , 0. , 0. , 0. , 0. , 0. ],
|
||||
[0. , 0. , 0. , 0. , 0. , 0. , 0. ],
|
||||
[0. , 0. , 0. , 0. , 0. , 0. , 0. ],
|
||||
[0. , 0. , 1.5, 0. , 1. , 0. , 0. ],
|
||||
[0. , 0. , 0. , 0. , 0. , 0. , 0. ],
|
||||
[0. , 0. , 0. , 0. , 0. , 0. , 0. ],
|
||||
[0. , 0. , 0. , 0. , 0. , 0. , 0. ]])
|
||||
|
||||
>>> peak_local_max(img1, min_distance=1)
|
||||
array([[3, 2],
|
||||
[3, 4]])
|
||||
|
||||
>>> peak_local_max(img1, min_distance=2)
|
||||
array([[3, 2]])
|
||||
|
||||
>>> img2 = np.zeros((20, 20, 20))
|
||||
>>> img2[10, 10, 10] = 1
|
||||
>>> peak_local_max(img2, exclude_border=0)
|
||||
array([[10, 10, 10]])
|
||||
|
||||
"""
|
||||
out = np.zeros_like(image, dtype=np.bool)
|
||||
|
||||
threshold_abs = threshold_abs if threshold_abs is not None else image.min()
|
||||
|
||||
if isinstance(exclude_border, bool):
|
||||
exclude_border = (min_distance if exclude_border else 0,) * image.ndim
|
||||
elif isinstance(exclude_border, int):
|
||||
if exclude_border < 0:
|
||||
raise ValueError("`exclude_border` cannot be a negative value")
|
||||
exclude_border = (exclude_border,) * image.ndim
|
||||
elif isinstance(exclude_border, tuple):
|
||||
if len(exclude_border) != image.ndim:
|
||||
raise ValueError(
|
||||
"`exclude_border` should have the same length as the "
|
||||
"dimensionality of the image.")
|
||||
for exclude in exclude_border:
|
||||
if not isinstance(exclude, int):
|
||||
raise ValueError(
|
||||
"`exclude_border`, when expressed as a tuple, must only "
|
||||
"contain ints."
|
||||
)
|
||||
if exclude < 0:
|
||||
raise ValueError(
|
||||
"`exclude_border` cannot contain a negative value")
|
||||
else:
|
||||
raise TypeError(
|
||||
"`exclude_border` must be bool, int, or tuple with the same "
|
||||
"length as the dimensionality of the image.")
|
||||
|
||||
# no peak for a trivial image
|
||||
if np.all(image == image.flat[0]):
|
||||
if indices is True:
|
||||
return np.empty((0, image.ndim), np.int)
|
||||
else:
|
||||
return out
|
||||
|
||||
# In the case of labels, call ndi on each label
|
||||
if labels is not None:
|
||||
label_values = np.unique(labels)
|
||||
# Reorder label values to have consecutive integers (no gaps)
|
||||
if np.any(np.diff(label_values) != 1):
|
||||
mask = labels >= 1
|
||||
labels[mask] = 1 + rank_order(labels[mask])[0].astype(labels.dtype)
|
||||
labels = labels.astype(np.int32)
|
||||
|
||||
# create a mask for the non-exclude region
|
||||
inner_mask = _exclude_border(np.ones_like(labels, dtype=bool),
|
||||
exclude_border)
|
||||
|
||||
# For each label, extract a smaller image enclosing the object of
|
||||
# interest, identify num_peaks_per_label peaks and mark them in
|
||||
# variable out.
|
||||
for label_idx, obj in enumerate(ndi.find_objects(labels)):
|
||||
img_object = image[obj] * (labels[obj] == label_idx + 1)
|
||||
mask = _get_peak_mask(img_object, min_distance, footprint,
|
||||
threshold_abs, threshold_rel)
|
||||
if exclude_border:
|
||||
# remove peaks that fall in the excluded border region
|
||||
mask &= inner_mask[obj]
|
||||
coordinates = _get_high_intensity_peaks(img_object, mask,
|
||||
num_peaks_per_label)
|
||||
nd_indices = tuple(coordinates.T)
|
||||
mask.fill(False)
|
||||
mask[nd_indices] = True
|
||||
out[obj] += mask
|
||||
|
||||
if not indices and np.isinf(num_peaks):
|
||||
return out
|
||||
|
||||
coordinates = _get_high_intensity_peaks(image, out, num_peaks)
|
||||
if indices:
|
||||
return coordinates
|
||||
else:
|
||||
out.fill(False)
|
||||
nd_indices = tuple(coordinates.T)
|
||||
out[nd_indices] = True
|
||||
return out
|
||||
|
||||
# Non maximum filter
|
||||
mask = _get_peak_mask(image, min_distance, footprint, threshold_abs,
|
||||
threshold_rel)
|
||||
|
||||
mask = _exclude_border(mask, exclude_border)
|
||||
|
||||
# Select highest intensities (num_peaks)
|
||||
coordinates = _get_high_intensity_peaks(image, mask, num_peaks)
|
||||
|
||||
if indices is True:
|
||||
return coordinates
|
||||
else:
|
||||
nd_indices = tuple(coordinates.T)
|
||||
out[nd_indices] = True
|
||||
return out
|
||||
|
||||
|
||||
def _prominent_peaks(image, min_xdistance=1, min_ydistance=1,
|
||||
threshold=None, num_peaks=np.inf):
|
||||
"""Return peaks with non-maximum suppression.
|
||||
|
||||
Identifies most prominent features separated by certain distances.
|
||||
Non-maximum suppression with different sizes is applied separately
|
||||
in the first and second dimension of the image to identify peaks.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : (M, N) ndarray
|
||||
Input image.
|
||||
min_xdistance : int
|
||||
Minimum distance separating features in the x dimension.
|
||||
min_ydistance : int
|
||||
Minimum distance separating features in the y dimension.
|
||||
threshold : float
|
||||
Minimum intensity of peaks. Default is `0.5 * max(image)`.
|
||||
num_peaks : int
|
||||
Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
|
||||
return `num_peaks` coordinates based on peak intensity.
|
||||
|
||||
Returns
|
||||
-------
|
||||
intensity, xcoords, ycoords : tuple of array
|
||||
Peak intensity values, x and y indices.
|
||||
"""
|
||||
|
||||
img = image.copy()
|
||||
rows, cols = img.shape
|
||||
|
||||
if threshold is None:
|
||||
threshold = 0.5 * np.max(img)
|
||||
|
||||
ycoords_size = 2 * min_ydistance + 1
|
||||
xcoords_size = 2 * min_xdistance + 1
|
||||
img_max = ndi.maximum_filter1d(img, size=ycoords_size, axis=0,
|
||||
mode='constant', cval=0)
|
||||
img_max = ndi.maximum_filter1d(img_max, size=xcoords_size, axis=1,
|
||||
mode='constant', cval=0)
|
||||
mask = (img == img_max)
|
||||
img *= mask
|
||||
img_t = img > threshold
|
||||
|
||||
label_img = measure.label(img_t)
|
||||
props = measure.regionprops(label_img, img_max)
|
||||
|
||||
# Sort the list of peaks by intensity, not left-right, so larger peaks
|
||||
# in Hough space cannot be arbitrarily suppressed by smaller neighbors
|
||||
props = sorted(props, key=lambda x: x.max_intensity)[::-1]
|
||||
coords = np.array([np.round(p.centroid) for p in props], dtype=int)
|
||||
|
||||
img_peaks = []
|
||||
ycoords_peaks = []
|
||||
xcoords_peaks = []
|
||||
|
||||
# relative coordinate grid for local neighbourhood suppression
|
||||
ycoords_ext, xcoords_ext = np.mgrid[-min_ydistance:min_ydistance + 1,
|
||||
-min_xdistance:min_xdistance + 1]
|
||||
|
||||
for ycoords_idx, xcoords_idx in coords:
|
||||
accum = img_max[ycoords_idx, xcoords_idx]
|
||||
if accum > threshold:
|
||||
# absolute coordinate grid for local neighbourhood suppression
|
||||
ycoords_nh = ycoords_idx + ycoords_ext
|
||||
xcoords_nh = xcoords_idx + xcoords_ext
|
||||
|
||||
# no reflection for distance neighbourhood
|
||||
ycoords_in = np.logical_and(ycoords_nh > 0, ycoords_nh < rows)
|
||||
ycoords_nh = ycoords_nh[ycoords_in]
|
||||
xcoords_nh = xcoords_nh[ycoords_in]
|
||||
|
||||
# reflect xcoords and assume xcoords are continuous,
|
||||
# e.g. for angles:
|
||||
# (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)
|
||||
xcoords_low = xcoords_nh < 0
|
||||
ycoords_nh[xcoords_low] = rows - ycoords_nh[xcoords_low]
|
||||
xcoords_nh[xcoords_low] += cols
|
||||
xcoords_high = xcoords_nh >= cols
|
||||
ycoords_nh[xcoords_high] = rows - ycoords_nh[xcoords_high]
|
||||
xcoords_nh[xcoords_high] -= cols
|
||||
|
||||
# suppress neighbourhood
|
||||
img_max[ycoords_nh, xcoords_nh] = 0
|
||||
|
||||
# add current feature to peaks
|
||||
img_peaks.append(accum)
|
||||
ycoords_peaks.append(ycoords_idx)
|
||||
xcoords_peaks.append(xcoords_idx)
|
||||
|
||||
img_peaks = np.array(img_peaks)
|
||||
ycoords_peaks = np.array(ycoords_peaks)
|
||||
xcoords_peaks = np.array(xcoords_peaks)
|
||||
|
||||
if num_peaks < len(img_peaks):
|
||||
idx_maxsort = np.argsort(img_peaks)[::-1][:num_peaks]
|
||||
img_peaks = img_peaks[idx_maxsort]
|
||||
ycoords_peaks = ycoords_peaks[idx_maxsort]
|
||||
xcoords_peaks = xcoords_peaks[idx_maxsort]
|
||||
|
||||
return img_peaks, xcoords_peaks, ycoords_peaks
|
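`peak_local_max` above is the primitive behind `corner_peaks` and the blob detectors elsewhere in this package, so a compact sketch of its two output modes and of the per-label search may be useful; the toy arrays below are invented for illustration:

import numpy as np
from skimage.feature import peak_local_max

img = np.zeros((20, 20))
img[4, 4] = 1.0
img[14, 15] = 2.0

# Coordinate output, strongest peak first.
print(peak_local_max(img, min_distance=3))         # [[14 15], [ 4  4]]

# Boolean-mask output of the same peaks.
mask = peak_local_max(img, min_distance=3, indices=False)
print(mask.sum())                                  # 2

# Restrict the search to labelled regions; label 0 is background.
labels = np.zeros_like(img, dtype=int)
labels[:10, :10] = 1
labels[10:, 10:] = 2
print(peak_local_max(img, labels=labels, num_peaks_per_label=1).shape)   # (2, 2)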
59
venv/Lib/site-packages/skimage/feature/setup.py
Normal file
@ -0,0 +1,59 @@
#!/usr/bin/env python

import os
from skimage._build import cython

base_path = os.path.abspath(os.path.dirname(__file__))


def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs

    config = Configuration('feature', parent_package, top_path)
    config.add_data_files('orb_descriptor_positions.txt')

    cython(['corner_cy.pyx',
            'censure_cy.pyx',
            'orb_cy.pyx',
            'brief_cy.pyx',
            '_texture.pyx',
            '_hessian_det_appx.pyx',
            '_hoghistogram.pyx',
            ], working_path=base_path)
    # _haar uses c++, so it must be cythonized separately
    cython(['_cascade.pyx',
            '_haar.pyx'], working_path=base_path)

    config.add_extension('_cascade', sources=['_cascade.cpp'],
                         include_dirs=[get_numpy_include_dirs()],
                         language="c++")
    config.add_extension('corner_cy', sources=['corner_cy.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('censure_cy', sources=['censure_cy.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('orb_cy', sources=['orb_cy.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('brief_cy', sources=['brief_cy.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('_texture', sources=['_texture.c'],
                         include_dirs=[get_numpy_include_dirs(), '../_shared'])
    config.add_extension('_hessian_det_appx', sources=['_hessian_det_appx.c'],
                         include_dirs=[get_numpy_include_dirs()])
    config.add_extension('_hoghistogram', sources=['_hoghistogram.c'],
                         include_dirs=[get_numpy_include_dirs(), '../_shared'])
    config.add_extension('_haar', sources=['_haar.cpp'],
                         include_dirs=[get_numpy_include_dirs(), '../_shared'],
                         language="c++")

    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(maintainer='scikit-image Developers',
          author='scikit-image Developers',
          maintainer_email='scikit-image@python.org',
          description='Features',
          url='https://github.com/scikit-image/scikit-image',
          license='SciPy License (BSD Style)',
          **(configuration(top_path='').todict())
          )
179
venv/Lib/site-packages/skimage/feature/template.py
Normal file
|
@ -0,0 +1,179 @@
|
|||
import numpy as np
|
||||
from scipy.signal import fftconvolve
|
||||
|
||||
from .._shared.utils import check_nD
|
||||
|
||||
|
||||
def _window_sum_2d(image, window_shape):
|
||||
|
||||
window_sum = np.cumsum(image, axis=0)
|
||||
window_sum = (window_sum[window_shape[0]:-1]
|
||||
- window_sum[:-window_shape[0] - 1])
|
||||
|
||||
window_sum = np.cumsum(window_sum, axis=1)
|
||||
window_sum = (window_sum[:, window_shape[1]:-1]
|
||||
- window_sum[:, :-window_shape[1] - 1])
|
||||
|
||||
return window_sum
|
||||
|
||||
|
||||
def _window_sum_3d(image, window_shape):
|
||||
|
||||
window_sum = _window_sum_2d(image, window_shape)
|
||||
|
||||
window_sum = np.cumsum(window_sum, axis=2)
|
||||
window_sum = (window_sum[:, :, window_shape[2]:-1]
|
||||
- window_sum[:, :, :-window_shape[2] - 1])
|
||||
|
||||
return window_sum
|
||||
|
||||
|
||||
def match_template(image, template, pad_input=False, mode='constant',
|
||||
constant_values=0):
|
||||
"""Match a template to a 2-D or 3-D image using normalized correlation.
|
||||
|
||||
The output is an array with values between -1.0 and 1.0. The value at a
|
||||
given position corresponds to the correlation coefficient between the image
|
||||
and the template.
|
||||
|
||||
For `pad_input=True` matches correspond to the center and otherwise to the
|
||||
top-left corner of the template. To find the best match you must search for
|
||||
peaks in the response (output) image.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : (M, N[, D]) array
|
||||
2-D or 3-D input image.
|
||||
template : (m, n[, d]) array
|
||||
Template to locate. It must be `(m <= M, n <= N[, d <= D])`.
|
||||
pad_input : bool
|
||||
If True, pad `image` so that output is the same size as the image, and
|
||||
output values correspond to the template center. Otherwise, the output
|
||||
is an array with shape `(M - m + 1, N - n + 1)` for an `(M, N)` image
|
||||
and an `(m, n)` template, and matches correspond to origin
|
||||
(top-left corner) of the template.
|
||||
mode : see `numpy.pad`, optional
|
||||
Padding mode.
|
||||
constant_values : see `numpy.pad`, optional
|
||||
Constant values used in conjunction with ``mode='constant'``.
|
||||
|
||||
Returns
|
||||
-------
|
||||
output : array
|
||||
Response image with correlation coefficients.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Details on the cross-correlation are presented in [1]_. This implementation
|
||||
uses FFT convolutions of the image and the template. Reference [2]_
|
||||
presents similar derivations but the approximation presented in this
|
||||
reference is not used in our implementation.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light
|
||||
and Magic.
|
||||
.. [2] Briechle and Hanebeck, "Template Matching using Fast Normalized
|
||||
Cross Correlation", Proceedings of the SPIE (2001).
|
||||
:DOI:`10.1117/12.421129`
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> template = np.zeros((3, 3))
|
||||
>>> template[1, 1] = 1
|
||||
>>> template
|
||||
array([[0., 0., 0.],
|
||||
[0., 1., 0.],
|
||||
[0., 0., 0.]])
|
||||
>>> image = np.zeros((6, 6))
|
||||
>>> image[1, 1] = 1
|
||||
>>> image[4, 4] = -1
|
||||
>>> image
|
||||
array([[ 0., 0., 0., 0., 0., 0.],
|
||||
[ 0., 1., 0., 0., 0., 0.],
|
||||
[ 0., 0., 0., 0., 0., 0.],
|
||||
[ 0., 0., 0., 0., 0., 0.],
|
||||
[ 0., 0., 0., 0., -1., 0.],
|
||||
[ 0., 0., 0., 0., 0., 0.]])
|
||||
>>> result = match_template(image, template)
|
||||
>>> np.round(result, 3)
|
||||
array([[ 1. , -0.125, 0. , 0. ],
|
||||
[-0.125, -0.125, 0. , 0. ],
|
||||
[ 0. , 0. , 0.125, 0.125],
|
||||
[ 0. , 0. , 0.125, -1. ]])
|
||||
>>> result = match_template(image, template, pad_input=True)
|
||||
>>> np.round(result, 3)
|
||||
array([[-0.125, -0.125, -0.125, 0. , 0. , 0. ],
|
||||
[-0.125, 1. , -0.125, 0. , 0. , 0. ],
|
||||
[-0.125, -0.125, -0.125, 0. , 0. , 0. ],
|
||||
[ 0. , 0. , 0. , 0.125, 0.125, 0.125],
|
||||
[ 0. , 0. , 0. , 0.125, -1. , 0.125],
|
||||
[ 0. , 0. , 0. , 0.125, 0.125, 0.125]])
|
||||
"""
|
||||
check_nD(image, (2, 3))
|
||||
|
||||
if image.ndim < template.ndim:
|
||||
raise ValueError("Dimensionality of template must be less than or "
|
||||
"equal to the dimensionality of image.")
|
||||
if np.any(np.less(image.shape, template.shape)):
|
||||
raise ValueError("Image must be larger than template.")
|
||||
|
||||
image_shape = image.shape
|
||||
|
||||
image = np.array(image, dtype=np.float64, copy=False)
|
||||
|
||||
pad_width = tuple((width, width) for width in template.shape)
|
||||
if mode == 'constant':
|
||||
image = np.pad(image, pad_width=pad_width, mode=mode,
|
||||
constant_values=constant_values)
|
||||
else:
|
||||
image = np.pad(image, pad_width=pad_width, mode=mode)
|
||||
|
||||
# Use special case for 2-D images for much better performance in
|
||||
# computation of integral images
|
||||
if image.ndim == 2:
|
||||
image_window_sum = _window_sum_2d(image, template.shape)
|
||||
image_window_sum2 = _window_sum_2d(image ** 2, template.shape)
|
||||
elif image.ndim == 3:
|
||||
image_window_sum = _window_sum_3d(image, template.shape)
|
||||
image_window_sum2 = _window_sum_3d(image ** 2, template.shape)
|
||||
|
||||
template_mean = template.mean()
|
||||
template_volume = np.prod(template.shape)
|
||||
template_ssd = np.sum((template - template_mean) ** 2)
|
||||
|
||||
if image.ndim == 2:
|
||||
xcorr = fftconvolve(image, template[::-1, ::-1],
|
||||
mode="valid")[1:-1, 1:-1]
|
||||
elif image.ndim == 3:
|
||||
xcorr = fftconvolve(image, template[::-1, ::-1, ::-1],
|
||||
mode="valid")[1:-1, 1:-1, 1:-1]
|
||||
|
||||
numerator = xcorr - image_window_sum * template_mean
|
||||
|
||||
denominator = image_window_sum2
|
||||
np.multiply(image_window_sum, image_window_sum, out=image_window_sum)
|
||||
np.divide(image_window_sum, template_volume, out=image_window_sum)
|
||||
denominator -= image_window_sum
|
||||
denominator *= template_ssd
|
||||
np.maximum(denominator, 0, out=denominator) # sqrt of negative number not allowed
|
||||
np.sqrt(denominator, out=denominator)
|
||||
|
||||
response = np.zeros_like(xcorr, dtype=np.float64)
|
||||
|
||||
# avoid zero-division
|
||||
mask = denominator > np.finfo(np.float64).eps
|
||||
|
||||
response[mask] = numerator[mask] / denominator[mask]
|
||||
|
||||
slices = []
|
||||
for i in range(template.ndim):
|
||||
if pad_input:
|
||||
d0 = (template.shape[i] - 1) // 2
|
||||
d1 = d0 + image_shape[i]
|
||||
else:
|
||||
d0 = template.shape[i] - 1
|
||||
d1 = d0 + image_shape[i] - template.shape[i] + 1
|
||||
slices.append(slice(d0, d1))
|
||||
|
||||
return response[tuple(slices)]
|
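`match_template` only returns the response surface, so locating the best match is still a peak search on the output; `np.unravel_index(np.argmax(...))` is the usual follow-up, or `peak_local_max` when several matches are expected. A short sketch with a single synthetic target (array contents are made up for illustration):

import numpy as np
from skimage.feature import match_template

image = np.zeros((60, 60))
image[20:28, 30:38] = 1.0            # object to find
template = np.zeros((12, 12))
template[2:10, 2:10] = 1.0           # same object inside a small patch

result = match_template(image, template, pad_input=True)

# With pad_input=True the response is image-sized and peaks at the object centre.
row, col = np.unravel_index(np.argmax(result), result.shape)
print(row, col)          # roughly (23, 33)
print(result.max())      # close to 1.0 for a near-perfect match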
9
venv/Lib/site-packages/skimage/feature/tests/__init__.py
Normal file
@ -0,0 +1,9 @@
from ..._shared.testing import setup_test, teardown_test


def setup():
    setup_test()


def teardown():
    teardown_test()
445
venv/Lib/site-packages/skimage/feature/tests/test_blob.py
Normal file
|
@ -0,0 +1,445 @@
|
|||
import numpy as np
|
||||
from skimage.draw import disk
|
||||
from skimage.draw.draw3d import ellipsoid
|
||||
from skimage.feature import blob_dog, blob_log, blob_doh
|
||||
from skimage.feature.blob import _blob_overlap
|
||||
import math
|
||||
from numpy.testing import assert_almost_equal
|
||||
|
||||
|
||||
def test_blob_dog():
|
||||
r2 = math.sqrt(2)
|
||||
img = np.ones((512, 512))
|
||||
|
||||
xs, ys = disk((400, 130), 5)
|
||||
img[xs, ys] = 255
|
||||
|
||||
xs, ys = disk((100, 300), 25)
|
||||
img[xs, ys] = 255
|
||||
|
||||
xs, ys = disk((200, 350), 45)
|
||||
img[xs, ys] = 255
|
||||
|
||||
blobs = blob_dog(img, min_sigma=5, max_sigma=50)
|
||||
radius = lambda x: r2 * x[2]
|
||||
s = sorted(blobs, key=radius)
|
||||
thresh = 5
|
||||
|
||||
b = s[0]
|
||||
assert abs(b[0] - 400) <= thresh
|
||||
assert abs(b[1] - 130) <= thresh
|
||||
assert abs(radius(b) - 5) <= thresh
|
||||
|
||||
b = s[1]
|
||||
assert abs(b[0] - 100) <= thresh
|
||||
assert abs(b[1] - 300) <= thresh
|
||||
assert abs(radius(b) - 25) <= thresh
|
||||
|
||||
b = s[2]
|
||||
assert abs(b[0] - 200) <= thresh
|
||||
assert abs(b[1] - 350) <= thresh
|
||||
assert abs(radius(b) - 45) <= thresh
|
||||
|
||||
# Testing no peaks
|
||||
img_empty = np.zeros((100,100))
|
||||
assert blob_dog(img_empty).size == 0
|
||||
|
||||
# Testing 3D
|
||||
r = 10
|
||||
pad = 10
|
||||
im3 = ellipsoid(r, r, r)
|
||||
im3 = np.pad(im3, pad, mode='constant')
|
||||
|
||||
blobs = blob_dog(im3, min_sigma=3, max_sigma=10,
|
||||
sigma_ratio=1.2, threshold=0.1)
|
||||
b = blobs[0]
|
||||
|
||||
assert b.shape == (4,)
|
||||
assert b[0] == r + pad + 1
|
||||
assert b[1] == r + pad + 1
|
||||
assert b[2] == r + pad + 1
|
||||
assert abs(math.sqrt(3) * b[3] - r) < 1
|
||||
|
||||
# Testing 3D anisotropic
|
||||
r = 10
|
||||
pad = 10
|
||||
im3 = ellipsoid(r / 2, r, r)
|
||||
im3 = np.pad(im3, pad, mode='constant')
|
||||
|
||||
blobs = blob_dog(
|
||||
im3,
|
||||
min_sigma=[1.5, 3, 3],
|
||||
max_sigma=[5, 10, 10],
|
||||
sigma_ratio=1.2,
|
||||
threshold=0.1
|
||||
)
|
||||
b = blobs[0]
|
||||
|
||||
assert b.shape == (6,)
|
||||
assert b[0] == r / 2 + pad + 1
|
||||
assert b[1] == r + pad + 1
|
||||
assert b[2] == r + pad + 1
|
||||
assert abs(math.sqrt(3) * b[3] - r / 2) < 1
|
||||
assert abs(math.sqrt(3) * b[4] - r) < 1
|
||||
assert abs(math.sqrt(3) * b[5] - r) < 1
|
||||
|
||||
# Testing exclude border
|
||||
|
||||
# image where blob is 5 px from borders, radius 5
|
||||
img = np.ones((512, 512))
|
||||
xs, ys = disk((5, 5), 5)
|
||||
img[xs, ys] = 255
|
||||
|
||||
|
||||
def test_blob_dog_excl_border():
|
||||
img = np.ones((512, 512))
|
||||
xs, ys = disk((5, 5), 5)
|
||||
img[xs, ys] = 255
|
||||
blobs = blob_dog(
|
||||
img,
|
||||
min_sigma=1.5,
|
||||
max_sigma=5,
|
||||
sigma_ratio=1.2,
|
||||
)
|
||||
assert blobs.shape[0] == 1
|
||||
b = blobs[0]
|
||||
assert b[0] == b[1] == 5, "blob should be 5 px from x and y borders"
|
||||
|
||||
blobs = blob_dog(
|
||||
img,
|
||||
min_sigma=1.5,
|
||||
max_sigma=5,
|
||||
sigma_ratio=1.2,
|
||||
exclude_border=6,
|
||||
)
|
||||
msg = "zero blobs should be detected, as only blob is 5 px from border"
|
||||
assert blobs.shape[0] == 0, msg
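A convention these tests lean on, and worth keeping in mind while reading them: the last column returned by `blob_dog`/`blob_log` is the detected sigma, so the estimated radius is about sqrt(2)*sigma for 2-D images and sqrt(3)*sigma for 3-D volumes, which is where the `r2 * x[2]` and `math.sqrt(3) * b[3]` expressions above come from. A tiny helper making that conversion explicit; the function name is ours, not part of the test module:

import math
import numpy as np

def blob_radii(blobs, ndim=2):
    # blobs: (n, ndim + 1) array from blob_dog / blob_log with isotropic sigmas.
    return blobs[:, -1] * math.sqrt(ndim)

blobs = np.array([[400.0, 130.0, 3.5],
                  [100.0, 300.0, 18.0]])
print(blob_radii(blobs))   # approx. [ 4.95, 25.46]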
|
||||
|
||||
|
||||
def test_blob_log():
|
||||
r2 = math.sqrt(2)
|
||||
img = np.ones((256, 256))
|
||||
|
||||
xs, ys = disk((200, 65), 5)
|
||||
img[xs, ys] = 255
|
||||
|
||||
xs, ys = disk((80, 25), 15)
|
||||
img[xs, ys] = 255
|
||||
|
||||
xs, ys = disk((50, 150), 25)
|
||||
img[xs, ys] = 255
|
||||
|
||||
xs, ys = disk((100, 175), 30)
|
||||
img[xs, ys] = 255
|
||||
|
||||
blobs = blob_log(img, min_sigma=5, max_sigma=20, threshold=1)
|
||||
|
||||
radius = lambda x: r2 * x[2]
|
||||
s = sorted(blobs, key=radius)
|
||||
thresh = 3
|
||||
|
||||
b = s[0]
|
||||
assert abs(b[0] - 200) <= thresh
|
||||
assert abs(b[1] - 65) <= thresh
|
||||
assert abs(radius(b) - 5) <= thresh
|
||||
|
||||
b = s[1]
|
||||
assert abs(b[0] - 80) <= thresh
|
||||
assert abs(b[1] - 25) <= thresh
|
||||
assert abs(radius(b) - 15) <= thresh
|
||||
|
||||
b = s[2]
|
||||
assert abs(b[0] - 50) <= thresh
|
||||
assert abs(b[1] - 150) <= thresh
|
||||
assert abs(radius(b) - 25) <= thresh
|
||||
|
||||
b = s[3]
|
||||
assert abs(b[0] - 100) <= thresh
|
||||
assert abs(b[1] - 175) <= thresh
|
||||
assert abs(radius(b) - 30) <= thresh
|
||||
|
||||
# Testing log scale
|
||||
blobs = blob_log(
|
||||
img,
|
||||
min_sigma=5,
|
||||
max_sigma=20,
|
||||
threshold=1,
|
||||
log_scale=True)
|
||||
|
||||
b = s[0]
|
||||
assert abs(b[0] - 200) <= thresh
|
||||
assert abs(b[1] - 65) <= thresh
|
||||
assert abs(radius(b) - 5) <= thresh
|
||||
|
||||
b = s[1]
|
||||
assert abs(b[0] - 80) <= thresh
|
||||
assert abs(b[1] - 25) <= thresh
|
||||
assert abs(radius(b) - 15) <= thresh
|
||||
|
||||
b = s[2]
|
||||
assert abs(b[0] - 50) <= thresh
|
||||
assert abs(b[1] - 150) <= thresh
|
||||
assert abs(radius(b) - 25) <= thresh
|
||||
|
||||
b = s[3]
|
||||
assert abs(b[0] - 100) <= thresh
|
||||
assert abs(b[1] - 175) <= thresh
|
||||
assert abs(radius(b) - 30) <= thresh
|
||||
|
||||
# Testing no peaks
|
||||
img_empty = np.zeros((100,100))
|
||||
assert blob_log(img_empty).size == 0
|
||||
|
||||
|
||||
def test_blob_log_3d():
|
||||
# Testing 3D
|
||||
r = 6
|
||||
pad = 10
|
||||
im3 = ellipsoid(r, r, r)
|
||||
im3 = np.pad(im3, pad, mode='constant')
|
||||
|
||||
blobs = blob_log(im3, min_sigma=3, max_sigma=10)
|
||||
b = blobs[0]
|
||||
|
||||
assert b.shape == (4,)
|
||||
assert b[0] == r + pad + 1
|
||||
assert b[1] == r + pad + 1
|
||||
assert b[2] == r + pad + 1
|
||||
assert abs(math.sqrt(3) * b[3] - r) < 1
|
||||
|
||||
|
||||
def test_blob_log_3d_anisotropic():
|
||||
# Testing 3D anisotropic
|
||||
r = 6
|
||||
pad = 10
|
||||
im3 = ellipsoid(r / 2, r, r)
|
||||
im3 = np.pad(im3, pad, mode='constant')
|
||||
|
||||
blobs = blob_log(
|
||||
im3,
|
||||
min_sigma=[1, 2, 2],
|
||||
max_sigma=[5, 10, 10],
|
||||
)
|
||||
|
||||
b = blobs[0]
|
||||
assert b.shape == (6,)
|
||||
assert b[0] == r / 2 + pad + 1
|
||||
assert b[1] == r + pad + 1
|
||||
assert b[2] == r + pad + 1
|
||||
assert abs(math.sqrt(3) * b[3] - r / 2) < 1
|
||||
assert abs(math.sqrt(3) * b[4] - r) < 1
|
||||
assert abs(math.sqrt(3) * b[5] - r) < 1
|
||||
|
||||
|
||||
def test_blob_log_exclude_border():
|
||||
# image where blob is 5 px from borders, radius 5
|
||||
img = np.ones((512, 512))
|
||||
xs, ys = disk((5, 5), 5)
|
||||
img[xs, ys] = 255
|
||||
|
||||
blobs = blob_log(
|
||||
img,
|
||||
min_sigma=1.5,
|
||||
max_sigma=5,
|
||||
)
|
||||
assert blobs.shape[0] == 1
|
||||
b = blobs[0]
|
||||
assert b[0] == b[1] == 5, "blob should be 5 px from x and y borders"
|
||||
|
||||
blobs = blob_log(
|
||||
img,
|
||||
min_sigma=1.5,
|
||||
max_sigma=5,
|
||||
exclude_border=6,
|
||||
)
|
||||
msg = "zero blobs should be detected, as only blob is 5 px from border"
|
||||
assert blobs.shape[0] == 0, msg
|
||||
|
||||
|
||||
def test_blob_doh():
|
||||
img = np.ones((512, 512), dtype=np.uint8)
|
||||
|
||||
xs, ys = disk((400, 130), 20)
|
||||
img[xs, ys] = 255
|
||||
|
||||
xs, ys = disk((460, 50), 30)
|
||||
img[xs, ys] = 255
|
||||
|
||||
xs, ys = disk((100, 300), 40)
|
||||
img[xs, ys] = 255
|
||||
|
||||
xs, ys = disk((200, 350), 50)
|
||||
img[xs, ys] = 255
|
||||
|
||||
blobs = blob_doh(
|
||||
img,
|
||||
min_sigma=1,
|
||||
max_sigma=60,
|
||||
num_sigma=10,
|
||||
threshold=.05)
|
||||
|
||||
radius = lambda x: x[2]
|
||||
s = sorted(blobs, key=radius)
|
||||
thresh = 4
|
||||
|
||||
b = s[0]
|
||||
assert abs(b[0] - 400) <= thresh
|
||||
assert abs(b[1] - 130) <= thresh
|
||||
assert abs(radius(b) - 20) <= thresh
|
||||
|
||||
b = s[1]
|
||||
assert abs(b[0] - 460) <= thresh
|
||||
assert abs(b[1] - 50) <= thresh
|
||||
assert abs(radius(b) - 30) <= thresh
|
||||
|
||||
b = s[2]
|
||||
assert abs(b[0] - 100) <= thresh
|
||||
assert abs(b[1] - 300) <= thresh
|
||||
assert abs(radius(b) - 40) <= thresh
|
||||
|
||||
b = s[3]
|
||||
assert abs(b[0] - 200) <= thresh
|
||||
assert abs(b[1] - 350) <= thresh
|
||||
assert abs(radius(b) - 50) <= thresh
|
||||
|
||||
|
||||
def test_blob_doh_log_scale():
|
||||
img = np.ones((512, 512), dtype=np.uint8)
|
||||
|
||||
xs, ys = disk((400, 130), 20)
|
||||
img[xs, ys] = 255
|
||||
|
||||
xs, ys = disk((460, 50), 30)
|
||||
img[xs, ys] = 255
|
||||
|
||||
xs, ys = disk((100, 300), 40)
|
||||
img[xs, ys] = 255
|
||||
|
||||
xs, ys = disk((200, 350), 50)
|
||||
img[xs, ys] = 255
|
||||
|
||||
blobs = blob_doh(
|
||||
img,
|
||||
min_sigma=1,
|
||||
max_sigma=60,
|
||||
num_sigma=10,
|
||||
log_scale=True,
|
||||
threshold=.05)
|
||||
|
||||
radius = lambda x: x[2]
|
||||
s = sorted(blobs, key=radius)
|
||||
thresh = 10
|
||||
|
||||
b = s[0]
|
||||
assert abs(b[0] - 400) <= thresh
|
||||
assert abs(b[1] - 130) <= thresh
|
||||
assert abs(radius(b) - 20) <= thresh
|
||||
|
||||
b = s[2]
|
||||
assert abs(b[0] - 460) <= thresh
|
||||
assert abs(b[1] - 50) <= thresh
|
||||
assert abs(radius(b) - 30) <= thresh
|
||||
|
||||
b = s[1]
|
||||
assert abs(b[0] - 100) <= thresh
|
||||
assert abs(b[1] - 300) <= thresh
|
||||
assert abs(radius(b) - 40) <= thresh
|
||||
|
||||
b = s[3]
|
||||
assert abs(b[0] - 200) <= thresh
|
||||
assert abs(b[1] - 350) <= thresh
|
||||
assert abs(radius(b) - 50) <= thresh
|
||||
|
||||
|
||||
def test_blob_doh_no_peaks():
|
||||
# Testing no peaks
|
||||
img_empty = np.zeros((100,100))
|
||||
assert blob_doh(img_empty).size == 0
|
||||
|
||||
|
||||
def test_blob_doh_overlap():
|
||||
img = np.ones((256, 256), dtype=np.uint8)
|
||||
|
||||
xs, ys = disk((100, 100), 20)
|
||||
img[xs, ys] = 255
|
||||
|
||||
xs, ys = disk((120, 100), 30)
|
||||
img[xs, ys] = 255
|
||||
|
||||
blobs = blob_doh(
|
||||
img,
|
||||
min_sigma=1,
|
||||
max_sigma=60,
|
||||
num_sigma=10,
|
||||
threshold=.05
|
||||
)
|
||||
|
||||
assert len(blobs) == 1
|
||||
|
||||
|
||||
def test_blob_log_overlap_3d():
|
||||
r1, r2 = 7, 6
|
||||
pad1, pad2 = 11, 12
|
||||
blob1 = ellipsoid(r1, r1, r1)
|
||||
blob1 = np.pad(blob1, pad1, mode='constant')
|
||||
blob2 = ellipsoid(r2, r2, r2)
|
||||
blob2 = np.pad(blob2, [(pad2, pad2), (pad2 - 9, pad2 + 9),
|
||||
(pad2, pad2)],
|
||||
mode='constant')
|
||||
im3 = np.logical_or(blob1, blob2)
|
||||
|
||||
blobs = blob_log(im3, min_sigma=2, max_sigma=10, overlap=0.1)
|
||||
assert len(blobs) == 1
|
||||
|
||||
|
||||
def test_blob_overlap_3d_anisotropic():
|
||||
# Two spheres with distance between centers equal to radius
|
||||
# One sphere is much smaller than the other so about half of it is within
|
||||
# the bigger sphere.
|
||||
s3 = math.sqrt(3)
|
||||
overlap = _blob_overlap(np.array([0, 0, 0, 2 / s3, 10 / s3, 10 / s3]),
|
||||
np.array([0, 0, 10, 0.2 / s3, 1 / s3, 1 / s3]),
|
||||
sigma_dim=3)
|
||||
assert_almost_equal(overlap, 0.48125)
|
||||
overlap = _blob_overlap(np.array([0, 0, 0, 2 / s3, 10 / s3, 10 / s3]),
|
||||
np.array([2, 0, 0, 0.2 / s3, 1 / s3, 1 / s3]),
|
||||
sigma_dim=3)
|
||||
assert_almost_equal(overlap, 0.48125)
|
||||
|
||||
|
||||
def test_blob_log_anisotropic():
|
||||
image = np.zeros((50, 50))
|
||||
image[20, 10:20] = 1
|
||||
isotropic_blobs = blob_log(image, min_sigma=0.5, max_sigma=2, num_sigma=3)
|
||||
assert len(isotropic_blobs) > 1 # many small blobs found in line
|
||||
ani_blobs = blob_log(image, min_sigma=[0.5, 5], max_sigma=[2, 20],
|
||||
num_sigma=3) # 10x anisotropy, line is 1x10
|
||||
assert len(ani_blobs) == 1 # single anisotropic blob found
|
||||
|
||||
|
||||
def test_blob_log_overlap_3d_anisotropic():
|
||||
r1, r2 = 7, 6
|
||||
pad1, pad2 = 11, 12
|
||||
blob1 = ellipsoid(r1, r1, r1)
|
||||
blob1 = np.pad(blob1, pad1, mode='constant')
|
||||
blob2 = ellipsoid(r2, r2, r2)
|
||||
blob2 = np.pad(blob2, [(pad2, pad2), (pad2 - 9, pad2 + 9),
|
||||
(pad2, pad2)],
|
||||
mode='constant')
|
||||
im3 = np.logical_or(blob1, blob2)
|
||||
|
||||
blobs = blob_log(im3, min_sigma=[2, 2.01, 2.005],
|
||||
max_sigma=10, overlap=0.1)
|
||||
assert len(blobs) == 1
|
||||
|
||||
# Two circles with distance between centers equal to radius
|
||||
overlap = _blob_overlap(np.array([0, 0, 10 / math.sqrt(2)]),
|
||||
np.array([0, 10, 10 / math.sqrt(2)]))
|
||||
assert_almost_equal(overlap,
|
||||
1./math.pi * (2 * math.acos(1./2) - math.sqrt(3)/2.))
|
||||
|
||||
|
||||
def test_no_blob():
|
||||
im = np.zeros((10, 10))
|
||||
blobs = blob_log(im, min_sigma=2, max_sigma=5, num_sigma=4)
|
||||
assert len(blobs) == 0
|
80
venv/Lib/site-packages/skimage/feature/tests/test_brief.py
Normal file
@ -0,0 +1,80 @@
import pytest
import numpy as np

from skimage._shared.testing import assert_array_equal
from skimage import data
from skimage.feature import BRIEF, corner_peaks, corner_harris
from skimage._shared import testing


def test_color_image_unsupported_error():
    """Brief descriptors can be evaluated on gray-scale images only."""
    img = np.zeros((20, 20, 3))
    keypoints = np.asarray([[7, 5], [11, 13]])
    with testing.raises(ValueError):
        BRIEF().extract(img, keypoints)


@pytest.mark.parametrize('dtype', ['float32', 'float64', 'uint8', 'int'])
def test_normal_mode(dtype):
    """Verify the computed BRIEF descriptors with expected for normal mode."""
    img = data.coins().astype(dtype)

    keypoints = corner_peaks(corner_harris(img), min_distance=5,
                             threshold_abs=0, threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2)

    extractor.extract(img, keypoints[:8])

    expected = np.array([[1, 0, 1, 0, 0, 1, 0, 1],
                         [1, 1, 1, 0, 1, 0, 1, 1],
                         [1, 0, 1, 0, 0, 1, 0, 1],
                         [0, 1, 0, 0, 1, 0, 1, 0],
                         [1, 1, 1, 0, 0, 0, 1, 1],
                         [1, 1, 1, 0, 1, 1, 1, 1],
                         [1, 0, 1, 0, 0, 1, 0, 1],
                         [0, 0, 0, 0, 0, 1, 0, 0]], dtype=bool)

    assert_array_equal(extractor.descriptors, expected)


@pytest.mark.parametrize('dtype', ['float32', 'float64', 'uint8', 'int'])
def test_uniform_mode(dtype):
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins().astype(dtype)

    keypoints = corner_peaks(corner_harris(img), min_distance=5,
                             threshold_abs=0, threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')

    extractor.extract(img, keypoints[:8])

    expected = np.array([[1, 1, 0, 0, 0, 0, 0, 0],
                         [1, 1, 1, 0, 0, 1, 0, 0],
                         [1, 1, 0, 0, 1, 0, 0, 0],
                         [0, 0, 0, 1, 1, 1, 1, 1],
                         [1, 1, 1, 0, 0, 1, 0, 0],
                         [1, 1, 1, 1, 0, 1, 0, 0],
                         [1, 1, 0, 0, 0, 1, 0, 0],
                         [0, 1, 1, 1, 0, 1, 1, 1]], dtype=bool)

    assert_array_equal(extractor.descriptors, expected)


def test_unsupported_mode():
    with testing.raises(ValueError):
        BRIEF(mode='foobar')


@pytest.mark.parametrize('dtype', ['float32', 'float64', 'uint8', 'int'])
def test_border(dtype):
    img = np.zeros((100, 100), dtype=dtype)
    keypoints = np.array([[1, 1], [20, 20], [50, 50], [80, 80]])

    extractor = BRIEF(patch_size=41)
    extractor.extract(img, keypoints)

    assert extractor.descriptors.shape[0] == 3
    assert_array_equal(extractor.mask, (False, True, True, True))
121
venv/Lib/site-packages/skimage/feature/tests/test_canny.py
Normal file
|
@ -0,0 +1,121 @@
|
|||
import unittest
|
||||
import numpy as np
|
||||
from skimage._shared.testing import assert_equal
|
||||
from scipy.ndimage import binary_dilation, binary_erosion
|
||||
import skimage.feature as F
|
||||
from skimage import data, img_as_float
|
||||
|
||||
|
||||
class TestCanny(unittest.TestCase):
|
||||
def test_00_00_zeros(self):
|
||||
'''Test that the Canny filter finds no points for a blank field'''
|
||||
result = F.canny(np.zeros((20, 20)), 4, 0, 0, np.ones((20, 20), bool))
|
||||
self.assertFalse(np.any(result))
|
||||
|
||||
def test_00_01_zeros_mask(self):
|
||||
'''Test that the Canny filter finds no points in a masked image'''
|
||||
result = (F.canny(np.random.uniform(size=(20, 20)), 4, 0, 0,
|
||||
np.zeros((20, 20), bool)))
|
||||
self.assertFalse(np.any(result))
|
||||
|
||||
def test_01_01_circle(self):
|
||||
'''Test that the Canny filter finds the outlines of a circle'''
|
||||
i, j = np.mgrid[-200:200, -200:200].astype(float) / 200
|
||||
c = np.abs(np.sqrt(i * i + j * j) - .5) < .02
|
||||
result = F.canny(c.astype(float), 4, 0, 0, np.ones(c.shape, bool))
|
||||
#
|
||||
# erode and dilate the circle to get rings that should contain the
|
||||
# outlines
|
||||
#
|
||||
cd = binary_dilation(c, iterations=3)
|
||||
ce = binary_erosion(c, iterations=3)
|
||||
cde = np.logical_and(cd, np.logical_not(ce))
|
||||
self.assertTrue(np.all(cde[result]))
|
||||
#
|
||||
# The circle has a radius of 100. There are two rings here, one
|
||||
# for the inside edge and one for the outside. So that's
|
||||
# 100 * 2 * 2 * 3 for those places where pi is still 3.
|
||||
# The edge contains both pixels if there's a tie, so we
|
||||
# bump the count a little.
|
||||
point_count = np.sum(result)
|
||||
self.assertTrue(point_count > 1200)
|
||||
self.assertTrue(point_count < 1600)
|
||||
|
||||
def test_01_02_circle_with_noise(self):
|
||||
'''Test that the Canny filter finds the circle outlines
|
||||
in a noisy image'''
|
||||
np.random.seed(0)
|
||||
i, j = np.mgrid[-200:200, -200:200].astype(float) / 200
|
||||
c = np.abs(np.sqrt(i * i + j * j) - .5) < .02
|
||||
cf = c.astype(float) * .5 + np.random.uniform(size=c.shape) * .5
|
||||
result = F.canny(cf, 4, .1, .2, np.ones(c.shape, bool))
|
||||
#
|
||||
# erode and dilate the circle to get rings that should contain the
|
||||
# outlines
|
||||
#
|
||||
cd = binary_dilation(c, iterations=4)
|
||||
ce = binary_erosion(c, iterations=4)
|
||||
cde = np.logical_and(cd, np.logical_not(ce))
|
||||
self.assertTrue(np.all(cde[result]))
|
||||
point_count = np.sum(result)
|
||||
self.assertTrue(point_count > 1200)
|
||||
self.assertTrue(point_count < 1600)
|
||||
|
||||
def test_image_shape(self):
|
||||
self.assertRaises(ValueError, F.canny, np.zeros((20, 20, 20)), 4, 0, 0)
|
||||
|
||||
def test_mask_none(self):
|
||||
result1 = F.canny(np.zeros((20, 20)), 4, 0, 0, np.ones((20, 20), bool))
|
||||
result2 = F.canny(np.zeros((20, 20)), 4, 0, 0)
|
||||
self.assertTrue(np.all(result1 == result2))
|
||||
|
||||
def test_use_quantiles(self):
|
||||
image = img_as_float(data.camera()[::50, ::50])
|
||||
|
||||
# Correct output produced manually with quantiles
|
||||
# of 0.8 and 0.6 for high and low respectively
|
||||
correct_output = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
|
||||
[0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0],
|
||||
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
|
||||
[0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0],
|
||||
[0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
|
||||
[0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
|
||||
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
|
||||
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
|
||||
|
||||
result = F.canny(image, low_threshold=0.6, high_threshold=0.8, use_quantiles=True)
|
||||
|
||||
assert_equal(result, correct_output)
|
||||
|
||||
def test_invalid_use_quantiles(self):
|
||||
image = img_as_float(data.camera()[::50, ::50])
|
||||
|
||||
self.assertRaises(ValueError, F.canny, image, use_quantiles=True,
|
||||
low_threshold=0.5, high_threshold=3.6)
|
||||
|
||||
self.assertRaises(ValueError, F.canny, image, use_quantiles=True,
|
||||
low_threshold=-5, high_threshold=0.5)
|
||||
|
||||
self.assertRaises(ValueError, F.canny, image, use_quantiles=True,
|
||||
low_threshold=99, high_threshold=0.9)
|
||||
|
||||
self.assertRaises(ValueError, F.canny, image, use_quantiles=True,
|
||||
low_threshold=0.5, high_threshold=-100)
|
||||
|
||||
# Example from issue #4282
|
||||
image = data.camera()
|
||||
self.assertRaises(ValueError, F.canny, image, use_quantiles=True,
|
||||
low_threshold=50, high_threshold=150)
|
||||
|
||||
def test_dtype(self):
|
||||
"""Check that the same output is produced regardless of image dtype."""
|
||||
image_uint8 = data.camera()
|
||||
image_float = img_as_float(image_uint8)
|
||||
|
||||
result_uint8 = F.canny(image_uint8)
|
||||
result_float = F.canny(image_float)
|
||||
|
||||
assert_equal(result_uint8, result_float)
|
23
venv/Lib/site-packages/skimage/feature/tests/test_cascade.py
Normal file
@ -0,0 +1,23 @@
import numpy as np

import skimage.data as data
from skimage.feature import Cascade


def test_detector_astronaut():

    # Load the trained file from the module root.
    trained_file = data.lbp_frontal_face_cascade_filename()

    # Initialize the detector cascade.
    detector = Cascade(trained_file)

    img = data.astronaut()

    detected = detector.detect_multi_scale(img=img,
                                           scale_factor=1.2,
                                           step_ratio=1,
                                           min_size=(60, 60),
                                           max_size=(123, 123))

    assert len(detected) == 1, 'One face should be detected.'
105
venv/Lib/site-packages/skimage/feature/tests/test_censure.py
Normal file
|
@ -0,0 +1,105 @@
|
|||
import numpy as np
|
||||
from skimage._shared.testing import assert_array_equal
|
||||
from skimage.data import moon
|
||||
from skimage.feature import CENSURE
|
||||
from skimage._shared.testing import test_parallel
|
||||
from skimage._shared import testing
|
||||
from skimage.transform import rescale
|
||||
|
||||
|
||||
img = moon()
|
||||
np.random.seed(0)
|
||||
|
||||
|
||||
def test_censure_on_rectangular_images():
|
||||
"""Censure feature detector should work on 2D image of any shape."""
|
||||
rect_image = np.random.rand(300, 200)
|
||||
square_image = np.random.rand(200, 200)
|
||||
CENSURE().detect((square_image))
|
||||
CENSURE().detect((rect_image))
|
||||
|
||||
|
||||
def test_keypoints_censure_color_image_unsupported_error():
|
||||
"""Censure keypoints can be extracted from gray-scale images only."""
|
||||
with testing.raises(ValueError):
|
||||
CENSURE().detect(np.zeros((20, 20, 3)))
|
||||
|
||||
|
||||
def test_keypoints_censure_mode_validity_error():
|
||||
"""Mode argument in keypoints_censure can be either DoB, Octagon or
|
||||
STAR."""
|
||||
with testing.raises(ValueError):
|
||||
CENSURE(mode='dummy')
|
||||
|
||||
|
||||
def test_keypoints_censure_scale_range_error():
|
||||
"""Difference between the the max_scale and min_scale parameters in
|
||||
keypoints_censure should be greater than or equal to two."""
|
||||
with testing.raises(ValueError):
|
||||
CENSURE(min_scale=1, max_scale=2)
|
||||
|
||||
|
||||
def test_keypoints_censure_moon_image_dob():
|
||||
"""Verify the actual Censure keypoints and their corresponding scale with
|
||||
the expected values for DoB filter."""
|
||||
detector = CENSURE()
|
||||
detector.detect(img)
|
||||
expected_keypoints = np.array([[ 21, 497],
|
||||
[ 36, 46],
|
||||
[119, 350],
|
||||
[185, 177],
|
||||
[287, 250],
|
||||
[357, 239],
|
||||
[463, 116],
|
||||
[464, 132],
|
||||
[467, 260]])
|
||||
expected_scales = np.array([3, 4, 4, 2, 2, 3, 2, 2, 2])
|
||||
|
||||
assert_array_equal(expected_keypoints, detector.keypoints)
|
||||
assert_array_equal(expected_scales, detector.scales)
|
||||
|
||||
|
||||
@test_parallel()
|
||||
def test_keypoints_censure_moon_image_octagon():
|
||||
"""Verify the actual Censure keypoints and their corresponding scale with
|
||||
the expected values for Octagon filter."""
|
||||
|
||||
detector = CENSURE(mode='octagon')
|
||||
# quarter scale image for speed
|
||||
detector.detect(rescale(img, 0.25,
|
||||
multichannel=False,
|
||||
anti_aliasing=False,
|
||||
mode='constant'))
|
||||
expected_keypoints = np.array([[ 23, 27],
|
||||
[ 29, 89],
|
||||
[ 31, 87],
|
||||
[106, 59],
|
||||
[111, 67]])
|
||||
|
||||
expected_scales = np.array([3, 2, 5, 2, 4])
|
||||
|
||||
assert_array_equal(expected_keypoints, detector.keypoints)
|
||||
assert_array_equal(expected_scales, detector.scales)
|
||||
|
||||
|
||||
def test_keypoints_censure_moon_image_star():
|
||||
"""Verify the actual Censure keypoints and their corresponding scale with
|
||||
the expected values for STAR filter."""
|
||||
detector = CENSURE(mode='star')
|
||||
# quarter scale image for speed
|
||||
detector.detect(rescale(img, 0.25,
|
||||
multichannel=False,
|
||||
anti_aliasing=False,
|
||||
mode='constant'))
|
||||
expected_keypoints = np.array([[ 23, 27],
|
||||
[ 29, 89],
|
||||
[ 30, 86],
|
||||
[107, 59],
|
||||
[109, 64],
|
||||
[111, 67],
|
||||
[113, 70]])
|
||||
|
||||
expected_scales = np.array([3, 2, 4, 2, 5, 3, 2])
|
||||
|
||||
assert_array_equal(expected_keypoints, detector.keypoints)
|
||||
assert_array_equal(expected_scales, detector.scales)
|
493
venv/Lib/site-packages/skimage/feature/tests/test_corner.py
Normal file
|
@ -0,0 +1,493 @@
|
|||
import numpy as np
|
||||
from skimage._shared.testing import assert_array_equal, assert_almost_equal
|
||||
from skimage import data
|
||||
from skimage import img_as_float
|
||||
from skimage import draw
|
||||
from skimage.color import rgb2gray
|
||||
from skimage.morphology import octagon
|
||||
from skimage._shared.testing import test_parallel
|
||||
from skimage._shared._warnings import expected_warnings
|
||||
from skimage._shared import testing
|
||||
import pytest
|
||||
|
||||
from skimage.feature import (corner_moravec, corner_harris, corner_shi_tomasi,
|
||||
corner_subpix, peak_local_max, corner_peaks,
|
||||
corner_kitchen_rosenfeld, corner_foerstner,
|
||||
corner_fast, corner_orientations,
|
||||
structure_tensor, structure_tensor_eigvals,
|
||||
hessian_matrix, hessian_matrix_eigvals,
|
||||
hessian_matrix_det, shape_index)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def im3d():
|
||||
r = 10
|
||||
pad = 10
|
||||
im3 = draw.ellipsoid(r, r, r)
|
||||
im3 = np.pad(im3, pad, mode='constant').astype(np.uint8)
|
||||
return im3
|
||||
|
||||
|
||||
def test_structure_tensor():
|
||||
square = np.zeros((5, 5))
|
||||
square[2, 2] = 1
|
||||
Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
|
||||
assert_array_equal(Axx, np.array([[ 0, 0, 0, 0, 0],
|
||||
[ 0, 1, 0, 1, 0],
|
||||
[ 0, 4, 0, 4, 0],
|
||||
[ 0, 1, 0, 1, 0],
|
||||
[ 0, 0, 0, 0, 0]]))
|
||||
assert_array_equal(Axy, np.array([[ 0, 0, 0, 0, 0],
|
||||
[ 0, 1, 0, -1, 0],
|
||||
[ 0, 0, 0, -0, 0],
|
||||
[ 0, -1, -0, 1, 0],
|
||||
[ 0, 0, 0, 0, 0]]))
|
||||
assert_array_equal(Ayy, np.array([[ 0, 0, 0, 0, 0],
|
||||
[ 0, 1, 4, 1, 0],
|
||||
[ 0, 0, 0, 0, 0],
|
||||
[ 0, 1, 4, 1, 0],
|
||||
[ 0, 0, 0, 0, 0]]))
|
||||
|
||||
|
||||
def test_hessian_matrix():
|
||||
square = np.zeros((5, 5))
|
||||
square[2, 2] = 4
|
||||
Hrr, Hrc, Hcc = hessian_matrix(square, sigma=0.1, order='rc')
|
||||
assert_almost_equal(Hrr, np.array([[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[2, 0, -2, 0, 2],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0]]))
|
||||
|
||||
assert_almost_equal(Hrc, np.array([[0, 0, 0, 0, 0],
|
||||
[0, 1, 0, -1, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, -1, 0, 1, 0],
|
||||
[0, 0, 0, 0, 0]]))
|
||||
|
||||
assert_almost_equal(Hcc, np.array([[0, 0, 2, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, -2, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 2, 0, 0]]))
|
||||
|
||||
|
||||
def test_hessian_matrix_3d():
|
||||
cube = np.zeros((5, 5, 5))
|
||||
cube[2, 2, 2] = 4
|
||||
Hs = hessian_matrix(cube, sigma=0.1, order='rc')
|
||||
assert len(Hs) == 6, ("incorrect number of Hessian images (%i) for 3D" %
|
||||
len(Hs))
|
||||
assert_almost_equal(Hs[2][:, 2, :], np.array([[0, 0, 0, 0, 0],
|
||||
[0, 1, 0, -1, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, -1, 0, 1, 0],
|
||||
[0, 0, 0, 0, 0]]))
|
||||
|
||||
|
||||
def test_structure_tensor_eigvals():
|
||||
square = np.zeros((5, 5))
|
||||
square[2, 2] = 1
|
||||
Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
|
||||
l1, l2 = structure_tensor_eigvals(Axx, Axy, Ayy)
|
||||
assert_array_equal(l1, np.array([[0, 0, 0, 0, 0],
|
||||
[0, 2, 4, 2, 0],
|
||||
[0, 4, 0, 4, 0],
|
||||
[0, 2, 4, 2, 0],
|
||||
[0, 0, 0, 0, 0]]))
|
||||
assert_array_equal(l2, np.array([[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0]]))
|
||||
|
||||
|
||||
def test_hessian_matrix_eigvals():
|
||||
square = np.zeros((5, 5))
|
||||
square[2, 2] = 4
|
||||
H = hessian_matrix(square, sigma=0.1, order='rc')
|
||||
l1, l2 = hessian_matrix_eigvals(H)
|
||||
assert_almost_equal(l1, np.array([[0, 0, 2, 0, 0],
|
||||
[0, 1, 0, 1, 0],
|
||||
[2, 0, -2, 0, 2],
|
||||
[0, 1, 0, 1, 0],
|
||||
[0, 0, 2, 0, 0]]))
|
||||
assert_almost_equal(l2, np.array([[0, 0, 0, 0, 0],
|
||||
[0, -1, 0, -1, 0],
|
||||
[0, 0, -2, 0, 0],
|
||||
[0, -1, 0, -1, 0],
|
||||
[0, 0, 0, 0, 0]]))
|
||||
|
||||
|
||||
def test_hessian_matrix_eigvals_3d(im3d):
|
||||
H = hessian_matrix(im3d)
|
||||
E = hessian_matrix_eigvals(H)
|
||||
# test descending order:
|
||||
e0, e1, e2 = E
|
||||
assert np.all(e0 >= e1) and np.all(e1 >= e2)
|
||||
|
||||
E0, E1, E2 = E[:, E.shape[1] // 2] # cross section
|
||||
row_center, col_center = np.array(E0.shape) // 2
|
||||
circles = [draw.circle_perimeter(row_center, col_center, radius,
|
||||
shape=E0.shape)
|
||||
for radius in range(1, E0.shape[1] // 2 - 1)]
|
||||
response0 = np.array([np.mean(E0[c]) for c in circles])
|
||||
response2 = np.array([np.mean(E2[c]) for c in circles])
|
||||
# eigenvalues are negative just inside the sphere, positive just outside
|
||||
assert np.argmin(response2) < np.argmax(response0)
|
||||
assert np.min(response2) < 0
|
||||
assert np.max(response0) > 0
|
||||
|
||||
|
||||
@test_parallel()
|
||||
def test_hessian_matrix_det():
|
||||
image = np.zeros((5, 5))
|
||||
image[2, 2] = 1
|
||||
det = hessian_matrix_det(image, 5)
|
||||
assert_almost_equal(det, 0, decimal=3)
|
||||
|
||||
|
||||
def test_hessian_matrix_det_3d(im3d):
|
||||
D = hessian_matrix_det(im3d)
|
||||
D0 = D[D.shape[0] // 2]
|
||||
row_center, col_center = np.array(D0.shape) // 2
|
||||
# testing in 3D is hard. We test this by showing that you get the
|
||||
# expected flat-then-low-then-high 2nd derivative response in a circle
|
||||
# around the midplane of the sphere.
|
||||
circles = [draw.circle_perimeter(row_center, col_center, r, shape=D0.shape)
|
||||
for r in range(1, D0.shape[1] // 2 - 1)]
|
||||
response = np.array([np.mean(D0[c]) for c in circles])
|
||||
lowest = np.argmin(response)
|
||||
highest = np.argmax(response)
|
||||
assert lowest < highest
|
||||
assert response[lowest] < 0
|
||||
assert response[highest] > 0
|
||||
|
||||
|
||||
def test_shape_index():
|
||||
# software floating point arm doesn't raise a warning on divide by zero
|
||||
# https://github.com/scikit-image/scikit-image/issues/3335
|
||||
square = np.zeros((5, 5))
|
||||
square[2, 2] = 4
|
||||
with expected_warnings([r'divide by zero|\A\Z', r'invalid value|\A\Z']):
|
||||
s = shape_index(square, sigma=0.1)
|
||||
assert_almost_equal(
|
||||
s, np.array([[ np.nan, np.nan, -0.5, np.nan, np.nan],
|
||||
[ np.nan, 0, np.nan, 0, np.nan],
|
||||
[ -0.5, np.nan, -1, np.nan, -0.5],
|
||||
[ np.nan, 0, np.nan, 0, np.nan],
|
||||
[ np.nan, np.nan, -0.5, np.nan, np.nan]])
|
||||
)
|
||||
|
||||
|
||||
@test_parallel()
|
||||
def test_square_image():
|
||||
im = np.zeros((50, 50)).astype(float)
|
||||
im[:25, :25] = 1.
|
||||
|
||||
# Moravec
|
||||
results = peak_local_max(corner_moravec(im),
|
||||
min_distance=10, threshold_rel=0)
|
||||
# interest points along edge
|
||||
assert len(results) == 57
|
||||
|
||||
# Harris
|
||||
results = peak_local_max(corner_harris(im, method='k'),
|
||||
min_distance=10, threshold_rel=0)
|
||||
# interest at corner
|
||||
assert len(results) == 1
|
||||
|
||||
results = peak_local_max(corner_harris(im, method='eps'),
|
||||
min_distance=10, threshold_rel=0)
|
||||
# interest at corner
|
||||
assert len(results) == 1
|
||||
|
||||
# Shi-Tomasi
|
||||
results = peak_local_max(corner_shi_tomasi(im),
|
||||
min_distance=10, threshold_rel=0)
|
||||
# interest at corner
|
||||
assert len(results) == 1
|
||||
|
||||
|
||||
def test_noisy_square_image():
|
||||
im = np.zeros((50, 50)).astype(float)
|
||||
im[:25, :25] = 1.
|
||||
np.random.seed(seed=1234)
|
||||
im = im + np.random.uniform(size=im.shape) * .2
|
||||
|
||||
# Moravec
|
||||
results = peak_local_max(corner_moravec(im),
|
||||
min_distance=10, threshold_rel=0)
|
||||
# undefined number of interest points
|
||||
assert results.any()
|
||||
|
||||
# Harris
|
||||
results = peak_local_max(corner_harris(im, method='k'),
|
||||
min_distance=10, threshold_rel=0)
|
||||
assert len(results) == 1
|
||||
results = peak_local_max(corner_harris(im, method='eps'),
|
||||
min_distance=10, threshold_rel=0)
|
||||
assert len(results) == 1
|
||||
|
||||
# Shi-Tomasi
|
||||
results = peak_local_max(corner_shi_tomasi(im, sigma=1.5),
|
||||
min_distance=10, threshold_rel=0)
|
||||
assert len(results) == 1
|
||||
|
||||
|
||||
def test_squared_dot():
|
||||
im = np.zeros((50, 50))
|
||||
im[4:8, 4:8] = 1
|
||||
im = img_as_float(im)
|
||||
|
||||
# Moravec fails
|
||||
|
||||
# Harris
|
||||
results = peak_local_max(corner_harris(im),
|
||||
min_distance=10, threshold_rel=0)
|
||||
assert (results == np.array([[6, 6]])).all()
|
||||
|
||||
# Shi-Tomasi
|
||||
results = peak_local_max(corner_shi_tomasi(im),
|
||||
min_distance=10, threshold_rel=0)
|
||||
assert (results == np.array([[6, 6]])).all()
|
||||
|
||||
|
||||
def test_rotated_img():
|
||||
"""
|
||||
The Harris filter should yield the same results with an image and its
|
||||
rotation.
|
||||
"""
|
||||
im = img_as_float(data.astronaut().mean(axis=2))
|
||||
im_rotated = im.T
|
||||
|
||||
# Moravec
|
||||
results = peak_local_max(corner_moravec(im),
|
||||
min_distance=10, threshold_rel=0)
|
||||
results_rotated = peak_local_max(corner_moravec(im_rotated),
|
||||
min_distance=10, threshold_rel=0)
|
||||
assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
|
||||
assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()
|
||||
|
||||
# Harris
|
||||
results = peak_local_max(corner_harris(im),
|
||||
min_distance=10, threshold_rel=0)
|
||||
results_rotated = peak_local_max(corner_harris(im_rotated),
|
||||
min_distance=10, threshold_rel=0)
|
||||
assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
|
||||
assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()
|
||||
|
||||
# Shi-Tomasi
|
||||
results = peak_local_max(corner_shi_tomasi(im),
|
||||
min_distance=10, threshold_rel=0)
|
||||
results_rotated = peak_local_max(corner_shi_tomasi(im_rotated),
|
||||
min_distance=10, threshold_rel=0)
|
||||
assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
|
||||
assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()
|
||||
|
||||
|
||||
def test_subpix_edge():
|
||||
img = np.zeros((50, 50))
|
||||
img[:25, :25] = 255
|
||||
img[25:, 25:] = 255
|
||||
corner = peak_local_max(corner_harris(img),
|
||||
min_distance=10, threshold_rel=0, num_peaks=1)
|
||||
subpix = corner_subpix(img, corner)
|
||||
assert_array_equal(subpix[0], (24.5, 24.5))
|
||||
|
||||
|
||||
def test_subpix_dot():
|
||||
img = np.zeros((50, 50))
|
||||
img[25, 25] = 255
|
||||
corner = peak_local_max(corner_harris(img),
|
||||
min_distance=10, threshold_rel=0, num_peaks=1)
|
||||
subpix = corner_subpix(img, corner)
|
||||
assert_array_equal(subpix[0], (25, 25))
|
||||
|
||||
|
||||
def test_subpix_no_class():
|
||||
img = np.zeros((50, 50))
|
||||
subpix = corner_subpix(img, np.array([[25, 25]]))
|
||||
assert_array_equal(subpix[0], (np.nan, np.nan))
|
||||
|
||||
img[25, 25] = 1e-10
|
||||
corner = peak_local_max(corner_harris(img),
|
||||
min_distance=10, threshold_rel=0, num_peaks=1)
|
||||
subpix = corner_subpix(img, np.array([[25, 25]]))
|
||||
assert_array_equal(subpix[0], (np.nan, np.nan))
|
||||
|
||||
|
||||
def test_subpix_border():
|
||||
img = np.zeros((50, 50))
|
||||
img[1:25, 1:25] = 255
|
||||
img[25:-1, 25:-1] = 255
|
||||
corner = corner_peaks(corner_harris(img), threshold_rel=0)
|
||||
subpix = corner_subpix(img, corner, window_size=11)
|
||||
ref = np.array([[24.5, 24.5],
|
||||
[0.52040816, 0.52040816],
|
||||
[0.52040816, 24.47959184],
|
||||
[24.47959184, 0.52040816],
|
||||
[24.52040816, 48.47959184],
|
||||
[48.47959184, 24.52040816],
|
||||
[48.47959184, 48.47959184]])
|
||||
|
||||
assert_almost_equal(subpix, ref)
|
||||
|
||||
|
||||
def test_num_peaks():
|
||||
"""For a bunch of different values of num_peaks, check that
|
||||
peak_local_max returns exactly the right amount of peaks. Test
|
||||
is run on the astronaut image in order to produce a sufficient number of corners"""
|
||||
|
||||
img_corners = corner_harris(rgb2gray(data.astronaut()))
|
||||
|
||||
for i in range(20):
|
||||
n = np.random.randint(1, 21)
|
||||
results = peak_local_max(img_corners,
|
||||
min_distance=10, threshold_rel=0, num_peaks=n)
|
||||
assert (results.shape[0] == n)
|
||||
|
||||
|
||||
def test_corner_peaks():
|
||||
response = np.zeros((10, 10))
|
||||
response[2:5, 2:5] = 1
|
||||
response[8:10, 0:2] = 1
|
||||
|
||||
corners = corner_peaks(response, exclude_border=False, min_distance=10,
|
||||
threshold_rel=0)
|
||||
assert corners.shape == (1, 2)
|
||||
|
||||
corners = corner_peaks(response, exclude_border=False, min_distance=5,
|
||||
threshold_rel=0)
|
||||
assert corners.shape == (2, 2)
|
||||
|
||||
with pytest.warns(FutureWarning,
|
||||
match="Until version 0.16, threshold_rel.*"):
|
||||
corners = corner_peaks(response, exclude_border=False, min_distance=1)
|
||||
assert corners.shape == (5, 2)
|
||||
|
||||
corners = corner_peaks(response, exclude_border=False, min_distance=1,
|
||||
indices=False)
|
||||
assert np.sum(corners) == 5
|
||||
|
||||
|
||||
def test_blank_image_nans():
|
||||
"""Some of the corner detectors had a weakness in terms of returning
|
||||
NaN when presented with regions of constant intensity. This should
|
||||
be fixed by now. We test whether each detector returns something
|
||||
finite in the case of constant input"""
|
||||
|
||||
detectors = [corner_moravec, corner_harris, corner_shi_tomasi,
|
||||
corner_kitchen_rosenfeld, corner_foerstner]
|
||||
constant_image = np.zeros((20, 20))
|
||||
|
||||
for det in detectors:
|
||||
response = det(constant_image)
|
||||
assert np.all(np.isfinite(response))
|
||||
|
||||
|
||||
def test_corner_fast_image_unsupported_error():
|
||||
img = np.zeros((20, 20, 3))
|
||||
with testing.raises(ValueError):
|
||||
corner_fast(img)
|
||||
|
||||
|
||||
@test_parallel()
|
||||
def test_corner_fast_astronaut():
|
||||
img = rgb2gray(data.astronaut())
|
||||
expected = np.array([[444, 310],
|
||||
[374, 171],
|
||||
[249, 171],
|
||||
[492, 139],
|
||||
[403, 162],
|
||||
[496, 266],
|
||||
[362, 328],
|
||||
[476, 250],
|
||||
[353, 172],
|
||||
[346, 279],
|
||||
[494, 169],
|
||||
[177, 156],
|
||||
[413, 181],
|
||||
[213, 117],
|
||||
[390, 149],
|
||||
[140, 205],
|
||||
[232, 266],
|
||||
[489, 155],
|
||||
[387, 195],
|
||||
[101, 198],
|
||||
[363, 192],
|
||||
[364, 147],
|
||||
[300, 244],
|
||||
[325, 245],
|
||||
[141, 242],
|
||||
[401, 197],
|
||||
[197, 148],
|
||||
[339, 242],
|
||||
[188, 113],
|
||||
[362, 252],
|
||||
[379, 183],
|
||||
[358, 307],
|
||||
[245, 137],
|
||||
[369, 159],
|
||||
[464, 251],
|
||||
[305, 57],
|
||||
[223, 375]])
|
||||
actual = corner_peaks(corner_fast(img, 12, 0.3),
|
||||
min_distance=10, threshold_rel=0)
|
||||
assert_array_equal(actual, expected)
|
||||
|
||||
|
||||
def test_corner_orientations_image_unsupported_error():
|
||||
img = np.zeros((20, 20, 3))
|
||||
with testing.raises(ValueError):
|
||||
corner_orientations(
|
||||
img,
|
||||
np.asarray([[7, 7]]), np.ones((3, 3)))
|
||||
|
||||
|
||||
def test_corner_orientations_even_shape_error():
|
||||
img = np.zeros((20, 20))
|
||||
with testing.raises(ValueError):
|
||||
corner_orientations(
|
||||
img,
|
||||
np.asarray([[7, 7]]), np.ones((4, 4)))
|
||||
|
||||
|
||||
@test_parallel()
|
||||
def test_corner_orientations_astronaut():
|
||||
img = rgb2gray(data.astronaut())
|
||||
corners = corner_peaks(corner_fast(img, 11, 0.35),
|
||||
min_distance=10, threshold_abs=0, threshold_rel=0.1)
|
||||
expected = np.array([-4.40598471e-01, -1.46554357e+00,
|
||||
2.39291733e+00, -1.63869275e+00,
|
||||
1.45931342e+00, -1.64397304e+00,
|
||||
-1.76069982e+00, 1.09650167e+00,
|
||||
-1.65449964e+00, 1.19134149e+00,
|
||||
5.46905279e-02, 2.17103132e+00,
|
||||
8.12701702e-01, -1.22091334e-01,
|
||||
-2.01162417e+00, 1.25854853e+00,
|
||||
3.05330950e+00, 2.01197383e+00,
|
||||
1.07812134e+00, 3.09780364e+00,
|
||||
-3.49561988e-01, 2.43573659e+00,
|
||||
3.14918803e-01, -9.88548213e-01,
|
||||
-1.88247204e-01, 2.47305654e+00,
|
||||
-2.99143370e+00, 1.47154532e+00,
|
||||
-6.61151410e-01, -1.68885773e+00,
|
||||
-3.09279990e-01, -2.81524886e+00,
|
||||
-1.75220190e+00, -1.69230287e+00,
|
||||
-7.52950306e-04])
|
||||
|
||||
actual = corner_orientations(img, corners, octagon(3, 2))
|
||||
assert_almost_equal(actual, expected)
|
||||
|
||||
|
||||
def test_corner_orientations_square():
|
||||
square = np.zeros((12, 12))
|
||||
square[3:9, 3:9] = 1
|
||||
corners = corner_peaks(corner_fast(square, 9),
|
||||
min_distance=1, threshold_rel=0)
|
||||
actual_orientations = corner_orientations(square, corners, octagon(3, 2))
|
||||
actual_orientations_degrees = np.rad2deg(actual_orientations)
|
||||
expected_orientations_degree = np.array([45, 135, -45, -135])
|
||||
assert_array_equal(actual_orientations_degrees,
|
||||
expected_orientations_degree)
|
102
venv/Lib/site-packages/skimage/feature/tests/test_daisy.py
Normal file
@ -0,0 +1,102 @@
import numpy as np
from skimage._shared.testing import assert_almost_equal
from numpy import sqrt, ceil

from skimage import data
from skimage import img_as_float
from skimage.feature import daisy
from skimage._shared import testing


def test_daisy_color_image_unsupported_error():
    img = np.zeros((20, 20, 3))
    with testing.raises(ValueError):
        daisy(img)


def test_daisy_desc_dims():
    img = img_as_float(data.astronaut()[:128, :128].mean(axis=2))
    rings = 2
    histograms = 4
    orientations = 3
    descs = daisy(img, rings=rings, histograms=histograms,
                  orientations=orientations)
    assert(descs.shape[2] == (rings * histograms + 1) * orientations)

    rings = 4
    histograms = 5
    orientations = 13
    descs = daisy(img, rings=rings, histograms=histograms,
                  orientations=orientations)
    assert(descs.shape[2] == (rings * histograms + 1) * orientations)


def test_descs_shape():
    img = img_as_float(data.astronaut()[:256, :256].mean(axis=2))
    radius = 20
    step = 8
    descs = daisy(img, radius=radius, step=step)
    assert(descs.shape[0] == ceil((img.shape[0] - radius * 2) / float(step)))
    assert(descs.shape[1] == ceil((img.shape[1] - radius * 2) / float(step)))

    img = img[:-1, :-2]
    radius = 5
    step = 3
    descs = daisy(img, radius=radius, step=step)
    assert(descs.shape[0] == ceil((img.shape[0] - radius * 2) / float(step)))
    assert(descs.shape[1] == ceil((img.shape[1] - radius * 2) / float(step)))


def test_daisy_sigmas_and_radii():
    img = img_as_float(data.astronaut()[:64, :64].mean(axis=2))
    sigmas = [1, 2, 3]
    radii = [1, 2]
    daisy(img, sigmas=sigmas, ring_radii=radii)


def test_daisy_incompatible_sigmas_and_radii():
    img = img_as_float(data.astronaut()[:64, :64].mean(axis=2))
    sigmas = [1, 2]
    radii = [1, 2]
    with testing.raises(ValueError):
        daisy(img, sigmas=sigmas, ring_radii=radii)


def test_daisy_normalization():
    img = img_as_float(data.astronaut()[:64, :64].mean(axis=2))

    descs = daisy(img, normalization='l1')
    for i in range(descs.shape[0]):
        for j in range(descs.shape[1]):
            assert_almost_equal(np.sum(descs[i, j, :]), 1)
    descs_ = daisy(img)
    assert_almost_equal(descs, descs_)

    descs = daisy(img, normalization='l2')
    for i in range(descs.shape[0]):
        for j in range(descs.shape[1]):
            assert_almost_equal(sqrt(np.sum(descs[i, j, :] ** 2)), 1)

    orientations = 8
    descs = daisy(img, orientations=orientations, normalization='daisy')
    desc_dims = descs.shape[2]
    for i in range(descs.shape[0]):
        for j in range(descs.shape[1]):
            for k in range(0, desc_dims, orientations):
                assert_almost_equal(sqrt(np.sum(
                    descs[i, j, k:k + orientations] ** 2)), 1)

    img = np.zeros((50, 50))
    descs = daisy(img, normalization='off')
    for i in range(descs.shape[0]):
        for j in range(descs.shape[1]):
            assert_almost_equal(np.sum(descs[i, j, :]), 0)

    with testing.raises(ValueError):
        daisy(img, normalization='does_not_exist')


def test_daisy_visualization():
    img = img_as_float(data.astronaut()[:32, :32].mean(axis=2))
    descs, descs_img = daisy(img, visualize=True)
    assert(descs_img.shape == (32, 32, 3))
153
venv/Lib/site-packages/skimage/feature/tests/test_haar.py
Normal file
|
@ -0,0 +1,153 @@
|
|||
from random import shuffle
|
||||
from itertools import chain
|
||||
|
||||
import pytest
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import assert_allclose
|
||||
from numpy.testing import assert_array_equal
|
||||
|
||||
from skimage.transform import integral_image
|
||||
from skimage.feature import haar_like_feature
|
||||
from skimage.feature import haar_like_feature_coord
|
||||
from skimage.feature import draw_haar_like_feature
|
||||
|
||||
|
||||
def test_haar_like_feature_error():
|
||||
img = np.ones((5, 5), dtype=np.float32)
|
||||
img_ii = integral_image(img)
|
||||
|
||||
feature_type = 'unknown_type'
|
||||
with pytest.raises(ValueError):
|
||||
haar_like_feature(img_ii, 0, 0, 5, 5, feature_type=feature_type)
|
||||
haar_like_feature_coord(5, 5, feature_type=feature_type)
|
||||
draw_haar_like_feature(img, 0, 0, 5, 5, feature_type=feature_type)
|
||||
|
||||
feat_coord, feat_type = haar_like_feature_coord(5, 5, 'type-2-x')
|
||||
with pytest.raises(ValueError):
|
||||
haar_like_feature(img_ii, 0, 0, 5, 5, feature_type=feat_type[:3],
|
||||
feature_coord=feat_coord)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("dtype", [np.uint8, np.int8,
|
||||
np.float32, np.float64])
|
||||
@pytest.mark.parametrize("feature_type,shape_feature,expected_feature_value",
|
||||
[('type-2-x', (84,), [0.]),
|
||||
('type-2-y', (84,), [0.]),
|
||||
('type-3-x', (42,), [-4., -3., -2., -1.]),
|
||||
('type-3-y', (42,), [-4., -3., -2., -1.]),
|
||||
('type-4', (36,), [0.])])
|
||||
def test_haar_like_feature(feature_type, shape_feature,
|
||||
expected_feature_value, dtype):
|
||||
# test Haar-like feature on a basic all-ones image
|
||||
img = np.ones((5, 5), dtype=dtype)
|
||||
img_ii = integral_image(img)
|
||||
haar_feature = haar_like_feature(img_ii, 0, 0, 5, 5,
|
||||
feature_type=feature_type)
|
||||
assert_allclose(np.sort(np.unique(haar_feature)), expected_feature_value)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("dtype", [np.uint8, np.int8,
|
||||
np.float32, np.float64])
|
||||
@pytest.mark.parametrize("feature_type", ['type-2-x', 'type-2-y',
|
||||
'type-3-x', 'type-3-y',
|
||||
'type-4'])
|
||||
def test_haar_like_feature_fused_type(dtype, feature_type):
|
||||
# check that the input type is kept
|
||||
img = np.ones((5, 5), dtype=dtype)
|
||||
img_ii = integral_image(img)
|
||||
expected_dtype = img_ii.dtype
|
||||
# to avoid overflow, unsigned type are converted to signed
|
||||
if 'uint' in expected_dtype.name:
|
||||
expected_dtype = np.dtype(expected_dtype.name.replace('u', ''))
|
||||
haar_feature = haar_like_feature(img_ii, 0, 0, 5, 5,
|
||||
feature_type=feature_type)
|
||||
assert haar_feature.dtype == expected_dtype
|
||||
|
||||
|
||||
def test_haar_like_feature_list():
|
||||
img = np.ones((5, 5), dtype=np.int8)
|
||||
img_ii = integral_image(img)
|
||||
feature_type = ['type-2-x', 'type-2-y', 'type-3-x', 'type-3-y', 'type-4']
|
||||
haar_list = haar_like_feature(img_ii, 0, 0, 5, 5,
|
||||
feature_type=feature_type)
|
||||
haar_all = haar_like_feature(img_ii, 0, 0, 5, 5)
|
||||
assert_array_equal(haar_list, haar_all)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("feature_type", ['type-2-x', 'type-2-y',
|
||||
'type-3-x', 'type-3-y',
|
||||
'type-4',
|
||||
['type-2-y', 'type-3-x',
|
||||
'type-4']])
|
||||
def test_haar_like_feature_precomputed(feature_type):
|
||||
img = np.ones((5, 5), dtype=np.int8)
|
||||
img_ii = integral_image(img)
|
||||
if isinstance(feature_type, list):
|
||||
# shuffle the order of the features to be sure that we output
|
||||
# the features in the same order
|
||||
shuffle(feature_type)
|
||||
feat_coord, feat_type = zip(*[haar_like_feature_coord(5, 5, feat_t)
|
||||
for feat_t in feature_type])
|
||||
feat_coord = np.concatenate(feat_coord)
|
||||
feat_type = np.concatenate(feat_type)
|
||||
else:
|
||||
feat_coord, feat_type = haar_like_feature_coord(5, 5, feature_type)
|
||||
haar_feature_precomputed = haar_like_feature(img_ii, 0, 0, 5, 5,
|
||||
feature_type=feat_type,
|
||||
feature_coord=feat_coord)
|
||||
haar_feature = haar_like_feature(img_ii, 0, 0, 5, 5, feature_type)
|
||||
assert_array_equal(haar_feature_precomputed, haar_feature)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("feature_type,height,width,expected_coord",
|
||||
[('type-2-x', 2, 2,
|
||||
[[[(0, 0), (0, 0)], [(0, 1), (0, 1)]],
|
||||
[[(1, 0), (1, 0)], [(1, 1), (1, 1)]]]),
|
||||
('type-2-y', 2, 2,
|
||||
[[[(0, 0), (0, 0)], [(1, 0), (1, 0)]],
|
||||
[[(0, 1), (0, 1)], [(1, 1), (1, 1)]]]),
|
||||
('type-3-x', 3, 3,
|
||||
[[[(0, 0), (0, 0)], [(0, 1), (0, 1)],
|
||||
[(0, 2), (0, 2)]],
|
||||
[[(0, 0), (1, 0)], [(0, 1), (1, 1)],
|
||||
[(0, 2), (1, 2)]],
|
||||
[[(1, 0), (1, 0)], [(1, 1), (1, 1)],
|
||||
[(1, 2), (1, 2)]],
|
||||
[[(1, 0), (2, 0)], [(1, 1), (2, 1)],
|
||||
[(1, 2), (2, 2)]],
|
||||
[[(2, 0), (2, 0)], [(2, 1), (2, 1)],
|
||||
[(2, 2), (2, 2)]]]),
|
||||
('type-3-y', 3, 3,
|
||||
[[[(0, 0), (0, 0)], [(1, 0), (1, 0)],
|
||||
[(2, 0), (2, 0)]],
|
||||
[[(0, 0), (0, 1)], [(1, 0), (1, 1)],
|
||||
[(2, 0), (2, 1)]],
|
||||
[[(0, 1), (0, 1)], [(1, 1), (1, 1)],
|
||||
[(2, 1), (2, 1)]],
|
||||
[[(0, 1), (0, 2)], [(1, 1), (1, 2)],
|
||||
[(2, 1), (2, 2)]],
|
||||
[[(0, 2), (0, 2)], [(1, 2), (1, 2)],
|
||||
[(2, 2), (2, 2)]]]),
|
||||
('type-4', 2, 2,
|
||||
[[[(0, 0), (0, 0)], [(0, 1), (0, 1)],
|
||||
[(1, 1), (1, 1)], [(1, 0), (1, 0)]]])])
|
||||
def test_haar_like_feature_coord(feature_type, height, width, expected_coord):
|
||||
feat_coord, feat_type = haar_like_feature_coord(width, height,
|
||||
feature_type)
|
||||
# convert the output to a full numpy array just for comparison
|
||||
feat_coord = np.array([hf for hf in feat_coord])
|
||||
assert_array_equal(feat_coord, expected_coord)
|
||||
assert np.all(feat_type == feature_type)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("max_n_features,nnz_values", [(None, 46),
|
||||
(1, 8)])
|
||||
def test_draw_haar_like_feature(max_n_features, nnz_values):
|
||||
img = np.zeros((5, 5), dtype=np.float32)
|
||||
coord, _ = haar_like_feature_coord(5, 5, 'type-4')
|
||||
image = draw_haar_like_feature(img, 0, 0, 5, 5, coord,
|
||||
max_n_features=max_n_features,
|
||||
random_state=0)
|
||||
assert image.shape == (5, 5, 3)
|
||||
assert np.count_nonzero(image) == nnz_values
|
256
venv/Lib/site-packages/skimage/feature/tests/test_hog.py
Normal file
|
@ -0,0 +1,256 @@
|
|||
import os
|
||||
import numpy as np
|
||||
from scipy import ndimage as ndi
|
||||
from skimage import color
|
||||
from skimage import data
|
||||
from skimage import feature
|
||||
from skimage import img_as_float
|
||||
from skimage import draw
|
||||
from skimage._shared.testing import assert_almost_equal, fetch
|
||||
from skimage._shared import testing
|
||||
|
||||
|
||||
def test_hog_output_size():
|
||||
img = img_as_float(data.astronaut()[:256, :].mean(axis=2))
|
||||
|
||||
fd = feature.hog(img, orientations=9, pixels_per_cell=(8, 8),
|
||||
cells_per_block=(1, 1), block_norm='L1')
|
||||
|
||||
assert len(fd) == 9 * (256 // 8) * (512 // 8)
|
||||
|
||||
|
||||
def test_hog_output_correctness_l1_norm():
|
||||
img = color.rgb2gray(data.astronaut())
|
||||
correct_output = np.load(fetch('data/astronaut_GRAY_hog_L1.npy'))
|
||||
|
||||
output = feature.hog(img, orientations=9, pixels_per_cell=(8, 8),
|
||||
cells_per_block=(3, 3), block_norm='L1',
|
||||
feature_vector=True, transform_sqrt=False,
|
||||
visualize=False)
|
||||
assert_almost_equal(output, correct_output)
|
||||
|
||||
|
||||
def test_hog_output_correctness_l2hys_norm():
|
||||
img = color.rgb2gray(data.astronaut())
|
||||
correct_output = np.load(fetch('data/astronaut_GRAY_hog_L2-Hys.npy'))
|
||||
|
||||
output = feature.hog(img, orientations=9, pixels_per_cell=(8, 8),
|
||||
cells_per_block=(3, 3), block_norm='L2-Hys',
|
||||
feature_vector=True, transform_sqrt=False,
|
||||
visualize=False)
|
||||
assert_almost_equal(output, correct_output)
|
||||
|
||||
|
||||
def test_hog_image_size_cell_size_mismatch():
|
||||
image = data.camera()[:150, :200]
|
||||
fd = feature.hog(image, orientations=9, pixels_per_cell=(8, 8),
|
||||
cells_per_block=(1, 1), block_norm='L1')
|
||||
assert len(fd) == 9 * (150 // 8) * (200 // 8)
|
||||
|
||||
|
||||
def test_hog_basic_orientations_and_data_types():
|
||||
# scenario:
|
||||
# 1) create image (with float values) where upper half is filled by
|
||||
# zeros, bottom half by 100
|
||||
# 2) create unsigned integer version of this image
|
||||
# 3) calculate feature.hog() for both images, both with 'transform_sqrt'
|
||||
# option enabled and disabled
|
||||
# 4) verify that all results are equal where expected
|
||||
# 5) verify that computed feature vector is as expected
|
||||
# 6) repeat the scenario for 90, 180 and 270 degrees rotated images
|
||||
|
||||
# size of testing image
|
||||
width = height = 35
|
||||
|
||||
image0 = np.zeros((height, width), dtype='float')
|
||||
image0[height // 2:] = 100
|
||||
|
||||
for rot in range(4):
|
||||
# rotate by 0, 90, 180 and 270 degrees
|
||||
image_float = np.rot90(image0, rot)
|
||||
|
||||
# create uint8 image from image_float
|
||||
image_uint8 = image_float.astype('uint8')
|
||||
|
||||
(hog_float, hog_img_float) = feature.hog(
|
||||
image_float, orientations=4, pixels_per_cell=(8, 8),
|
||||
cells_per_block=(1, 1), visualize=True, transform_sqrt=False,
|
||||
block_norm='L1')
|
||||
(hog_uint8, hog_img_uint8) = feature.hog(
|
||||
image_uint8, orientations=4, pixels_per_cell=(8, 8),
|
||||
cells_per_block=(1, 1), visualize=True, transform_sqrt=False,
|
||||
block_norm='L1')
|
||||
(hog_float_norm, hog_img_float_norm) = feature.hog(
|
||||
image_float, orientations=4, pixels_per_cell=(8, 8),
|
||||
cells_per_block=(1, 1), visualize=True, transform_sqrt=True,
|
||||
block_norm='L1')
|
||||
(hog_uint8_norm, hog_img_uint8_norm) = feature.hog(
|
||||
image_uint8, orientations=4, pixels_per_cell=(8, 8),
|
||||
cells_per_block=(1, 1), visualize=True, transform_sqrt=True,
|
||||
block_norm='L1')
|
||||
|
||||
# set to True to enable manual debugging with graphical output,
|
||||
# must be False for automatic testing
|
||||
if False:
|
||||
import matplotlib.pyplot as plt
|
||||
plt.figure()
|
||||
plt.subplot(2, 3, 1)
|
||||
plt.imshow(image_float)
|
||||
plt.colorbar()
|
||||
plt.title('image')
|
||||
plt.subplot(2, 3, 2)
|
||||
plt.imshow(hog_img_float)
|
||||
plt.colorbar()
|
||||
plt.title('HOG result visualisation (float img)')
|
||||
plt.subplot(2, 3, 5)
|
||||
plt.imshow(hog_img_uint8)
|
||||
plt.colorbar()
|
||||
plt.title('HOG result visualisation (uint8 img)')
|
||||
plt.subplot(2, 3, 3)
|
||||
plt.imshow(hog_img_float_norm)
|
||||
plt.colorbar()
|
||||
plt.title('HOG result (transform_sqrt) visualisation (float img)')
|
||||
plt.subplot(2, 3, 6)
|
||||
plt.imshow(hog_img_uint8_norm)
|
||||
plt.colorbar()
|
||||
plt.title('HOG result (transform_sqrt) visualisation (uint8 img)')
|
||||
plt.show()
|
||||
|
||||
# results (features and visualisation) for float and uint8 images must
|
||||
# be almost equal
|
||||
assert_almost_equal(hog_float, hog_uint8)
|
||||
assert_almost_equal(hog_img_float, hog_img_uint8)
|
||||
|
||||
# resulting features should be almost equal
|
||||
# when 'transform_sqrt' is enabled
|
||||
# or disabled (for current simple testing image)
|
||||
assert_almost_equal(hog_float, hog_float_norm, decimal=4)
|
||||
assert_almost_equal(hog_float, hog_uint8_norm, decimal=4)
|
||||
|
||||
# reshape resulting feature vector to matrix with 4 columns (each
|
||||
# corresponding to one of 4 directions); only one direction should
|
||||
# contain nonzero values (this is manually determined for testing
|
||||
# image)
|
||||
actual = np.max(hog_float.reshape(-1, 4), axis=0)
|
||||
|
||||
if rot in [0, 2]:
|
||||
# image is rotated by 0 and 180 degrees
|
||||
desired = [0, 0, 1, 0]
|
||||
elif rot in [1, 3]:
|
||||
# image is rotated by 90 and 270 degrees
|
||||
desired = [1, 0, 0, 0]
|
||||
else:
|
||||
raise Exception('Result is not determined for this rotation.')
|
||||
|
||||
assert_almost_equal(actual, desired, decimal=2)
|
||||
|
||||
|
||||
def test_hog_orientations_circle():
|
||||
# scenario:
|
||||
# 1) create image with blurred circle in the middle
|
||||
# 2) calculate feature.hog()
|
||||
# 3) verify that the resulting feature vector contains uniformly
|
||||
# distributed values for all orientations, i.e. no orientation is
|
||||
# lost or emphasized
|
||||
# 4) repeat the scenario for other 'orientations' option
|
||||
|
||||
# size of testing image
|
||||
width = height = 100
|
||||
|
||||
image = np.zeros((height, width))
|
||||
rr, cc = draw.disk((int(height / 2), int(width / 2)), int(width / 3))
|
||||
image[rr, cc] = 100
|
||||
image = ndi.gaussian_filter(image, 2)
|
||||
|
||||
for orientations in range(2, 15):
|
||||
(hog, hog_img) = feature.hog(image, orientations=orientations,
|
||||
pixels_per_cell=(8, 8),
|
||||
cells_per_block=(1, 1), visualize=True,
|
||||
transform_sqrt=False,
|
||||
block_norm='L1')
|
||||
|
||||
# set to True to enable manual debugging with graphical output,
|
||||
# must be False for automatic testing
|
||||
if False:
|
||||
import matplotlib.pyplot as plt
|
||||
plt.figure()
|
||||
plt.subplot(1, 2, 1)
|
||||
plt.imshow(image)
|
||||
plt.colorbar()
|
||||
plt.title('image_float')
|
||||
plt.subplot(1, 2, 2)
|
||||
plt.imshow(hog_img)
|
||||
plt.colorbar()
|
||||
plt.title('HOG result visualisation, '
|
||||
'orientations=%d' % (orientations))
|
||||
plt.show()
|
||||
|
||||
# reshape resulting feature vector to matrix with N columns (each
|
||||
# column corresponds to one direction),
|
||||
hog_matrix = hog.reshape(-1, orientations)
|
||||
|
||||
# compute mean values in the resulting feature vector for each
|
||||
# direction, these values should be almost equal to the global mean
|
||||
# value (since the image contains a circle), i.e., all directions have
|
||||
# same contribution to the result
|
||||
actual = np.mean(hog_matrix, axis=0)
|
||||
desired = np.mean(hog_matrix)
|
||||
assert_almost_equal(actual, desired, decimal=1)
|
||||
|
||||
|
||||
def test_hog_visualization_orientation():
|
||||
"""Test that the visualization produces a line with correct orientation
|
||||
|
||||
The hog visualization is expected to draw line segments perpendicular to
|
||||
the midpoints of orientation bins. This example verifies that when
|
||||
orientations=3 and the gradient is entirely in the middle bin (bisected
|
||||
by the y-axis), the line segment drawn by the visualization is horizontal.
|
||||
"""
|
||||
|
||||
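    # A rough sketch of the bin arithmetic behind the docstring above
    # (the [0, 180) degree range and bin layout are assumed, not read from
    # the hog implementation): with orientations=3 the bins cover
    # [0, 60), [60, 120) and [120, 180) degrees, so their midpoints are
    # 30, 90 and 150 degrees. The horizontal step edge built below has a
    # purely vertical gradient (90 degrees), which falls in the middle
    # bin; a segment drawn perpendicular to that midpoint is horizontal.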
width = height = 11
|
||||
|
||||
image = np.zeros((height, width), dtype='float')
|
||||
image[height // 2:] = 1
|
||||
|
||||
_, hog_image = feature.hog(
|
||||
image,
|
||||
orientations=3,
|
||||
pixels_per_cell=(width, height),
|
||||
cells_per_block=(1, 1),
|
||||
visualize=True,
|
||||
block_norm='L1'
|
||||
)
|
||||
|
||||
middle_index = height // 2
|
||||
indices_excluding_middle = [x for x in range(height) if x != middle_index]
|
||||
|
||||
assert (hog_image[indices_excluding_middle, :] == 0).all()
|
||||
assert (hog_image[middle_index, 1:-1] > 0).all()
|
||||
|
||||
|
||||
def test_hog_block_normalization_incorrect_error():
|
||||
img = np.eye(4)
|
||||
with testing.raises(ValueError):
|
||||
feature.hog(img, block_norm='Linf')
|
||||
|
||||
|
||||
@testing.parametrize("shape,multichannel", [
|
||||
((3, 3, 3), False),
|
||||
((3, 3), True),
|
||||
((3, 3, 3, 3), True),
|
||||
])
|
||||
def test_hog_incorrect_dimensions(shape, multichannel):
|
||||
img = np.zeros(shape)
|
||||
with testing.raises(ValueError):
|
||||
feature.hog(img, multichannel=multichannel, block_norm='L1')
|
||||
|
||||
|
||||
def test_hog_output_equivariance_multichannel():
|
||||
img = data.astronaut()
|
||||
img[:, :, (1, 2)] = 0
|
||||
hog_ref = feature.hog(img, multichannel=True, block_norm='L1')
|
||||
|
||||
for n in (1, 2):
|
||||
hog_fact = feature.hog(np.roll(img, n, axis=2), multichannel=True,
|
||||
block_norm='L1')
|
||||
assert_almost_equal(hog_ref, hog_fact)
|
179
venv/Lib/site-packages/skimage/feature/tests/test_match.py
Normal file
|
@@ -0,0 +1,179 @@
|
|||
import numpy as np
|
||||
from skimage._shared.testing import assert_equal
|
||||
from skimage import data
|
||||
from skimage import transform
|
||||
from skimage.color import rgb2gray
|
||||
from skimage.feature import (BRIEF, match_descriptors,
|
||||
corner_peaks, corner_harris)
|
||||
from skimage._shared import testing
|
||||
|
||||
|
||||
def test_binary_descriptors_unequal_descriptor_sizes_error():
|
||||
"""Sizes of descriptors of keypoints to be matched should be equal."""
|
||||
descs1 = np.array([[True, True, False, True],
|
||||
[False, True, False, True]])
|
||||
descs2 = np.array([[True, False, False, True, False],
|
||||
[False, True, True, True, False]])
|
||||
with testing.raises(ValueError):
|
||||
match_descriptors(descs1, descs2)
|
||||
|
||||
|
||||
def test_binary_descriptors():
|
||||
descs1 = np.array([[True, True, False, True, True],
|
||||
[False, True, False, True, True]])
|
||||
descs2 = np.array([[True, False, False, True, False],
|
||||
[False, False, True, True, True]])
|
||||
matches = match_descriptors(descs1, descs2)
|
||||
assert_equal(matches, [[0, 0], [1, 1]])
|
||||
|
||||
|
||||
def test_binary_descriptors_rotation_crosscheck_false():
|
||||
"""Verify matched keypoints and their corresponding masks results between
|
||||
image and its rotated version with the expected keypoint pairs with
|
||||
cross_check disabled."""
|
||||
img = data.astronaut()
|
||||
img = rgb2gray(img)
|
||||
tform = transform.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
|
||||
rotated_img = transform.warp(img, tform, clip=False)
|
||||
|
||||
extractor = BRIEF(descriptor_size=512)
|
||||
|
||||
keypoints1 = corner_peaks(corner_harris(img), min_distance=5,
|
||||
threshold_abs=0, threshold_rel=0.1)
|
||||
extractor.extract(img, keypoints1)
|
||||
descriptors1 = extractor.descriptors
|
||||
|
||||
keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5,
|
||||
threshold_abs=0, threshold_rel=0.1)
|
||||
extractor.extract(rotated_img, keypoints2)
|
||||
descriptors2 = extractor.descriptors
|
||||
|
||||
matches = match_descriptors(descriptors1, descriptors2, cross_check=False)
|
||||
|
||||
exp_matches1 = np.arange(47)
|
||||
exp_matches2 = np.array([0, 2, 1, 3, 4, 5, 7, 8, 14, 9, 11, 13,
|
||||
23, 15, 16, 22, 17, 19, 34, 18, 24, 27,
|
||||
30, 25, 26, 32, 28, 35, 37, 42, 29, 38,
|
||||
33, 40, 36, 3, 10, 32, 43, 15, 29, 41,
|
||||
1, 18, 32, 24, 11])
|
||||
|
||||
assert_equal(matches[:, 0], exp_matches1)
|
||||
assert_equal(matches[:, 1], exp_matches2)
|
||||
|
||||
# minkowski takes a different code path, therefore we test it explicitly
|
||||
matches = match_descriptors(descriptors1, descriptors2,
|
||||
metric='minkowski', cross_check=False)
|
||||
assert_equal(matches[:, 0], exp_matches1)
|
||||
assert_equal(matches[:, 1], exp_matches2)
|
||||
|
||||
# it also has an extra parameter
|
||||
matches = match_descriptors(descriptors1, descriptors2,
|
||||
metric='minkowski', p=4, cross_check=False)
|
||||
assert_equal(matches[:, 0], exp_matches1)
|
||||
assert_equal(matches[:, 1], exp_matches2)
|
||||
|
||||
|
||||
def test_binary_descriptors_rotation_crosscheck_true():
|
||||
"""Verify matched keypoints and their corresponding masks results between
|
||||
image and its rotated version with the expected keypoint pairs with
|
||||
cross_check enabled."""
|
||||
img = data.astronaut()
|
||||
img = rgb2gray(img)
|
||||
tform = transform.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
|
||||
rotated_img = transform.warp(img, tform, clip=False)
|
||||
|
||||
extractor = BRIEF(descriptor_size=512)
|
||||
|
||||
keypoints1 = corner_peaks(corner_harris(img), min_distance=5,
|
||||
threshold_abs=0, threshold_rel=0.1)
|
||||
extractor.extract(img, keypoints1)
|
||||
descriptors1 = extractor.descriptors
|
||||
|
||||
keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5,
|
||||
threshold_abs=0, threshold_rel=0.1)
|
||||
extractor.extract(rotated_img, keypoints2)
|
||||
descriptors2 = extractor.descriptors
|
||||
|
||||
matches = match_descriptors(descriptors1, descriptors2, cross_check=True)
|
||||
|
||||
exp_matches1 = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
|
||||
13, 14, 15, 16, 17, 19, 20, 21, 22, 23,
|
||||
24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
|
||||
34, 38, 41])
|
||||
exp_matches2 = np.array([0, 2, 1, 3, 4, 5, 7, 8, 14, 9, 11, 13,
|
||||
23, 15, 16, 22, 17, 19, 18, 24, 27, 30,
|
||||
25, 26, 32, 28, 35, 37, 42, 29, 38, 33,
|
||||
40, 36, 43, 41])
|
||||
assert_equal(matches[:, 0], exp_matches1)
|
||||
assert_equal(matches[:, 1], exp_matches2)
|
||||
|
||||
|
||||
def test_max_distance():
|
||||
descs1 = np.zeros((10, 128))
|
||||
descs2 = np.zeros((15, 128))
|
||||
|
||||
descs1[0, :] = 1
|
||||
|
||||
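    # Illustrative arithmetic (derived from the arrays above, not from the
    # matcher internals): descs1[0] is all ones while every row of descs2
    # is all zeros, so its Euclidean distance to any candidate is
    # sqrt(128) ~= 11.31. A max_distance of 0.1 therefore rejects that
    # descriptor (9 matches remain), while sqrt(128.1) is just large
    # enough to keep it (all 10 matches), as the assertions below check.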
matches = match_descriptors(descs1, descs2, metric='euclidean',
|
||||
max_distance=0.1, cross_check=False)
|
||||
assert len(matches) == 9
|
||||
|
||||
matches = match_descriptors(descs1, descs2, metric='euclidean',
|
||||
max_distance=np.sqrt(128.1),
|
||||
cross_check=False)
|
||||
assert len(matches) == 10
|
||||
|
||||
matches = match_descriptors(descs1, descs2, metric='euclidean',
|
||||
max_distance=0.1,
|
||||
cross_check=True)
|
||||
assert_equal(matches, [[1, 0]])
|
||||
|
||||
matches = match_descriptors(descs1, descs2, metric='euclidean',
|
||||
max_distance=np.sqrt(128.1),
|
||||
cross_check=True)
|
||||
assert_equal(matches, [[1, 0]])
|
||||
|
||||
|
||||
def test_max_ratio():
|
||||
descs1 = 10 * np.arange(10)[:, None].astype(np.float32)
|
||||
descs2 = 10 * np.arange(15)[:, None].astype(np.float32)
|
||||
|
||||
descs2[0] = 5.0
|
||||
|
||||
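    # Sketch of the ratio-test reasoning for the assertions below (the
    # exact comparison operator is inferred from those assertions, not
    # from the matcher's code): for descs1[0] = 0 the two nearest
    # candidates in descs2 are now 5.0 and 10.0, giving a ratio of
    # 5 / 10 = 0.5. With max_ratio=0.6 the match is kept (10 matches),
    # while with max_ratio=0.5 it is dropped (9 matches), so only ratios
    # strictly below the threshold appear to be accepted.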
matches = match_descriptors(descs1, descs2, metric='euclidean',
|
||||
max_ratio=1.0, cross_check=False)
|
||||
assert_equal(len(matches), 10)
|
||||
|
||||
matches = match_descriptors(descs1, descs2, metric='euclidean',
|
||||
max_ratio=0.6, cross_check=False)
|
||||
assert_equal(len(matches), 10)
|
||||
|
||||
matches = match_descriptors(descs1, descs2, metric='euclidean',
|
||||
max_ratio=0.5, cross_check=False)
|
||||
assert_equal(len(matches), 9)
|
||||
|
||||
descs1[0] = 7.5
|
||||
|
||||
matches = match_descriptors(descs1, descs2, metric='euclidean',
|
||||
max_ratio=0.5, cross_check=False)
|
||||
assert_equal(len(matches), 9)
|
||||
|
||||
descs2 = 10 * np.arange(1)[:, None].astype(np.float32)
|
||||
|
||||
matches = match_descriptors(descs1, descs2, metric='euclidean',
|
||||
max_ratio=1.0, cross_check=False)
|
||||
assert_equal(len(matches), 10)
|
||||
|
||||
matches = match_descriptors(descs1, descs2, metric='euclidean',
|
||||
max_ratio=0.5, cross_check=False)
|
||||
assert_equal(len(matches), 10)
|
||||
|
||||
descs1 = 10 * np.arange(1)[:, None].astype(np.float32)
|
||||
|
||||
matches = match_descriptors(descs1, descs2, metric='euclidean',
|
||||
max_ratio=1.0, cross_check=False)
|
||||
assert_equal(len(matches), 1)
|
||||
|
||||
matches = match_descriptors(descs1, descs2, metric='euclidean',
|
||||
max_ratio=0.5, cross_check=False)
|
||||
assert_equal(len(matches), 1)
|
131
venv/Lib/site-packages/skimage/feature/tests/test_orb.py
Normal file
|
@@ -0,0 +1,131 @@
|
|||
import pytest
|
||||
import numpy as np
|
||||
|
||||
from skimage._shared.testing import assert_equal, assert_almost_equal
|
||||
from skimage.feature import ORB
|
||||
from skimage._shared import testing
|
||||
from skimage import data
|
||||
from skimage._shared.testing import test_parallel, xfail, arch32
|
||||
from skimage.util.dtype import _convert
|
||||
|
||||
|
||||
img = data.coins()
|
||||
|
||||
|
||||
@test_parallel()
|
||||
@pytest.mark.parametrize('dtype', ['float32', 'float64', 'uint8',
|
||||
'uint16', 'int64'])
|
||||
def test_keypoints_orb_desired_no_of_keypoints(dtype):
|
||||
_img = _convert(img, dtype)
|
||||
detector_extractor = ORB(n_keypoints=10, fast_n=12, fast_threshold=0.20)
|
||||
detector_extractor.detect(_img)
|
||||
|
||||
exp_rows = np.array([141., 108., 214.56, 131., 214.272, 67.,
|
||||
206., 177., 108., 141.])
|
||||
exp_cols = np.array([323., 328., 282.24, 292., 281.664, 85.,
|
||||
260., 284., 328.8, 267.])
|
||||
|
||||
exp_scales = np.array([1, 1, 1.44, 1, 1.728, 1, 1, 1, 1.2, 1])
|
||||
|
||||
exp_orientations = np.array([-53.97446153, 59.5055285, -96.01885186,
|
||||
-149.70789506, -94.70171899, -45.76429535,
|
||||
-51.49752849, 113.57081195, 63.30428063,
|
||||
-79.56091118])
|
||||
exp_response = np.array([1.01168357, 0.82934145, 0.67784179, 0.57176438,
|
||||
0.56637459, 0.52248355, 0.43696175, 0.42992376,
|
||||
0.37700486, 0.36126832])
|
||||
|
||||
assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
|
||||
assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
|
||||
assert_almost_equal(exp_scales, detector_extractor.scales)
|
||||
assert_almost_equal(exp_response, detector_extractor.responses, 5)
|
||||
assert_almost_equal(exp_orientations,
|
||||
np.rad2deg(detector_extractor.orientations), 4)
|
||||
|
||||
detector_extractor.detect_and_extract(img)
|
||||
assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
|
||||
assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
|
||||
|
||||
|
||||
@pytest.mark.parametrize('dtype', ['float32', 'float64', 'uint8',
|
||||
'uint16', 'int64'])
|
||||
def test_keypoints_orb_less_than_desired_no_of_keypoints(dtype):
|
||||
_img = _convert(img, dtype)
|
||||
detector_extractor = ORB(n_keypoints=15, fast_n=12,
|
||||
fast_threshold=0.33, downscale=2, n_scales=2)
|
||||
detector_extractor.detect(_img)
|
||||
|
||||
exp_rows = np.array([108., 203., 140., 65., 58.])
|
||||
exp_cols = np.array([293., 267., 202., 130., 291.])
|
||||
|
||||
exp_scales = np.array([1., 1., 1., 1., 1.])
|
||||
|
||||
exp_orientations = np.array([151.93906, -56.90052, -79.46341,
|
||||
-59.42996, -158.26941])
|
||||
|
||||
exp_response = np.array([-0.1764169, 0.2652126, -0.0324343,
|
||||
0.0400902, 0.2667641])
|
||||
|
||||
assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
|
||||
assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
|
||||
assert_almost_equal(exp_scales, detector_extractor.scales)
|
||||
assert_almost_equal(exp_response, detector_extractor.responses)
|
||||
assert_almost_equal(exp_orientations,
|
||||
np.rad2deg(detector_extractor.orientations), 3)
|
||||
|
||||
detector_extractor.detect_and_extract(img)
|
||||
assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
|
||||
assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
|
||||
|
||||
|
||||
@xfail(condition=arch32,
|
||||
reason=('Known test failure on 32-bit platforms. See links for '
|
||||
'details: '
|
||||
'https://github.com/scikit-image/scikit-image/issues/3091 '
|
||||
'https://github.com/scikit-image/scikit-image/issues/2529'))
|
||||
def test_descriptor_orb():
|
||||
detector_extractor = ORB(fast_n=12, fast_threshold=0.20)
|
||||
exp_descriptors = np.array([[0, 0, 0, 1, 0, 0, 0, 1, 0, 1],
|
||||
[1, 1, 0, 1, 0, 0, 0, 1, 0, 1],
|
||||
[1, 1, 0, 0, 1, 0, 0, 0, 1, 1],
|
||||
[1, 1, 1, 0, 0, 0, 1, 1, 1, 0],
|
||||
[0, 0, 0, 1, 0, 1, 1, 1, 1, 1],
|
||||
[1, 0, 0, 1, 1, 0, 0, 0, 1, 0],
|
||||
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
|
||||
[1, 1, 1, 0, 1, 1, 1, 1, 0, 0],
|
||||
[1, 1, 1, 1, 0, 0, 0, 1, 1, 1],
|
||||
[0, 1, 1, 0, 0, 1, 1, 0, 1, 1],
|
||||
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1],
|
||||
[1, 0, 0, 0, 0, 1, 0, 1, 1, 1],
|
||||
[1, 0, 1, 1, 1, 0, 1, 0, 1, 0],
|
||||
[0, 0, 1, 1, 0, 0, 0, 0, 1, 1],
|
||||
[0, 1, 1, 0, 0, 0, 1, 0, 0, 1],
|
||||
[0, 1, 1, 0, 0, 0, 1, 1, 1, 1],
|
||||
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
|
||||
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0],
|
||||
[0, 0, 1, 1, 1, 0, 1, 0, 0, 1],
|
||||
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0]], dtype=bool)
|
||||
|
||||
detector_extractor.detect(img)
|
||||
detector_extractor.extract(img, detector_extractor.keypoints,
|
||||
detector_extractor.scales,
|
||||
detector_extractor.orientations)
|
||||
|
||||
assert_equal(exp_descriptors,
|
||||
detector_extractor.descriptors[100:120, 10:20])
|
||||
|
||||
detector_extractor.detect_and_extract(img)
|
||||
assert_equal(exp_descriptors,
|
||||
detector_extractor.descriptors[100:120, 10:20])
|
||||
keypoints_count = detector_extractor.keypoints.shape[0]
|
||||
assert keypoints_count == detector_extractor.descriptors.shape[0]
|
||||
assert keypoints_count == detector_extractor.orientations.shape[0]
|
||||
assert keypoints_count == detector_extractor.responses.shape[0]
|
||||
assert keypoints_count == detector_extractor.scales.shape[0]
|
||||
|
||||
|
||||
def test_no_descriptors_extracted_orb():
|
||||
img = np.ones((128, 128))
|
||||
detector_extractor = ORB()
|
||||
with testing.raises(RuntimeError):
|
||||
detector_extractor.detect_and_extract(img)
|
537
venv/Lib/site-packages/skimage/feature/tests/test_peak.py
Normal file
|
@@ -0,0 +1,537 @@
|
|||
import itertools
|
||||
import numpy as np
|
||||
import pytest
|
||||
import unittest
|
||||
from skimage._shared.testing import assert_array_almost_equal
|
||||
from skimage._shared.testing import assert_equal
|
||||
from scipy import ndimage as ndi
|
||||
from skimage.feature import peak
|
||||
|
||||
|
||||
np.random.seed(21)
|
||||
|
||||
class TestPeakLocalMax():
|
||||
def test_trivial_case(self):
|
||||
trivial = np.zeros((25, 25))
|
||||
peak_indices = peak.peak_local_max(trivial, min_distance=1, indices=True)
|
||||
assert type(peak_indices) is np.ndarray
|
||||
assert peak_indices.size == 0
|
||||
peaks = peak.peak_local_max(trivial, min_distance=1, indices=False)
|
||||
assert (peaks.astype(np.bool) == trivial).all()
|
||||
|
||||
def test_noisy_peaks(self):
|
||||
peak_locations = [(7, 7), (7, 13), (13, 7), (13, 13)]
|
||||
|
||||
# image with noise of amplitude 0.8 and peaks of amplitude 1
|
||||
image = 0.8 * np.random.rand(20, 20)
|
||||
for r, c in peak_locations:
|
||||
image[r, c] = 1
|
||||
|
||||
peaks_detected = peak.peak_local_max(image, min_distance=5)
|
||||
|
||||
assert len(peaks_detected) == len(peak_locations)
|
||||
for loc in peaks_detected:
|
||||
assert tuple(loc) in peak_locations
|
||||
|
||||
def test_relative_threshold(self):
|
||||
image = np.zeros((5, 5), dtype=np.uint8)
|
||||
image[1, 1] = 10
|
||||
image[3, 3] = 20
|
||||
peaks = peak.peak_local_max(image, min_distance=1, threshold_rel=0.5)
|
||||
assert len(peaks) == 1
|
||||
assert_array_almost_equal(peaks, [(3, 3)])
|
||||
|
||||
def test_absolute_threshold(self):
|
||||
image = np.zeros((5, 5), dtype=np.uint8)
|
||||
image[1, 1] = 10
|
||||
image[3, 3] = 20
|
||||
peaks = peak.peak_local_max(image, min_distance=1, threshold_abs=10)
|
||||
assert len(peaks) == 1
|
||||
assert_array_almost_equal(peaks, [(3, 3)])
|
||||
|
||||
def test_constant_image(self):
|
||||
image = np.full((20, 20), 128, dtype=np.uint8)
|
||||
peaks = peak.peak_local_max(image, min_distance=1)
|
||||
assert len(peaks) == 0
|
||||
|
||||
def test_flat_peak(self):
|
||||
image = np.zeros((5, 5), dtype=np.uint8)
|
||||
image[1:3, 1:3] = 10
|
||||
peaks = peak.peak_local_max(image, min_distance=1)
|
||||
assert len(peaks) == 4
|
||||
|
||||
def test_sorted_peaks(self):
|
||||
image = np.zeros((5, 5), dtype=np.uint8)
|
||||
image[1, 1] = 20
|
||||
image[3, 3] = 10
|
||||
peaks = peak.peak_local_max(image, min_distance=1)
|
||||
assert peaks.tolist() == [[1, 1], [3, 3]]
|
||||
|
||||
image = np.zeros((3, 10))
|
||||
image[1, (1, 3, 5, 7)] = (1, 2, 3, 4)
|
||||
peaks = peak.peak_local_max(image, min_distance=1)
|
||||
assert peaks.tolist() == [[1, 7], [1, 5], [1, 3], [1, 1]]
|
||||
|
||||
def test_num_peaks(self):
|
||||
image = np.zeros((7, 7), dtype=np.uint8)
|
||||
image[1, 1] = 10
|
||||
image[1, 3] = 11
|
||||
image[1, 5] = 12
|
||||
image[3, 5] = 8
|
||||
image[5, 3] = 7
|
||||
assert len(peak.peak_local_max(image, min_distance=1, threshold_abs=0)) == 5
|
||||
peaks_limited = peak.peak_local_max(
|
||||
image, min_distance=1, threshold_abs=0, num_peaks=2)
|
||||
assert len(peaks_limited) == 2
|
||||
assert (1, 3) in peaks_limited
|
||||
assert (1, 5) in peaks_limited
|
||||
peaks_limited = peak.peak_local_max(
|
||||
image, min_distance=1, threshold_abs=0, num_peaks=4)
|
||||
assert len(peaks_limited) == 4
|
||||
assert (1, 3) in peaks_limited
|
||||
assert (1, 5) in peaks_limited
|
||||
assert (1, 1) in peaks_limited
|
||||
assert (3, 5) in peaks_limited
|
||||
|
||||
def test_num_peaks_and_labels(self):
|
||||
image = np.zeros((7, 7), dtype=np.uint8)
|
||||
labels = np.zeros((7, 7), dtype=np.uint8) + 20
|
||||
image[1, 1] = 10
|
||||
image[1, 3] = 11
|
||||
image[1, 5] = 12
|
||||
image[3, 5] = 8
|
||||
image[5, 3] = 7
|
||||
peaks_limited = peak.peak_local_max(
|
||||
image, min_distance=1, threshold_abs=0, labels=labels)
|
||||
assert len(peaks_limited) == 5
|
||||
peaks_limited = peak.peak_local_max(
|
||||
image, min_distance=1, threshold_abs=0, labels=labels, num_peaks=2)
|
||||
assert len(peaks_limited) == 2
|
||||
|
||||
|
||||
def test_num_peaks_tot_vs_labels_4quadrants(self):
|
||||
np.random.seed(21)
|
||||
image = np.random.uniform(size=(20, 30))
|
||||
i, j = np.mgrid[0:20, 0:30]
|
||||
labels = 1 + (i >= 10) + (j >= 15) * 2
|
||||
result = peak.peak_local_max(image, labels=labels,
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=True,
|
||||
num_peaks=np.inf,
|
||||
num_peaks_per_label=2)
|
||||
assert len(result) == 8
|
||||
result = peak.peak_local_max(image, labels=labels,
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=True,
|
||||
num_peaks=np.inf,
|
||||
num_peaks_per_label=1)
|
||||
assert len(result) == 4
|
||||
result = peak.peak_local_max(image, labels=labels,
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=True,
|
||||
num_peaks=2,
|
||||
num_peaks_per_label=2)
|
||||
assert len(result) == 2
|
||||
|
||||
|
||||
def test_num_peaks3D(self):
|
||||
        # Issue 1354: the old code only held for 2D arrays
|
||||
        # and this test case would make it die with an IndexError
|
||||
image = np.zeros((10, 10, 100))
|
||||
        image[5, 5, ::5] = np.arange(20)
|
||||
peaks_limited = peak.peak_local_max(image, min_distance=1, num_peaks=2)
|
||||
assert len(peaks_limited) == 2
|
||||
|
||||
def test_reorder_labels(self):
|
||||
image = np.random.uniform(size=(40, 60))
|
||||
i, j = np.mgrid[0:40, 0:60]
|
||||
labels = 1 + (i >= 20) + (j >= 30) * 2
|
||||
labels[labels == 4] = 5
|
||||
i, j = np.mgrid[-3:4, -3:4]
|
||||
footprint = (i * i + j * j <= 9)
|
||||
expected = np.zeros(image.shape, float)
|
||||
for imin, imax in ((0, 20), (20, 40)):
|
||||
for jmin, jmax in ((0, 30), (30, 60)):
|
||||
expected[imin:imax, jmin:jmax] = ndi.maximum_filter(
|
||||
image[imin:imax, jmin:jmax], footprint=footprint)
|
||||
expected = (expected == image)
|
||||
result = peak.peak_local_max(image, labels=labels, min_distance=1,
|
||||
threshold_rel=0, footprint=footprint,
|
||||
indices=False, exclude_border=False)
|
||||
assert (result == expected).all()
|
||||
|
||||
def test_indices_with_labels(self):
|
||||
image = np.random.uniform(size=(40, 60))
|
||||
i, j = np.mgrid[0:40, 0:60]
|
||||
labels = 1 + (i >= 20) + (j >= 30) * 2
|
||||
i, j = np.mgrid[-3:4, -3:4]
|
||||
footprint = (i * i + j * j <= 9)
|
||||
expected = np.zeros(image.shape, float)
|
||||
for imin, imax in ((0, 20), (20, 40)):
|
||||
for jmin, jmax in ((0, 30), (30, 60)):
|
||||
expected[imin:imax, jmin:jmax] = ndi.maximum_filter(
|
||||
image[imin:imax, jmin:jmax], footprint=footprint)
|
||||
expected = np.transpose(np.nonzero(expected == image))
|
||||
expected = expected[np.argsort(image[tuple(expected.T)])[::-1]]
|
||||
result = peak.peak_local_max(image, labels=labels, min_distance=1,
|
||||
threshold_rel=0, footprint=footprint,
|
||||
indices=True, exclude_border=False)
|
||||
result = result[np.argsort(image[tuple(result.T)])[::-1]]
|
||||
assert (result == expected).all()
|
||||
|
||||
def test_ndarray_indices_false(self):
|
||||
nd_image = np.zeros((5, 5, 5))
|
||||
nd_image[2, 2, 2] = 1
|
||||
peaks = peak.peak_local_max(nd_image, min_distance=1, indices=False)
|
||||
assert (peaks == nd_image.astype(np.bool)).all()
|
||||
|
||||
def test_ndarray_exclude_border(self):
|
||||
nd_image = np.zeros((5, 5, 5))
|
||||
nd_image[[1, 0, 0], [0, 1, 0], [0, 0, 1]] = 1
|
||||
nd_image[3, 0, 0] = 1
|
||||
nd_image[2, 2, 2] = 1
|
||||
expected = np.zeros_like(nd_image, dtype=np.bool)
|
||||
expected[2, 2, 2] = True
|
||||
expectedNoBorder = nd_image > 0
|
||||
result = peak.peak_local_max(nd_image, min_distance=2,
|
||||
exclude_border=2, indices=False)
|
||||
assert_equal(result, expected)
|
||||
# Check that bools work as expected
|
||||
assert_equal(
|
||||
peak.peak_local_max(nd_image, min_distance=2,
|
||||
exclude_border=2, indices=False),
|
||||
peak.peak_local_max(nd_image, min_distance=2,
|
||||
exclude_border=True, indices=False)
|
||||
)
|
||||
assert_equal(
|
||||
peak.peak_local_max(nd_image, min_distance=2,
|
||||
exclude_border=0, indices=False),
|
||||
peak.peak_local_max(nd_image, min_distance=2,
|
||||
exclude_border=False, indices=False)
|
||||
)
|
||||
# Check both versions with no border
|
||||
assert_equal(
|
||||
peak.peak_local_max(nd_image, min_distance=2,
|
||||
exclude_border=0, indices=False),
|
||||
expectedNoBorder,
|
||||
)
|
||||
assert_equal(
|
||||
peak.peak_local_max(nd_image,
|
||||
exclude_border=False, indices=False),
|
||||
expectedNoBorder,
|
||||
)
|
||||
|
||||
def test_empty(self):
|
||||
image = np.zeros((10, 20))
|
||||
labels = np.zeros((10, 20), int)
|
||||
result = peak.peak_local_max(image, labels=labels,
|
||||
footprint=np.ones((3, 3), bool),
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=False, exclude_border=False)
|
||||
assert np.all(~ result)
|
||||
|
||||
def test_empty_non2d_indices(self):
|
||||
image = np.zeros((10, 10, 10))
|
||||
result = peak.peak_local_max(image,
|
||||
footprint=np.ones((3, 3), bool),
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=True, exclude_border=False)
|
||||
assert result.shape == (0, image.ndim)
|
||||
|
||||
def test_one_point(self):
|
||||
image = np.zeros((10, 20))
|
||||
labels = np.zeros((10, 20), int)
|
||||
image[5, 5] = 1
|
||||
labels[5, 5] = 1
|
||||
result = peak.peak_local_max(image, labels=labels,
|
||||
footprint=np.ones((3, 3), bool),
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=False, exclude_border=False)
|
||||
assert np.all(result == (labels == 1))
|
||||
|
||||
def test_adjacent_and_same(self):
|
||||
image = np.zeros((10, 20))
|
||||
labels = np.zeros((10, 20), int)
|
||||
image[5, 5:6] = 1
|
||||
labels[5, 5:6] = 1
|
||||
result = peak.peak_local_max(image, labels=labels,
|
||||
footprint=np.ones((3, 3), bool),
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=False, exclude_border=False)
|
||||
assert np.all(result == (labels == 1))
|
||||
|
||||
def test_adjacent_and_different(self):
|
||||
image = np.zeros((10, 20))
|
||||
labels = np.zeros((10, 20), int)
|
||||
image[5, 5] = 1
|
||||
image[5, 6] = .5
|
||||
labels[5, 5:6] = 1
|
||||
expected = (image == 1)
|
||||
result = peak.peak_local_max(image, labels=labels,
|
||||
footprint=np.ones((3, 3), bool),
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=False, exclude_border=False)
|
||||
assert np.all(result == expected)
|
||||
result = peak.peak_local_max(image, labels=labels,
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=False, exclude_border=False)
|
||||
assert np.all(result == expected)
|
||||
|
||||
def test_not_adjacent_and_different(self):
|
||||
image = np.zeros((10, 20))
|
||||
labels = np.zeros((10, 20), int)
|
||||
image[5, 5] = 1
|
||||
image[5, 8] = .5
|
||||
labels[image > 0] = 1
|
||||
expected = (labels == 1)
|
||||
result = peak.peak_local_max(image, labels=labels,
|
||||
footprint=np.ones((3, 3), bool),
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=False, exclude_border=False)
|
||||
assert np.all(result == expected)
|
||||
|
||||
def test_two_objects(self):
|
||||
image = np.zeros((10, 20))
|
||||
labels = np.zeros((10, 20), int)
|
||||
image[5, 5] = 1
|
||||
image[5, 15] = .5
|
||||
labels[5, 5] = 1
|
||||
labels[5, 15] = 2
|
||||
expected = (labels > 0)
|
||||
result = peak.peak_local_max(image, labels=labels,
|
||||
footprint=np.ones((3, 3), bool),
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=False, exclude_border=False)
|
||||
assert np.all(result == expected)
|
||||
|
||||
def test_adjacent_different_objects(self):
|
||||
image = np.zeros((10, 20))
|
||||
labels = np.zeros((10, 20), int)
|
||||
image[5, 5] = 1
|
||||
image[5, 6] = .5
|
||||
labels[5, 5] = 1
|
||||
labels[5, 6] = 2
|
||||
expected = (labels > 0)
|
||||
result = peak.peak_local_max(image, labels=labels,
|
||||
footprint=np.ones((3, 3), bool),
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=False, exclude_border=False)
|
||||
assert np.all(result == expected)
|
||||
|
||||
def test_four_quadrants(self):
|
||||
image = np.random.uniform(size=(20, 30))
|
||||
i, j = np.mgrid[0:20, 0:30]
|
||||
labels = 1 + (i >= 10) + (j >= 15) * 2
|
||||
i, j = np.mgrid[-3:4, -3:4]
|
||||
footprint = (i * i + j * j <= 9)
|
||||
expected = np.zeros(image.shape, float)
|
||||
for imin, imax in ((0, 10), (10, 20)):
|
||||
for jmin, jmax in ((0, 15), (15, 30)):
|
||||
expected[imin:imax, jmin:jmax] = ndi.maximum_filter(
|
||||
image[imin:imax, jmin:jmax], footprint=footprint)
|
||||
expected = (expected == image)
|
||||
result = peak.peak_local_max(image, labels=labels, footprint=footprint,
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=False, exclude_border=False)
|
||||
assert np.all(result == expected)
|
||||
|
||||
def test_disk(self):
|
||||
'''regression test of img-1194, footprint = [1]
|
||||
Test peak.peak_local_max when every point is a local maximum
|
||||
'''
|
||||
image = np.random.uniform(size=(10, 20))
|
||||
footprint = np.array([[1]])
|
||||
result = peak.peak_local_max(image, labels=np.ones((10, 20)),
|
||||
footprint=footprint,
|
||||
min_distance=1, threshold_rel=0,
|
||||
threshold_abs=-1, indices=False,
|
||||
exclude_border=False)
|
||||
assert np.all(result)
|
||||
result = peak.peak_local_max(image, footprint=footprint, threshold_abs=-1,
|
||||
indices=False, exclude_border=False)
|
||||
assert np.all(result)
|
||||
|
||||
def test_3D(self):
|
||||
image = np.zeros((30, 30, 30))
|
||||
image[15, 15, 15] = 1
|
||||
image[5, 5, 5] = 1
|
||||
assert_equal(peak.peak_local_max(image, min_distance=10, threshold_rel=0),
|
||||
[[15, 15, 15]])
|
||||
assert_equal(peak.peak_local_max(image, min_distance=6, threshold_rel=0),
|
||||
[[15, 15, 15]])
|
||||
assert sorted(peak.peak_local_max(image, min_distance=10, threshold_rel=0,
|
||||
exclude_border=False).tolist()) == \
|
||||
[[5, 5, 5], [15, 15, 15]]
|
||||
assert sorted(peak.peak_local_max(image, min_distance=5,
|
||||
threshold_rel=0).tolist()) == \
|
||||
[[5, 5, 5], [15, 15, 15]]
|
||||
|
||||
def test_4D(self):
|
||||
image = np.zeros((30, 30, 30, 30))
|
||||
image[15, 15, 15, 15] = 1
|
||||
image[5, 5, 5, 5] = 1
|
||||
assert_equal(peak.peak_local_max(image, min_distance=10, threshold_rel=0),
|
||||
[[15, 15, 15, 15]])
|
||||
assert_equal(peak.peak_local_max(image, min_distance=6, threshold_rel=0),
|
||||
[[15, 15, 15, 15]])
|
||||
assert sorted(peak.peak_local_max(image, min_distance=10, threshold_rel=0,
|
||||
exclude_border=False).tolist()) == \
|
||||
[[5, 5, 5, 5], [15, 15, 15, 15]]
|
||||
assert sorted(peak.peak_local_max(image, min_distance=5,
|
||||
threshold_rel=0).tolist()) == \
|
||||
[[5, 5, 5, 5], [15, 15, 15, 15]]
|
||||
|
||||
def test_threshold_rel_default(self):
|
||||
image = np.ones((5, 5))
|
||||
|
||||
image[2, 2] = 1
|
||||
assert len(peak.peak_local_max(image)) == 0
|
||||
|
||||
image[2, 2] = 2
|
||||
assert_equal(peak.peak_local_max(image), [[2, 2]])
|
||||
|
||||
image[2, 2] = 0
|
||||
assert len(peak.peak_local_max(image, min_distance=0)) == image.size - 1
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
["indices"],
|
||||
[[indices] for indices in itertools.product(range(5), range(5))],
|
||||
)
|
||||
def test_exclude_border(indices):
|
||||
image = np.zeros((5, 5))
|
||||
image[indices] = 1
|
||||
|
||||
    # exclude_border = False means the peak will always be found.
|
||||
assert len(peak.peak_local_max(image, exclude_border=False)) == 1
|
||||
|
||||
    # exclude_border = 0 means the peak will always be found.
|
||||
assert len(peak.peak_local_max(image, exclude_border=0)) == 1
|
||||
|
||||
# exclude_border = True, min_distance=1 means it will be found unless it's
|
||||
# on the edge.
|
||||
if indices[0] in (0, 4) or indices[1] in (0, 4):
|
||||
expected_peaks = 0
|
||||
else:
|
||||
expected_peaks = 1
|
||||
assert len(peak.peak_local_max(
|
||||
image, min_distance=1, exclude_border=True)) == expected_peaks
|
||||
|
||||
# exclude_border = (1, 0) means it will be found unless it's on the edge of
|
||||
# the first dimension.
|
||||
if indices[0] in (0, 4):
|
||||
expected_peaks = 0
|
||||
else:
|
||||
expected_peaks = 1
|
||||
assert len(peak.peak_local_max(
|
||||
image, exclude_border=(1, 0))) == expected_peaks
|
||||
|
||||
# exclude_border = (0, 1) means it will be found unless it's on the edge of
|
||||
# the second dimension.
|
||||
if indices[1] in (0, 4):
|
||||
expected_peaks = 0
|
||||
else:
|
||||
expected_peaks = 1
|
||||
assert len(peak.peak_local_max(
|
||||
image, exclude_border=(0, 1))) == expected_peaks
|
||||
|
||||
|
||||
def test_exclude_border_errors():
|
||||
image = np.zeros((5, 5))
|
||||
|
||||
# exclude_border doesn't have the right cardinality.
|
||||
with pytest.raises(ValueError):
|
||||
assert peak.peak_local_max(image, exclude_border=(1,))
|
||||
|
||||
# exclude_border doesn't have the right type
|
||||
with pytest.raises(TypeError):
|
||||
assert peak.peak_local_max(image, exclude_border=1.0)
|
||||
|
||||
# exclude_border is a tuple of the right cardinality but contains
|
||||
# non-integer values.
|
||||
with pytest.raises(ValueError):
|
||||
assert peak.peak_local_max(image, exclude_border=(1, 'a'))
|
||||
|
||||
# exclude_border is a tuple of the right cardinality but contains a
|
||||
# negative value.
|
||||
with pytest.raises(ValueError):
|
||||
assert peak.peak_local_max(image, exclude_border=(1, -1))
|
||||
|
||||
# exclude_border is a negative value.
|
||||
with pytest.raises(ValueError):
|
||||
assert peak.peak_local_max(image, exclude_border=-1)
|
||||
|
||||
|
||||
class TestProminentPeaks(unittest.TestCase):
|
||||
def test_isolated_peaks(self):
|
||||
image = np.zeros((15, 15))
|
||||
x0, y0, i0 = (12, 8, 1)
|
||||
x1, y1, i1 = (2, 2, 1)
|
||||
x2, y2, i2 = (5, 13, 1)
|
||||
image[y0, x0] = i0
|
||||
image[y1, x1] = i1
|
||||
image[y2, x2] = i2
|
||||
out = peak._prominent_peaks(image)
|
||||
assert len(out[0]) == 3
|
||||
        for i, x, y in zip(out[0], out[1], out[2]):
|
||||
self.assertTrue(i in (i0, i1, i2))
|
||||
self.assertTrue(x in (x0, x1, x2))
|
||||
self.assertTrue(y in (y0, y1, y2))
|
||||
|
||||
def test_threshold(self):
|
||||
image = np.zeros((15, 15))
|
||||
x0, y0, i0 = (12, 8, 10)
|
||||
x1, y1, i1 = (2, 2, 8)
|
||||
x2, y2, i2 = (5, 13, 10)
|
||||
image[y0, x0] = i0
|
||||
image[y1, x1] = i1
|
||||
image[y2, x2] = i2
|
||||
out = peak._prominent_peaks(image, threshold=None)
|
||||
assert len(out[0]) == 3
|
||||
        for i, x, y in zip(out[0], out[1], out[2]):
|
||||
self.assertTrue(i in (i0, i1, i2))
|
||||
self.assertTrue(x in (x0, x1, x2))
|
||||
out = peak._prominent_peaks(image, threshold=9)
|
||||
assert len(out[0]) == 2
|
||||
        for i, x, y in zip(out[0], out[1], out[2]):
|
||||
self.assertTrue(i in (i0, i2))
|
||||
self.assertTrue(x in (x0, x2))
|
||||
self.assertTrue(y in (y0, y2))
|
||||
|
||||
def test_peaks_in_contact(self):
|
||||
image = np.zeros((15, 15))
|
||||
x0, y0, i0 = (8, 8, 1)
|
||||
x1, y1, i1 = (7, 7, 1) # prominent peak
|
||||
x2, y2, i2 = (6, 6, 1)
|
||||
image[y0, x0] = i0
|
||||
image[y1, x1] = i1
|
||||
image[y2, x2] = i2
|
||||
out = peak._prominent_peaks(image, min_xdistance=3,
|
||||
min_ydistance=3,)
|
||||
assert_equal(out[0], np.array((i1,)))
|
||||
assert_equal(out[1], np.array((x1,)))
|
||||
assert_equal(out[2], np.array((y1,)))
|
||||
|
||||
def test_input_labels_unmodified(self):
|
||||
image = np.zeros((10, 20))
|
||||
labels = np.zeros((10, 20), int)
|
||||
image[5, 5] = 1
|
||||
labels[5, 5] = 1
|
||||
labelsin = labels.copy()
|
||||
result = peak.peak_local_max(image, labels=labels,
|
||||
footprint=np.ones((3, 3), bool),
|
||||
min_distance=1, threshold_rel=0,
|
||||
indices=False, exclude_border=False)
|
||||
assert np.all(labels == labelsin)
|
||||
|
||||
def test_many_objects(self):
|
||||
mask = np.zeros([500, 500], dtype=bool)
|
||||
x, y = np.indices((500, 500))
|
||||
x_c = x // 20 * 20 + 10
|
||||
y_c = y // 20 * 20 + 10
|
||||
mask[(x - x_c) ** 2 + (y - y_c) ** 2 < 8 ** 2] = True
|
||||
labels, num_objs = ndi.label(mask)
|
||||
dist = ndi.distance_transform_edt(mask)
|
||||
local_max = peak.peak_local_max(dist, min_distance=20, indices=True,
|
||||
exclude_border=False, labels=labels)
|
||||
assert len(local_max) == 625
|
186
venv/Lib/site-packages/skimage/feature/tests/test_template.py
Normal file
|
@@ -0,0 +1,186 @@
|
|||
import numpy as np
|
||||
from skimage._shared.testing import assert_almost_equal, assert_equal
|
||||
|
||||
from skimage import data, img_as_float
|
||||
from skimage.morphology import diamond
|
||||
from skimage.feature import match_template, peak_local_max
|
||||
from skimage._shared import testing
|
||||
|
||||
|
||||
def test_template():
|
||||
size = 100
|
||||
# Float prefactors ensure that image range is between 0 and 1
|
||||
image = np.full((400, 400), 0.5)
|
||||
target = 0.1 * (np.tri(size) + np.tri(size)[::-1])
|
||||
target_positions = [(50, 50), (200, 200)]
|
||||
for x, y in target_positions:
|
||||
image[x:x + size, y:y + size] = target
|
||||
np.random.seed(1)
|
||||
image += 0.1 * np.random.uniform(size=(400, 400))
|
||||
|
||||
result = match_template(image, target)
|
||||
delta = 5
|
||||
|
||||
positions = peak_local_max(result, min_distance=delta)
|
||||
|
||||
if len(positions) > 2:
|
||||
# Keep the two maximum peaks.
|
||||
intensities = result[tuple(positions.T)]
|
||||
i_maxsort = np.argsort(intensities)[::-1]
|
||||
positions = positions[i_maxsort][:2]
|
||||
|
||||
# Sort so that order matches `target_positions`.
|
||||
positions = positions[np.argsort(positions[:, 0])]
|
||||
|
||||
for xy_target, xy in zip(target_positions, positions):
|
||||
assert_almost_equal(xy, xy_target)
|
||||
|
||||
|
||||
def test_normalization():
|
||||
"""Test that `match_template` gives the correct normalization.
|
||||
|
||||
    Normalization gives 1 for a perfect match and -1 for an inverted match.
|
||||
This test adds positive and negative squares to a zero-array and matches
|
||||
the array with a positive template.
|
||||
"""
|
||||
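    # Hedged sketch of the quantity being tested; the exact expression is
    # an assumption for illustration, not copied from match_template:
    #
    #   ncc = sum((window - window.mean()) * (template - template.mean()))
    #         / (norm(window - window.mean()) * norm(template - template.mean()))
    #
    # A window identical to the template scores +1 and a contrast-inverted
    # window scores -1, which is what the extrema checks below rely on.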
n = 5
|
||||
N = 20
|
||||
ipos, jpos = (2, 3)
|
||||
ineg, jneg = (12, 11)
|
||||
image = np.full((N, N), 0.5)
|
||||
image[ipos:ipos + n, jpos:jpos + n] = 1
|
||||
image[ineg:ineg + n, jneg:jneg + n] = 0
|
||||
|
||||
# white square with a black border
|
||||
template = np.zeros((n + 2, n + 2))
|
||||
template[1:1 + n, 1:1 + n] = 1
|
||||
|
||||
result = match_template(image, template)
|
||||
|
||||
# get the max and min results.
|
||||
sorted_result = np.argsort(result.flat)
|
||||
iflat_min = sorted_result[0]
|
||||
iflat_max = sorted_result[-1]
|
||||
min_result = np.unravel_index(iflat_min, result.shape)
|
||||
max_result = np.unravel_index(iflat_max, result.shape)
|
||||
|
||||
# shift result by 1 because of template border
|
||||
assert np.all((np.array(min_result) + 1) == (ineg, jneg))
|
||||
assert np.all((np.array(max_result) + 1) == (ipos, jpos))
|
||||
|
||||
assert np.allclose(result.flat[iflat_min], -1)
|
||||
assert np.allclose(result.flat[iflat_max], 1)
|
||||
|
||||
|
||||
def test_no_nans():
|
||||
"""Test that `match_template` doesn't return NaN values.
|
||||
|
||||
When image values are only slightly different, floating-point errors can
|
||||
cause a subtraction inside of a square root to go negative (without an
|
||||
explicit check that was added to `match_template`).
|
||||
"""
|
||||
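    # Hedged note on where the negative value can appear: fast normalized
    # cross-correlation typically computes the windowed variance as
    # sum(x**2) - sum(x)**2 / N from integral images. When the window is
    # nearly constant, as below, the two terms are almost equal and
    # rounding can make their difference slightly negative, so a naive
    # sqrt would yield NaN. (This describes the usual algorithm, not a
    # guaranteed reading of match_template's source.)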
np.random.seed(1)
|
||||
image = 0.5 + 1e-9 * np.random.normal(size=(20, 20))
|
||||
template = np.ones((6, 6))
|
||||
template[:3, :] = 0
|
||||
result = match_template(image, template)
|
||||
assert not np.any(np.isnan(result))
|
||||
|
||||
|
||||
def test_switched_arguments():
|
||||
image = np.ones((5, 5))
|
||||
template = np.ones((3, 3))
|
||||
with testing.raises(ValueError):
|
||||
match_template(template, image)
|
||||
|
||||
|
||||
def test_pad_input():
|
||||
"""Test `match_template` when `pad_input=True`.
|
||||
|
||||
This test places two full templates (one with values lower than the image
|
||||
mean, the other higher) and two half templates, which are on the edges of
|
||||
the image. The two full templates should score the top (positive and
|
||||
negative) matches and the centers of the half templates should score 2nd.
|
||||
"""
|
||||
# Float prefactors ensure that image range is between 0 and 1
|
||||
template = 0.5 * diamond(2)
|
||||
image = 0.5 * np.ones((9, 19))
|
||||
mid = slice(2, 7)
|
||||
image[mid, :3] -= template[:, -3:] # half min template centered at 0
|
||||
image[mid, 4:9] += template # full max template centered at 6
|
||||
image[mid, -9:-4] -= template # full min template centered at 12
|
||||
image[mid, -3:] += template[:, :3] # half max template centered at 18
|
||||
|
||||
result = match_template(image, template, pad_input=True,
|
||||
constant_values=image.mean())
|
||||
|
||||
# get the max and min results.
|
||||
sorted_result = np.argsort(result.flat)
|
||||
i, j = np.unravel_index(sorted_result[:2], result.shape)
|
||||
assert_equal(j, (12, 0))
|
||||
i, j = np.unravel_index(sorted_result[-2:], result.shape)
|
||||
assert_equal(j, (18, 6))
|
||||
|
||||
|
||||
def test_3d():
|
||||
np.random.seed(1)
|
||||
template = np.random.rand(3, 3, 3)
|
||||
image = np.zeros((12, 12, 12))
|
||||
|
||||
image[3:6, 5:8, 4:7] = template
|
||||
|
||||
result = match_template(image, template)
|
||||
|
||||
assert_equal(result.shape, (10, 10, 10))
|
||||
assert_equal(np.unravel_index(result.argmax(), result.shape), (3, 5, 4))
|
||||
|
||||
|
||||
def test_3d_pad_input():
|
||||
np.random.seed(1)
|
||||
template = np.random.rand(3, 3, 3)
|
||||
image = np.zeros((12, 12, 12))
|
||||
|
||||
image[3:6, 5:8, 4:7] = template
|
||||
|
||||
result = match_template(image, template, pad_input=True)
|
||||
|
||||
assert_equal(result.shape, (12, 12, 12))
|
||||
assert_equal(np.unravel_index(result.argmax(), result.shape), (4, 6, 5))
|
||||
|
||||
|
||||
def test_padding_reflect():
|
||||
template = diamond(2)
|
||||
image = np.zeros((10, 10))
|
||||
image[2:7, :3] = template[:, -3:]
|
||||
|
||||
result = match_template(image, template, pad_input=True,
|
||||
mode='reflect')
|
||||
|
||||
assert_equal(np.unravel_index(result.argmax(), result.shape), (4, 0))
|
||||
|
||||
|
||||
def test_wrong_input():
|
||||
image = np.ones((5, 5, 1))
|
||||
template = np.ones((3, 3))
|
||||
with testing.raises(ValueError):
|
||||
match_template(template, image)
|
||||
|
||||
image = np.ones((5, 5))
|
||||
template = np.ones((3, 3, 2))
|
||||
with testing.raises(ValueError):
|
||||
match_template(template, image)
|
||||
|
||||
image = np.ones((5, 5, 3, 3))
|
||||
template = np.ones((3, 3, 2))
|
||||
with testing.raises(ValueError):
|
||||
match_template(template, image)
|
||||
|
||||
|
||||
def test_bounding_values():
|
||||
image = img_as_float(data.page())
|
||||
template = np.zeros((3, 3))
|
||||
template[1, 1] = 1
|
||||
result = match_template(img_as_float(data.page()), template)
|
||||
print(result.max())
|
||||
assert result.max() < 1 + 1e-7
|
||||
assert result.min() > -1 - 1e-7
|
294
venv/Lib/site-packages/skimage/feature/tests/test_texture.py
Normal file
|
@@ -0,0 +1,294 @@
|
|||
import numpy as np
|
||||
from skimage.feature import (greycomatrix,
|
||||
greycoprops,
|
||||
local_binary_pattern,
|
||||
multiblock_lbp)
|
||||
from skimage._shared.testing import test_parallel
|
||||
from skimage.transform import integral_image
|
||||
from skimage._shared import testing
|
||||
|
||||
|
||||
class TestGLCM():
|
||||
|
||||
def setup(self):
|
||||
self.image = np.array([[0, 0, 1, 1],
|
||||
[0, 0, 1, 1],
|
||||
[0, 2, 2, 2],
|
||||
[2, 2, 3, 3]], dtype=np.uint8)
|
||||
|
||||
@test_parallel()
|
||||
def test_output_angles(self):
|
||||
result = greycomatrix(self.image, [1], [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4], 4)
|
||||
assert result.shape == (4, 4, 1, 4)
|
||||
expected1 = np.array([[2, 2, 1, 0],
|
||||
[0, 2, 0, 0],
|
||||
[0, 0, 3, 1],
|
||||
[0, 0, 0, 1]], dtype=np.uint32)
|
||||
np.testing.assert_array_equal(result[:, :, 0, 0], expected1)
|
||||
expected2 = np.array([[1, 1, 3, 0],
|
||||
[0, 1, 1, 0],
|
||||
[0, 0, 0, 2],
|
||||
[0, 0, 0, 0]], dtype=np.uint32)
|
||||
np.testing.assert_array_equal(result[:, :, 0, 1], expected2)
|
||||
expected3 = np.array([[3, 0, 2, 0],
|
||||
[0, 2, 2, 0],
|
||||
[0, 0, 1, 2],
|
||||
[0, 0, 0, 0]], dtype=np.uint32)
|
||||
np.testing.assert_array_equal(result[:, :, 0, 2], expected3)
|
||||
expected4 = np.array([[2, 0, 0, 0],
|
||||
[1, 1, 2, 0],
|
||||
[0, 0, 2, 1],
|
||||
[0, 0, 0, 0]], dtype=np.uint32)
|
||||
np.testing.assert_array_equal(result[:, :, 0, 3], expected4)
|
||||
|
||||
def test_output_symmetric_1(self):
|
||||
result = greycomatrix(self.image, [1], [np.pi / 2], 4,
|
||||
symmetric=True)
|
||||
assert result.shape == (4, 4, 1, 1)
|
||||
expected = np.array([[6, 0, 2, 0],
|
||||
[0, 4, 2, 0],
|
||||
[2, 2, 2, 2],
|
||||
[0, 0, 2, 0]], dtype=np.uint32)
|
||||
np.testing.assert_array_equal(result[:, :, 0, 0], expected)
|
||||
|
||||
def test_error_raise_float(self):
|
||||
for dtype in [np.float, np.double, np.float16, np.float32, np.float64]:
|
||||
with testing.raises(ValueError):
|
||||
greycomatrix(self.image.astype(dtype), [1], [np.pi], 4)
|
||||
|
||||
def test_error_raise_int_types(self):
|
||||
for dtype in [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64]:
|
||||
with testing.raises(ValueError):
|
||||
greycomatrix(self.image.astype(dtype), [1], [np.pi])
|
||||
|
||||
def test_error_raise_negative(self):
|
||||
with testing.raises(ValueError):
|
||||
greycomatrix(self.image.astype(np.int16) - 1, [1], [np.pi], 4)
|
||||
|
||||
def test_error_raise_levels_smaller_max(self):
|
||||
with testing.raises(ValueError):
|
||||
greycomatrix(self.image - 1, [1], [np.pi], 3)
|
||||
|
||||
def test_image_data_types(self):
|
||||
for dtype in [np.uint16, np.uint32, np.uint64, np.int16, np.int32, np.int64]:
|
||||
img = self.image.astype(dtype)
|
||||
result = greycomatrix(img, [1], [np.pi / 2], 4,
|
||||
symmetric=True)
|
||||
assert result.shape == (4, 4, 1, 1)
|
||||
expected = np.array([[6, 0, 2, 0],
|
||||
[0, 4, 2, 0],
|
||||
[2, 2, 2, 2],
|
||||
[0, 0, 2, 0]], dtype=np.uint32)
|
||||
np.testing.assert_array_equal(result[:, :, 0, 0], expected)
|
||||
|
||||
return
|
||||
|
||||
def test_output_distance(self):
|
||||
im = np.array([[0, 0, 0, 0],
|
||||
[1, 0, 0, 1],
|
||||
[2, 0, 0, 2],
|
||||
[3, 0, 0, 3]], dtype=np.uint8)
|
||||
result = greycomatrix(im, [3], [0], 4, symmetric=False)
|
||||
expected = np.array([[1, 0, 0, 0],
|
||||
[0, 1, 0, 0],
|
||||
[0, 0, 1, 0],
|
||||
[0, 0, 0, 1]], dtype=np.uint32)
|
||||
np.testing.assert_array_equal(result[:, :, 0, 0], expected)
|
||||
|
||||
def test_output_combo(self):
|
||||
im = np.array([[0],
|
||||
[1],
|
||||
[2],
|
||||
[3]], dtype=np.uint8)
|
||||
result = greycomatrix(im, [1, 2], [0, np.pi / 2], 4)
|
||||
assert result.shape == (4, 4, 2, 2)
|
||||
|
||||
z = np.zeros((4, 4), dtype=np.uint32)
|
||||
e1 = np.array([[0, 1, 0, 0],
|
||||
[0, 0, 1, 0],
|
||||
[0, 0, 0, 1],
|
||||
[0, 0, 0, 0]], dtype=np.uint32)
|
||||
e2 = np.array([[0, 0, 1, 0],
|
||||
[0, 0, 0, 1],
|
||||
[0, 0, 0, 0],
|
||||
[0, 0, 0, 0]], dtype=np.uint32)
|
||||
|
||||
np.testing.assert_array_equal(result[:, :, 0, 0], z)
|
||||
np.testing.assert_array_equal(result[:, :, 1, 0], z)
|
||||
np.testing.assert_array_equal(result[:, :, 0, 1], e1)
|
||||
np.testing.assert_array_equal(result[:, :, 1, 1], e2)
|
||||
|
||||
def test_output_empty(self):
|
||||
result = greycomatrix(self.image, [10], [0], 4)
|
||||
np.testing.assert_array_equal(result[:, :, 0, 0],
|
||||
np.zeros((4, 4), dtype=np.uint32))
|
||||
result = greycomatrix(self.image, [10], [0], 4, normed=True)
|
||||
np.testing.assert_array_equal(result[:, :, 0, 0],
|
||||
np.zeros((4, 4), dtype=np.uint32))
|
||||
|
||||
def test_normed_symmetric(self):
|
||||
result = greycomatrix(self.image, [1, 2, 3],
|
||||
[0, np.pi / 2, np.pi], 4,
|
||||
normed=True, symmetric=True)
|
||||
for d in range(result.shape[2]):
|
||||
for a in range(result.shape[3]):
|
||||
np.testing.assert_almost_equal(result[:, :, d, a].sum(),
|
||||
1.0)
|
||||
np.testing.assert_array_equal(result[:, :, d, a],
|
||||
result[:, :, d, a].transpose())
|
||||
|
||||
def test_contrast(self):
|
||||
result = greycomatrix(self.image, [1, 2], [0], 4,
|
||||
normed=True, symmetric=True)
|
||||
result = np.round(result, 3)
|
||||
contrast = greycoprops(result, 'contrast')
|
||||
np.testing.assert_almost_equal(contrast[0, 0], 0.585, decimal=3)
|
||||
|
||||
def test_dissimilarity(self):
|
||||
result = greycomatrix(self.image, [1], [0, np.pi / 2], 4,
|
||||
normed=True, symmetric=True)
|
||||
result = np.round(result, 3)
|
||||
dissimilarity = greycoprops(result, 'dissimilarity')
|
||||
np.testing.assert_almost_equal(dissimilarity[0, 0], 0.418, decimal=3)
|
||||
|
||||
def test_dissimilarity_2(self):
|
||||
result = greycomatrix(self.image, [1, 3], [np.pi / 2], 4,
|
||||
normed=True, symmetric=True)
|
||||
result = np.round(result, 3)
|
||||
dissimilarity = greycoprops(result, 'dissimilarity')[0, 0]
|
||||
np.testing.assert_almost_equal(dissimilarity, 0.665, decimal=3)
|
||||
|
||||
def test_non_normalized_glcm(self):
|
||||
img = (np.random.random((100, 100)) * 8).astype(np.uint8)
|
||||
p = greycomatrix(img, [1, 2, 4, 5], [0, 0.25, 1, 1.5], levels=8)
|
||||
np.testing.assert_(np.max(greycoprops(p, 'correlation')) < 1.0)
|
||||
|
||||
def test_invalid_property(self):
|
||||
result = greycomatrix(self.image, [1], [0], 4)
|
||||
with testing.raises(ValueError):
|
||||
greycoprops(result, 'ABC')
|
||||
|
||||
def test_homogeneity(self):
|
||||
result = greycomatrix(self.image, [1], [0, 6], 4, normed=True,
|
||||
symmetric=True)
|
||||
homogeneity = greycoprops(result, 'homogeneity')[0, 0]
|
||||
np.testing.assert_almost_equal(homogeneity, 0.80833333)
|
||||
|
||||
def test_energy(self):
|
||||
result = greycomatrix(self.image, [1], [0, 4], 4, normed=True,
|
||||
symmetric=True)
|
||||
energy = greycoprops(result, 'energy')[0, 0]
|
||||
np.testing.assert_almost_equal(energy, 0.38188131)
|
||||
|
||||
def test_correlation(self):
|
||||
result = greycomatrix(self.image, [1, 2], [0], 4, normed=True,
|
||||
symmetric=True)
|
||||
energy = greycoprops(result, 'correlation')
|
||||
np.testing.assert_almost_equal(energy[0, 0], 0.71953255)
|
||||
np.testing.assert_almost_equal(energy[1, 0], 0.41176470)
|
||||
|
||||
def test_uniform_properties(self):
|
||||
im = np.ones((4, 4), dtype=np.uint8)
|
||||
result = greycomatrix(im, [1, 2, 8], [0, np.pi / 2], 4, normed=True,
|
||||
symmetric=True)
|
||||
for prop in ['contrast', 'dissimilarity', 'homogeneity',
|
||||
'energy', 'correlation', 'ASM']:
|
||||
greycoprops(result, prop)
|
||||
|
||||
|
||||
class TestLBP():
|
||||
|
||||
def setup(self):
|
||||
self.image = np.array([[255, 6, 255, 0, 141, 0],
|
||||
[ 48, 250, 204, 166, 223, 63],
|
||||
[ 8, 0, 159, 50, 255, 30],
|
||||
[167, 255, 63, 40, 128, 255],
|
||||
[ 0, 255, 30, 34, 255, 24],
|
||||
[146, 241, 255, 0, 189, 126]], dtype='double')
|
||||
|
||||
@test_parallel()
|
||||
def test_default(self):
|
||||
lbp = local_binary_pattern(self.image, 8, 1, 'default')
|
||||
ref = np.array([[ 0, 251, 0, 255, 96, 255],
|
||||
[143, 0, 20, 153, 64, 56],
|
||||
[238, 255, 12, 191, 0, 252],
|
||||
[129, 64., 62, 159, 199, 0],
|
||||
[255, 4, 255, 175, 0, 254],
|
||||
[ 3, 5, 0, 255, 4, 24]])
|
||||
np.testing.assert_array_equal(lbp, ref)
|
||||
|
||||
def test_ror(self):
|
||||
lbp = local_binary_pattern(self.image, 8, 1, 'ror')
|
||||
ref = np.array([[ 0, 127, 0, 255, 3, 255],
|
||||
[ 31, 0, 5, 51, 1, 7],
|
||||
[119, 255, 3, 127, 0, 63],
|
||||
[ 3, 1, 31, 63, 31, 0],
|
||||
[255, 1, 255, 95, 0, 127],
|
||||
[ 3, 5, 0, 255, 1, 3]])
|
||||
np.testing.assert_array_equal(lbp, ref)
|
||||
|
||||
def test_uniform(self):
|
||||
lbp = local_binary_pattern(self.image, 8, 1, 'uniform')
|
||||
ref = np.array([[0, 7, 0, 8, 2, 8],
|
||||
[5, 0, 9, 9, 1, 3],
|
||||
[9, 8, 2, 7, 0, 6],
|
||||
[2, 1, 5, 6, 5, 0],
|
||||
[8, 1, 8, 9, 0, 7],
|
||||
[2, 9, 0, 8, 1, 2]])
|
||||
np.testing.assert_array_equal(lbp, ref)
|
||||
|
||||
def test_var(self):
|
||||
# Test idea: mean of variance is estimate of overall variance.
|
||||
|
||||
# Fix random seed for test stability.
|
||||
np.random.seed(13141516)
|
||||
|
||||
# Create random image with known variance.
|
||||
image = np.random.rand(500, 500)
|
||||
target_std = 0.3
|
||||
image = image / image.std() * target_std
|
||||
|
||||
# Use P=4 to avoid interpolation effects
|
||||
P, R = 4, 1
|
||||
lbp = local_binary_pattern(image, P, R, 'var')
|
||||
|
||||
# Take central part to avoid border effect.
|
||||
lbp = lbp[5:-5, 5:-5]
|
||||
|
||||
# The LBP variance is biased (ddof=0), correct for that.
|
||||
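        # Worked arithmetic for the correction below (P and the ddof=0
        # convention are taken from the comments above): for P i.i.d.
        # samples, E[biased variance] = sigma**2 * (P - 1) / P, so with
        # target_std = 0.3 and P = 4 the expectation is
        # 0.09 * 3 / 4 = 0.0675.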
expected = target_std**2 * (P-1)/P
|
||||
|
||||
np.testing.assert_almost_equal(lbp.mean(), expected, 4)
|
||||
|
||||
def test_nri_uniform(self):
|
||||
lbp = local_binary_pattern(self.image, 8, 1, 'nri_uniform')
|
||||
ref = np.array([[ 0, 54, 0, 57, 12, 57],
|
||||
[34, 0, 58, 58, 3, 22],
|
||||
[58, 57, 15, 50, 0, 47],
|
||||
[10, 3, 40, 42, 35, 0],
|
||||
[57, 7, 57, 58, 0, 56],
|
||||
[ 9, 58, 0, 57, 7, 14]])
|
||||
np.testing.assert_array_almost_equal(lbp, ref)
|
||||
|
||||
|
||||
class TestMBLBP():
|
||||
|
||||
def test_single_mblbp(self):
|
||||
|
||||
# Create dummy matrix where first and fifth rectangles have greater
|
||||
# value than the central one. Therefore, the following bits
|
||||
# should be 1.
|
||||
test_img = np.zeros((9, 9), dtype='uint8')
|
||||
test_img[3:6, 3:6] = 1
|
||||
test_img[:3, :3] = 255
|
||||
test_img[6:, 6:] = 255
|
||||
|
||||
# MB-LBP is filled in reverse order. So the first and fifth bits from
|
||||
# the end should be filled.
|
||||
correct_answer = 0b10001000
|
||||
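        # Arithmetic check of the value above: the two set bits are 2**7
        # and 2**3, so 0b10001000 == 128 + 8 == 136.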
|
||||
int_img = integral_image(test_img)
|
||||
|
||||
lbp_code = multiblock_lbp(int_img, 0, 0, 3, 3)
|
||||
|
||||
np.testing.assert_equal(lbp_code, correct_answer)
|
81
venv/Lib/site-packages/skimage/feature/tests/test_util.py
Normal file
|
@@ -0,0 +1,81 @@
|
|||
import numpy as np
|
||||
try:
|
||||
import matplotlib.pyplot as plt
|
||||
except ImportError:
|
||||
plt = None
|
||||
|
||||
from skimage._shared.testing import assert_equal
|
||||
|
||||
from skimage.feature.util import (FeatureDetector, DescriptorExtractor,
|
||||
_prepare_grayscale_input_2D,
|
||||
_mask_border_keypoints, plot_matches)
|
||||
|
||||
from skimage._shared import testing
|
||||
|
||||
|
||||
def test_feature_detector():
|
||||
with testing.raises(NotImplementedError):
|
||||
FeatureDetector().detect(None)
|
||||
|
||||
|
||||
def test_descriptor_extractor():
|
||||
with testing.raises(NotImplementedError):
|
||||
DescriptorExtractor().extract(None, None)
|
||||
|
||||
|
||||
def test_prepare_grayscale_input_2D():
|
||||
with testing.raises(ValueError):
|
||||
_prepare_grayscale_input_2D(np.zeros((3, 3, 3)))
|
||||
with testing.raises(ValueError):
|
||||
_prepare_grayscale_input_2D(np.zeros((3, 1)))
|
||||
with testing.raises(ValueError):
|
||||
_prepare_grayscale_input_2D(np.zeros((3, 1, 1)))
|
||||
img = _prepare_grayscale_input_2D(np.zeros((3, 3)))
|
||||
img = _prepare_grayscale_input_2D(np.zeros((3, 3, 1)))
|
||||
img = _prepare_grayscale_input_2D(np.zeros((1, 3, 3)))
|
||||
|
||||
|
||||
def test_mask_border_keypoints():
|
||||
keypoints = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
|
||||
assert_equal(_mask_border_keypoints((10, 10), keypoints, 0),
|
||||
[1, 1, 1, 1, 1])
|
||||
assert_equal(_mask_border_keypoints((10, 10), keypoints, 2),
|
||||
[0, 0, 1, 1, 1])
|
||||
assert_equal(_mask_border_keypoints((4, 4), keypoints, 2),
|
||||
[0, 0, 1, 0, 0])
|
||||
assert_equal(_mask_border_keypoints((10, 10), keypoints, 5),
|
||||
[0, 0, 0, 0, 0])
|
||||
assert_equal(_mask_border_keypoints((10, 10), keypoints, 4),
|
||||
[0, 0, 0, 0, 1])
|
||||
|
||||
|
||||
@testing.skipif(plt is None, reason="Matplotlib not installed")
|
||||
def test_plot_matches():
|
||||
fig, ax = plt.subplots(nrows=1, ncols=1)
|
||||
|
||||
shapes = (((10, 10), (10, 10)),
|
||||
((10, 10), (12, 10)),
|
||||
((10, 10), (10, 12)),
|
||||
((10, 10), (12, 12)),
|
||||
((12, 10), (10, 10)),
|
||||
((10, 12), (10, 10)),
|
||||
((12, 12), (10, 10)))
|
||||
|
||||
keypoints1 = 10 * np.random.rand(10, 2)
|
||||
keypoints2 = 10 * np.random.rand(10, 2)
|
||||
idxs1 = np.random.randint(10, size=10)
|
||||
idxs2 = np.random.randint(10, size=10)
|
||||
matches = np.column_stack((idxs1, idxs2))
|
||||
|
||||
for shape1, shape2 in shapes:
|
||||
img1 = np.zeros(shape1)
|
||||
img2 = np.zeros(shape2)
|
||||
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches)
|
||||
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
|
||||
only_matches=True)
|
||||
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
|
||||
keypoints_color='r')
|
||||
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
|
||||
matches_color='r')
|
||||
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
|
||||
alignment='vertical')
|
493
venv/Lib/site-packages/skimage/feature/texture.py
Normal file
|
@@ -0,0 +1,493 @@
|
|||
"""
|
||||
Methods to characterize image textures.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
import warnings
|
||||
from .._shared.utils import check_nD
|
||||
from ..util import img_as_float
|
||||
from ..color import gray2rgb
|
||||
from ._texture import (_glcm_loop,
|
||||
_local_binary_pattern,
|
||||
_multiblock_lbp)
|
||||
|
||||
|
||||
def greycomatrix(image, distances, angles, levels=None, symmetric=False,
|
||||
normed=False):
|
||||
"""Calculate the grey-level co-occurrence matrix.
|
||||
|
||||
A grey level co-occurrence matrix is a histogram of co-occurring
|
||||
greyscale values at a given offset over an image.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : array_like
|
||||
Integer-typed input image. Only non-negative pixel values are supported.
|
||||
If type is other than uint8, the argument `levels` needs to be set.
|
||||
distances : array_like
|
||||
List of pixel pair distance offsets.
|
||||
angles : array_like
|
||||
List of pixel pair angles in radians.
|
||||
levels : int, optional
|
||||
The input image should contain integers in [0, `levels`-1],
|
||||
where levels indicate the number of grey-levels counted
|
||||
(typically 256 for an 8-bit image). This argument is required for
|
||||
16-bit images or higher and is typically the maximum of the image.
|
||||
As the output matrix is at least `levels` x `levels`, it might
|
||||
be preferable to use binning of the input image rather than
|
||||
large values for `levels`.
|
||||
symmetric : bool, optional
|
||||
If True, the output matrix `P[:, :, d, theta]` is symmetric. This
|
||||
is accomplished by ignoring the order of value pairs, so both
|
||||
(i, j) and (j, i) are accumulated when (i, j) is encountered
|
||||
for a given offset. The default is False.
|
||||
normed : bool, optional
|
||||
If True, normalize each matrix `P[:, :, d, theta]` by dividing
|
||||
by the total number of accumulated co-occurrences for the given
|
||||
offset. The elements of the resulting matrix sum to 1. The
|
||||
default is False.
|
||||
|
||||
Returns
|
||||
-------
|
||||
P : 4-D ndarray
|
||||
The grey-level co-occurrence histogram. The value
|
||||
`P[i,j,d,theta]` is the number of times that grey-level `j`
|
||||
occurs at a distance `d` and at an angle `theta` from
|
||||
grey-level `i`. If `normed` is `False`, the output is of
|
||||
type uint32, otherwise it is float64. The dimensions are:
|
||||
levels x levels x number of distances x number of angles.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] The GLCM Tutorial Home Page,
|
||||
http://www.fp.ucalgary.ca/mhallbey/tutorial.htm
|
||||
.. [2] Haralick, RM.; Shanmugam, K.,
|
||||
"Textural features for image classification"
|
||||
IEEE Transactions on systems, man, and cybernetics 6 (1973): 610-621.
|
||||
:DOI:`10.1109/TSMC.1973.4309314`
|
||||
.. [3] Pattern Recognition Engineering, Morton Nadler & Eric P.
|
||||
Smith
|
||||
.. [4] Wikipedia, https://en.wikipedia.org/wiki/Co-occurrence_matrix
|
||||
|
||||
|
||||
Examples
|
||||
--------
|
||||
Compute 2 GLCMs: One for a 1-pixel offset to the right, and one
|
||||
for a 1-pixel offset upwards.
|
||||
|
||||
>>> image = np.array([[0, 0, 1, 1],
|
||||
... [0, 0, 1, 1],
|
||||
... [0, 2, 2, 2],
|
||||
... [2, 2, 3, 3]], dtype=np.uint8)
|
||||
>>> result = greycomatrix(image, [1], [0, np.pi/4, np.pi/2, 3*np.pi/4],
|
||||
... levels=4)
|
||||
>>> result[:, :, 0, 0]
|
||||
array([[2, 2, 1, 0],
|
||||
[0, 2, 0, 0],
|
||||
[0, 0, 3, 1],
|
||||
[0, 0, 0, 1]], dtype=uint32)
|
||||
>>> result[:, :, 0, 1]
|
||||
array([[1, 1, 3, 0],
|
||||
[0, 1, 1, 0],
|
||||
[0, 0, 0, 2],
|
||||
[0, 0, 0, 0]], dtype=uint32)
|
||||
>>> result[:, :, 0, 2]
|
||||
array([[3, 0, 2, 0],
|
||||
[0, 2, 2, 0],
|
||||
[0, 0, 1, 2],
|
||||
[0, 0, 0, 0]], dtype=uint32)
|
||||
>>> result[:, :, 0, 3]
|
||||
array([[2, 0, 0, 0],
|
||||
[1, 1, 2, 0],
|
||||
[0, 0, 2, 1],
|
||||
[0, 0, 0, 0]], dtype=uint32)
|
||||
|
||||
"""
|
||||
check_nD(image, 2)
|
||||
check_nD(distances, 1, 'distances')
|
||||
check_nD(angles, 1, 'angles')
|
||||
|
||||
image = np.ascontiguousarray(image)
|
||||
|
||||
image_max = image.max()
|
||||
|
||||
if np.issubdtype(image.dtype, np.floating):
|
||||
raise ValueError("Float images are not supported by greycomatrix. "
|
||||
"Convert the image to an unsigned integer type.")
|
||||
|
||||
# for image type > 8bit, levels must be set.
|
||||
if image.dtype not in (np.uint8, np.int8) and levels is None:
|
||||
raise ValueError("The levels argument is required for data types "
|
||||
"other than uint8. The resulting matrix will be at "
|
||||
"least levels ** 2 in size.")
|
||||
|
||||
if np.issubdtype(image.dtype, np.signedinteger) and np.any(image < 0):
|
||||
raise ValueError("Negative-valued images are not supported.")
|
||||
|
||||
if levels is None:
|
||||
levels = 256
|
||||
|
||||
if image_max >= levels:
|
||||
raise ValueError("The maximum grayscale value in the image should be "
|
||||
"smaller than the number of levels.")
|
||||
|
||||
distances = np.ascontiguousarray(distances, dtype=np.float64)
|
||||
angles = np.ascontiguousarray(angles, dtype=np.float64)
|
||||
|
||||
P = np.zeros((levels, levels, len(distances), len(angles)),
|
||||
dtype=np.uint32, order='C')
|
||||
|
||||
# count co-occurences
|
||||
_glcm_loop(image, distances, angles, levels, P)
|
||||
|
||||
# make each GLMC symmetric
|
||||
if symmetric:
|
||||
Pt = np.transpose(P, (1, 0, 2, 3))
|
||||
P = P + Pt
|
||||
|
||||
# normalize each GLCM
|
||||
if normed:
|
||||
P = P.astype(np.float64)
|
||||
glcm_sums = np.apply_over_axes(np.sum, P, axes=(0, 1))
|
||||
glcm_sums[glcm_sums == 0] = 1
|
||||
P /= glcm_sums
|
||||
|
||||
return P
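
# Illustrative usage sketch (not part of the upstream scikit-image module):
# it exercises `greycomatrix` as defined above with an assumed toy 4x4 image,
# showing that `symmetric=True` counts each pair in both directions and that
# `normed=True` makes every P[:, :, d, a] slice sum to 1.
def _example_greycomatrix():
    image = np.array([[0, 0, 1, 1],
                      [0, 0, 1, 1],
                      [0, 2, 2, 2],
                      [2, 2, 3, 3]], dtype=np.uint8)
    glcm = greycomatrix(image, distances=[1], angles=[0],
                        levels=4, symmetric=True, normed=True)
    assert glcm.shape == (4, 4, 1, 1)
    assert np.isclose(glcm[:, :, 0, 0].sum(), 1.0)
    return glcm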
|
||||
|
||||
|
||||
def greycoprops(P, prop='contrast'):
|
||||
"""Calculate texture properties of a GLCM.
|
||||
|
||||
Compute a feature of a grey level co-occurrence matrix to serve as
|
||||
a compact summary of the matrix. The properties are computed as
|
||||
follows:
|
||||
|
||||
- 'contrast': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}(i-j)^2`
|
||||
- 'dissimilarity': :math:`\\sum_{i,j=0}^{levels-1}P_{i,j}|i-j|`
|
||||
- 'homogeneity': :math:`\\sum_{i,j=0}^{levels-1}\\frac{P_{i,j}}{1+(i-j)^2}`
|
||||
- 'ASM': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}^2`
|
||||
- 'energy': :math:`\\sqrt{ASM}`
|
||||
- 'correlation':
|
||||
.. math:: \\sum_{i,j=0}^{levels-1} P_{i,j}\\left[\\frac{(i-\\mu_i) \\
|
||||
(j-\\mu_j)}{\\sqrt{(\\sigma_i^2)(\\sigma_j^2)}}\\right]
|
||||
|
||||
Each GLCM is normalized to have a sum of 1 before the computation of texture
|
||||
properties.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
P : ndarray
|
||||
Input array. `P` is the grey-level co-occurrence histogram
|
||||
for which to compute the specified property. The value
|
||||
`P[i,j,d,theta]` is the number of times that grey-level j
|
||||
occurs at a distance d and at an angle theta from
|
||||
grey-level i.
|
||||
prop : {'contrast', 'dissimilarity', 'homogeneity', 'energy', \
|
||||
'correlation', 'ASM'}, optional
|
||||
The property of the GLCM to compute. The default is 'contrast'.
|
||||
|
||||
Returns
|
||||
-------
|
||||
results : 2-D ndarray
|
||||
2-dimensional array. `results[d, a]` is the property 'prop' for
|
||||
the d'th distance and the a'th angle.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] The GLCM Tutorial Home Page,
|
||||
http://www.fp.ucalgary.ca/mhallbey/tutorial.htm
|
||||
|
||||
Examples
|
||||
--------
|
||||
Compute the contrast for GLCMs with distances [1, 2] and angles
|
||||
[0 degrees, 90 degrees]
|
||||
|
||||
>>> image = np.array([[0, 0, 1, 1],
|
||||
... [0, 0, 1, 1],
|
||||
... [0, 2, 2, 2],
|
||||
... [2, 2, 3, 3]], dtype=np.uint8)
|
||||
>>> g = greycomatrix(image, [1, 2], [0, np.pi/2], levels=4,
|
||||
... normed=True, symmetric=True)
|
||||
>>> contrast = greycoprops(g, 'contrast')
|
||||
>>> contrast
|
||||
array([[0.58333333, 1. ],
|
||||
[1.25 , 2.75 ]])
|
||||
|
||||
"""
|
||||
check_nD(P, 4, 'P')
|
||||
|
||||
(num_level, num_level2, num_dist, num_angle) = P.shape
|
||||
if num_level != num_level2:
|
||||
raise ValueError('num_level and num_level2 must be equal.')
|
||||
if num_dist <= 0:
|
||||
raise ValueError('num_dist must be positive.')
|
||||
if num_angle <= 0:
|
||||
raise ValueError('num_angle must be positive.')
|
||||
|
||||
# normalize each GLCM
|
||||
P = P.astype(np.float64)
|
||||
glcm_sums = np.apply_over_axes(np.sum, P, axes=(0, 1))
|
||||
glcm_sums[glcm_sums == 0] = 1
|
||||
P /= glcm_sums
|
||||
|
||||
# create weights for specified property
|
||||
I, J = np.ogrid[0:num_level, 0:num_level]
|
||||
if prop == 'contrast':
|
||||
weights = (I - J) ** 2
|
||||
elif prop == 'dissimilarity':
|
||||
weights = np.abs(I - J)
|
||||
elif prop == 'homogeneity':
|
||||
weights = 1. / (1. + (I - J) ** 2)
|
||||
elif prop in ['ASM', 'energy', 'correlation']:
|
||||
pass
|
||||
else:
|
||||
raise ValueError('%s is an invalid property' % (prop))
|
||||
|
||||
# compute property for each GLCM
|
||||
if prop == 'energy':
|
||||
asm = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]
|
||||
results = np.sqrt(asm)
|
||||
elif prop == 'ASM':
|
||||
results = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0]
|
||||
elif prop == 'correlation':
|
||||
results = np.zeros((num_dist, num_angle), dtype=np.float64)
|
||||
I = np.array(range(num_level)).reshape((num_level, 1, 1, 1))
|
||||
J = np.array(range(num_level)).reshape((1, num_level, 1, 1))
|
||||
diff_i = I - np.apply_over_axes(np.sum, (I * P), axes=(0, 1))[0, 0]
|
||||
diff_j = J - np.apply_over_axes(np.sum, (J * P), axes=(0, 1))[0, 0]
|
||||
|
||||
std_i = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_i) ** 2),
|
||||
axes=(0, 1))[0, 0])
|
||||
std_j = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_j) ** 2),
|
||||
axes=(0, 1))[0, 0])
|
||||
cov = np.apply_over_axes(np.sum, (P * (diff_i * diff_j)),
|
||||
axes=(0, 1))[0, 0]
|
||||
|
||||
# handle the special case of standard deviations near zero
|
||||
mask_0 = std_i < 1e-15
|
||||
mask_0[std_j < 1e-15] = True
|
||||
results[mask_0] = 1
|
||||
|
||||
# handle the standard case
|
||||
mask_1 = ~mask_0
|
||||
results[mask_1] = cov[mask_1] / (std_i[mask_1] * std_j[mask_1])
|
||||
elif prop in ['contrast', 'dissimilarity', 'homogeneity']:
|
||||
weights = weights.reshape((num_level, num_level, 1, 1))
|
||||
results = np.apply_over_axes(np.sum, (P * weights), axes=(0, 1))[0, 0]
|
||||
|
||||
return results
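
# Illustrative usage sketch (not part of the upstream scikit-image module):
# it relies only on `greycomatrix`/`greycoprops` defined above plus NumPy and
# checks two relationships implied by the property formulas in the docstring:
# 'energy' is the square root of 'ASM', and 'contrast' equals the weighted
# sum of P[i, j] * (i - j)**2. The toy image is an assumed input.
def _example_greycoprops():
    image = np.array([[0, 0, 1, 1],
                      [0, 0, 1, 1],
                      [0, 2, 2, 2],
                      [2, 2, 3, 3]], dtype=np.uint8)
    glcm = greycomatrix(image, [1], [0], levels=4,
                        symmetric=True, normed=True)
    assert np.allclose(greycoprops(glcm, 'energy'),
                       np.sqrt(greycoprops(glcm, 'ASM')))
    i, j = np.ogrid[0:4, 0:4]
    manual_contrast = (glcm[:, :, 0, 0] * (i - j) ** 2).sum()
    assert np.isclose(greycoprops(glcm, 'contrast')[0, 0], manual_contrast)
    return greycoprops(glcm, 'contrast')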
|
||||
|
||||
|
||||
def local_binary_pattern(image, P, R, method='default'):
|
||||
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
|
||||
|
||||
LBP is an invariant descriptor that can be used for texture classification.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : (N, M) array
|
||||
Graylevel image.
|
||||
P : int
|
||||
Number of circularly symmetric neighbour set points (quantization of
|
||||
the angular space).
|
||||
R : float
|
||||
Radius of circle (spatial resolution of the operator).
|
||||
method : {'default', 'ror', 'uniform', 'nri_uniform', 'var'}, optional
|
||||
Method to determine the pattern.
|
||||
|
||||
* 'default': original local binary pattern which is gray scale but not
|
||||
rotation invariant.
|
||||
* 'ror': extension of default implementation which is gray scale and
|
||||
rotation invariant.
|
||||
* 'uniform': improved rotation invariance with uniform patterns and
|
||||
finer quantization of the angular space which is gray scale and
|
||||
rotation invariant.
|
||||
* 'nri_uniform': non rotation-invariant uniform patterns variant
|
||||
which is only gray scale invariant [2]_.
|
||||
* 'var': rotation invariant variance measures of the contrast of local
|
||||
image texture which is rotation but not gray scale invariant.
|
||||
|
||||
Returns
|
||||
-------
|
||||
output : (N, M) array
|
||||
LBP image.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
|
||||
Classification with Local Binary Patterns.
|
||||
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
|
||||
http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/pdf_94.pdf, 2002.
|
||||
.. [2] Face recognition with local binary patterns.
|
||||
Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
|
||||
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
|
||||
2004.
|
||||
"""
|
||||
check_nD(image, 2)
|
||||
|
||||
methods = {
|
||||
'default': ord('D'),
|
||||
'ror': ord('R'),
|
||||
'uniform': ord('U'),
|
||||
'nri_uniform': ord('N'),
|
||||
'var': ord('V')
|
||||
}
|
||||
image = np.ascontiguousarray(image, dtype=np.double)
|
||||
output = _local_binary_pattern(image, P, R, methods[method.lower()])
|
||||
return output
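
# Illustrative usage sketch (not part of the upstream scikit-image module):
# a typical texture-classification workflow turns the LBP code image into a
# normalized histogram. With method='uniform' and `n_points` sampling points
# the codes lie in [0, n_points + 1], so n_points + 2 bins cover them. The
# seeded random image and the bin layout are assumptions of this sketch.
def _example_local_binary_pattern():
    rng = np.random.default_rng(0)
    image = rng.random((64, 64))
    n_points, radius = 8, 1.0
    codes = local_binary_pattern(image, n_points, radius, method='uniform')
    hist, _ = np.histogram(codes, bins=np.arange(n_points + 3), density=True)
    return hist  # feature vector of length n_points + 2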
|
||||
|
||||
|
||||
def multiblock_lbp(int_image, r, c, width, height):
|
||||
"""Multi-block local binary pattern (MB-LBP).
|
||||
|
||||
The features are calculated similarly to local binary patterns (LBPs),
|
||||
(See :py:meth:`local_binary_pattern`) except that summed blocks are
|
||||
used instead of individual pixel values.
|
||||
|
||||
MB-LBP is an extension of LBP that can be computed on multiple scales
|
||||
in constant time using the integral image. Nine equally-sized rectangles
|
||||
are used to compute a feature. For each rectangle, the sum of the pixel
|
||||
intensities is computed. Comparisons of these sums to that of the central
|
||||
rectangle determine the feature, similarly to LBP.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
int_image : (N, M) array
|
||||
Integral image.
|
||||
r : int
|
||||
Row-coordinate of top left corner of a rectangle containing feature.
|
||||
c : int
|
||||
Column-coordinate of top left corner of a rectangle containing feature.
|
||||
width : int
|
||||
Width of one of the 9 equal rectangles that will be used to compute
|
||||
a feature.
|
||||
height : int
|
||||
Height of one of the 9 equal rectangles that will be used to compute
|
||||
a feature.
|
||||
|
||||
Returns
|
||||
-------
|
||||
output : int
|
||||
8-bit MB-LBP feature descriptor.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Face Detection Based on Multi-Block LBP
|
||||
Representation. Lun Zhang, Rufeng Chu, Shiming Xiang, Shengcai Liao,
|
||||
Stan Z. Li
|
||||
http://www.cbsr.ia.ac.cn/users/scliao/papers/Zhang-ICB07-MBLBP.pdf
|
||||
"""
|
||||
|
||||
int_image = np.ascontiguousarray(int_image, dtype=np.float32)
|
||||
lbp_code = _multiblock_lbp(int_image, r, c, width, height)
|
||||
return lbp_code
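
# Illustrative usage sketch (not part of the upstream scikit-image module):
# MB-LBP expects an integral image, which this sketch assumes is built with
# `skimage.transform.integral_image`. With (height, width) = (20, 20) and
# (r, c) = (0, 0), the 3x3 grid of blocks exactly covers the assumed 60x60
# test image.
def _example_multiblock_lbp():
    from ..transform import integral_image
    rng = np.random.default_rng(0)
    img = rng.random((60, 60))
    int_img = integral_image(img)
    code = multiblock_lbp(int_img, r=0, c=0, width=20, height=20)
    return code  # 8-bit descriptor in the range [0, 255]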
|
||||
|
||||
|
||||
def draw_multiblock_lbp(image, r, c, width, height,
|
||||
lbp_code=0,
|
||||
color_greater_block=(1, 1, 1),
|
||||
color_less_block=(0, 0.69, 0.96),
|
||||
alpha=0.5
|
||||
):
|
||||
"""Multi-block local binary pattern visualization.
|
||||
|
||||
Blocks with higher sums are colored with alpha-blended white rectangles,
|
||||
whereas blocks with lower sums are colored alpha-blended cyan. Colors
|
||||
and the `alpha` parameter can be changed.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : ndarray of float or uint
|
||||
Image on which to visualize the pattern.
|
||||
r : int
|
||||
Row-coordinate of top left corner of a rectangle containing feature.
|
||||
c : int
|
||||
Column-coordinate of top left corner of a rectangle containing feature.
|
||||
width : int
|
||||
Width of one of 9 equal rectangles that will be used to compute
|
||||
a feature.
|
||||
height : int
|
||||
Height of one of 9 equal rectangles that will be used to compute
|
||||
a feature.
|
||||
lbp_code : int
|
||||
The descriptor of feature to visualize. If not provided, the
|
||||
descriptor with 0 value will be used.
|
||||
color_greater_block : tuple of 3 floats
|
||||
Floats specifying the color for the block that has greater
|
||||
intensity value. They should be in the range [0, 1].
|
||||
Corresponding values define (R, G, B) values. Default value
|
||||
is white (1, 1, 1).
|
||||
color_less_block : tuple of 3 floats
|
||||
Floats specifying the color for the block that has less intensity
|
||||
value. They should be in the range [0, 1]. Corresponding values define
|
||||
(R, G, B) values. Default value is cyan (0, 0.69, 0.96).
|
||||
alpha : float
|
||||
Value in the range [0, 1] that specifies opacity of visualization.
|
||||
0 - fully transparent, 1 - opaque.
|
||||
|
||||
Returns
|
||||
-------
|
||||
output : ndarray of float
|
||||
Image with MB-LBP visualization.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Face Detection Based on Multi-Block LBP
|
||||
Representation. Lun Zhang, Rufeng Chu, Shiming Xiang, Shengcai Liao,
|
||||
Stan Z. Li
|
||||
http://www.cbsr.ia.ac.cn/users/scliao/papers/Zhang-ICB07-MBLBP.pdf
|
||||
"""
|
||||
|
||||
# Default colors for regions.
|
||||
# White is for the blocks that are brighter.
|
||||
# Cyan is for the blocks that have less intensity.
|
||||
color_greater_block = np.asarray(color_greater_block, dtype=np.float64)
|
||||
color_less_block = np.asarray(color_less_block, dtype=np.float64)
|
||||
|
||||
# Copy array to avoid the changes to the original one.
|
||||
output = np.copy(image)
|
||||
|
||||
# As the visualization uses RGB color we need 3 bands.
|
||||
if len(image.shape) < 3:
|
||||
output = gray2rgb(image)
|
||||
|
||||
# Colors are specified in floats.
|
||||
output = img_as_float(output)
|
||||
|
||||
# Offsets of neighbour rectangles relative to central one.
|
||||
# It has order starting from top left and going clockwise.
|
||||
neighbour_rect_offsets = ((-1, -1), (-1, 0), (-1, 1),
|
||||
(0, 1), (1, 1), (1, 0),
|
||||
(1, -1), (0, -1))
|
||||
|
||||
# Pre-multiply the offsets with width and height.
|
||||
neighbour_rect_offsets = np.array(neighbour_rect_offsets)
|
||||
neighbour_rect_offsets[:, 0] *= height
|
||||
neighbour_rect_offsets[:, 1] *= width
|
||||
|
||||
# Top-left coordinates of central rectangle.
|
||||
central_rect_r = r + height
|
||||
central_rect_c = c + width
|
||||
|
||||
for element_num, offset in enumerate(neighbour_rect_offsets):
|
||||
|
||||
offset_r, offset_c = offset
|
||||
|
||||
curr_r = central_rect_r + offset_r
|
||||
curr_c = central_rect_c + offset_c
|
||||
|
||||
has_greater_value = lbp_code & (1 << (7-element_num))
|
||||
|
||||
# Mix-in the visualization colors.
|
||||
if has_greater_value:
|
||||
new_value = ((1-alpha) *
|
||||
output[curr_r:curr_r+height, curr_c:curr_c+width] +
|
||||
alpha * color_greater_block)
|
||||
output[curr_r:curr_r+height, curr_c:curr_c+width] = new_value
|
||||
else:
|
||||
new_value = ((1-alpha) *
|
||||
output[curr_r:curr_r+height, curr_c:curr_c+width] +
|
||||
alpha * color_less_block)
|
||||
output[curr_r:curr_r+height, curr_c:curr_c+width] = new_value
|
||||
|
||||
return output
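
# Illustrative usage sketch (not part of the upstream scikit-image module):
# it combines `multiblock_lbp` and `draw_multiblock_lbp` on the same window
# so the overlay matches the computed code. The 60x60 random image and the
# use of `skimage.transform.integral_image` are assumptions of the sketch.
def _example_draw_multiblock_lbp():
    from ..transform import integral_image
    rng = np.random.default_rng(0)
    img = rng.random((60, 60))
    code = multiblock_lbp(integral_image(img), 0, 0, 20, 20)
    overlay = draw_multiblock_lbp(img, 0, 0, 20, 20,
                                  lbp_code=code, alpha=0.5)
    return overlay  # RGB float image with the 8 neighbour blocks tinted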
|
174
venv/Lib/site-packages/skimage/feature/util.py
Normal file
|
@@ -0,0 +1,174 @@
|
|||
import numpy as np
|
||||
|
||||
from ..util import img_as_float
|
||||
from .._shared.utils import check_nD
|
||||
|
||||
|
||||
class FeatureDetector(object):
|
||||
|
||||
def __init__(self):
|
||||
self.keypoints_ = np.array([])
|
||||
|
||||
def detect(self, image):
|
||||
"""Detect keypoints in image.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : 2D array
|
||||
Input image.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class DescriptorExtractor(object):
|
||||
|
||||
def __init__(self):
|
||||
self.descriptors_ = np.array([])
|
||||
|
||||
def extract(self, image, keypoints):
|
||||
"""Extract feature descriptors in image for given keypoints.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image : 2D array
|
||||
Input image.
|
||||
keypoints : (N, 2) array
|
||||
Keypoint locations as ``(row, col)``.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
|
||||
keypoints_color='k', matches_color=None, only_matches=False,
|
||||
alignment='horizontal'):
|
||||
"""Plot matched features.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
ax : matplotlib.axes.Axes
|
||||
Matches and image are drawn in this ax.
|
||||
image1 : (N, M [, 3]) array
|
||||
First grayscale or color image.
|
||||
image2 : (N, M [, 3]) array
|
||||
Second grayscale or color image.
|
||||
keypoints1 : (K1, 2) array
|
||||
First keypoint coordinates as ``(row, col)``.
|
||||
keypoints2 : (K2, 2) array
|
||||
Second keypoint coordinates as ``(row, col)``.
|
||||
matches : (Q, 2) array
|
||||
Indices of corresponding matches in first and second set of
|
||||
descriptors, where ``matches[:, 0]`` denote the indices in the first
|
||||
and ``matches[:, 1]`` the indices in the second set of descriptors.
|
||||
keypoints_color : matplotlib color, optional
|
||||
Color for keypoint locations.
|
||||
matches_color : matplotlib color, optional
|
||||
Color for lines which connect keypoint matches. By default the
|
||||
color is chosen randomly.
|
||||
only_matches : bool, optional
|
||||
Whether to only plot matches and not plot the keypoint locations.
|
||||
alignment : {'horizontal', 'vertical'}, optional
|
||||
Whether to show images side by side, ``'horizontal'``, or one above
|
||||
the other, ``'vertical'``.
|
||||
|
||||
"""
|
||||
|
||||
image1 = img_as_float(image1)
|
||||
image2 = img_as_float(image2)
|
||||
|
||||
new_shape1 = list(image1.shape)
|
||||
new_shape2 = list(image2.shape)
|
||||
|
||||
if image1.shape[0] < image2.shape[0]:
|
||||
new_shape1[0] = image2.shape[0]
|
||||
elif image1.shape[0] > image2.shape[0]:
|
||||
new_shape2[0] = image1.shape[0]
|
||||
|
||||
if image1.shape[1] < image2.shape[1]:
|
||||
new_shape1[1] = image2.shape[1]
|
||||
elif image1.shape[1] > image2.shape[1]:
|
||||
new_shape2[1] = image1.shape[1]
|
||||
|
||||
if new_shape1 != image1.shape:
|
||||
new_image1 = np.zeros(new_shape1, dtype=image1.dtype)
|
||||
new_image1[:image1.shape[0], :image1.shape[1]] = image1
|
||||
image1 = new_image1
|
||||
|
||||
if new_shape2 != image2.shape:
|
||||
new_image2 = np.zeros(new_shape2, dtype=image2.dtype)
|
||||
new_image2[:image2.shape[0], :image2.shape[1]] = image2
|
||||
image2 = new_image2
|
||||
|
||||
offset = np.array(image1.shape)
|
||||
if alignment == 'horizontal':
|
||||
image = np.concatenate([image1, image2], axis=1)
|
||||
offset[0] = 0
|
||||
elif alignment == 'vertical':
|
||||
image = np.concatenate([image1, image2], axis=0)
|
||||
offset[1] = 0
|
||||
else:
|
||||
mesg = ("plot_matches accepts either 'horizontal' or 'vertical' for "
|
||||
"alignment, but '{}' was given. See "
|
||||
"https://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.plot_matches " # noqa
|
||||
"for details.").format(alignment)
|
||||
raise ValueError(mesg)
|
||||
|
||||
if not only_matches:
|
||||
ax.scatter(keypoints1[:, 1], keypoints1[:, 0],
|
||||
facecolors='none', edgecolors=keypoints_color)
|
||||
ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0] + offset[0],
|
||||
facecolors='none', edgecolors=keypoints_color)
|
||||
|
||||
ax.imshow(image, cmap='gray')
|
||||
ax.axis((0, image1.shape[1] + offset[1], image1.shape[0] + offset[0], 0))
|
||||
|
||||
for i in range(matches.shape[0]):
|
||||
idx1 = matches[i, 0]
|
||||
idx2 = matches[i, 1]
|
||||
|
||||
if matches_color is None:
|
||||
color = np.random.rand(3)
|
||||
else:
|
||||
color = matches_color
|
||||
|
||||
ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]),
|
||||
(keypoints1[idx1, 0], keypoints2[idx2, 0] + offset[0]),
|
||||
'-', color=color)
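
# Illustrative usage sketch (not part of the upstream scikit-image module):
# it drives `plot_matches` with random keypoints and an identity match list.
# The blank images, the seeded RNG and the matplotlib import are assumptions
# of this sketch, and matplotlib must be installed for it to run.
def _example_plot_matches():
    import matplotlib.pyplot as plt
    rng = np.random.default_rng(0)
    img1 = np.zeros((40, 40))
    img2 = np.zeros((40, 50))
    keypoints1 = 40 * rng.random((8, 2))
    keypoints2 = 40 * rng.random((8, 2))
    matches = np.column_stack((np.arange(8), np.arange(8)))
    fig, ax = plt.subplots()
    plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
                 matches_color='y')
    return fig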
|
||||
|
||||
|
||||
def _prepare_grayscale_input_2D(image):
|
||||
image = np.squeeze(image)
|
||||
check_nD(image, 2)
|
||||
return img_as_float(image)
|
||||
|
||||
|
||||
def _mask_border_keypoints(image_shape, keypoints, distance):
|
||||
Mask coordinates that are within a certain distance of the image border.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
image_shape : (2, ) array_like
|
||||
Shape of the image as ``(rows, cols)``.
|
||||
keypoints : (N, 2) array
|
||||
Keypoint coordinates as ``(rows, cols)``.
|
||||
distance : int
|
||||
Image border distance.
|
||||
|
||||
Returns
|
||||
-------
|
||||
mask : (N, ) bool array
|
||||
Mask indicating if pixels are within the image (``True``) or in the
|
||||
border region of the image (``False``).
|
||||
|
||||
"""
|
||||
|
||||
rows = image_shape[0]
|
||||
cols = image_shape[1]
|
||||
|
||||
mask = (((distance - 1) < keypoints[:, 0])
|
||||
& (keypoints[:, 0] < (rows - distance + 1))
|
||||
& ((distance - 1) < keypoints[:, 1])
|
||||
& (keypoints[:, 1] < (cols - distance + 1)))
|
||||
|
||||
return mask
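
# Illustrative sketch (not part of the upstream scikit-image module): with
# distance=2 on a 10x10 image, only keypoints whose row and column both lie
# in [2, 8] are kept. The sample coordinates below are assumed test values.
def _example_mask_border_keypoints():
    keypoints = np.array([[1, 1], [2, 2], [5, 5], [9, 9]])
    mask = _mask_border_keypoints((10, 10), keypoints, 2)
    return mask  # -> array([False,  True,  True, False])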
|