Speed up Hessian matrix computation by jni · Pull Request #2194 · scikit-image/scikit-image · GitHub
Speed up Hessian matrix computation #2194


Merged
merged 13 commits on Jul 17, 2016
2 changes: 1 addition & 1 deletion DEPENDS.txt
@@ -1,7 +1,7 @@
Build Requirements
------------------
* `Python >= 2.7 <http://python.org>`__
* `Numpy >= 1.7.2 <http://numpy.scipy.org/>`__
* `Numpy >= 1.11 <http://numpy.scipy.org/>`__
* `Cython >= 0.23 <http://www.cython.org/>`__
* `Six >=1.4 <https://pypi.python.org/pypi/six>`__
* `SciPy >=0.9 <http://scipy.org>`__
4 changes: 2 additions & 2 deletions requirements.txt
@@ -1,6 +1,6 @@
matplotlib>=1.3.1
numpy>=1.7.2
scipy>=0.9.0
numpy>=1.11
scipy>=0.10.0
six>=1.7.3
networkx>=1.8
pillow>=2.1.0
64 changes: 27 additions & 37 deletions skimage/feature/corner.py
@@ -1,3 +1,5 @@
from itertools import combinations_with_replacement

import numpy as np
from scipy import ndimage as ndi
from scipy import stats
@@ -54,7 +56,7 @@ def structure_tensor(image, sigma=1, mode='constant', cval=0):
----------
image : ndarray
Input image.
sigma : float
sigma : float, optional
Standard deviation used for the Gaussian kernel, which is used as a
weighting function for the local summation of squared differences.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
@@ -136,46 +138,34 @@ def hessian_matrix(image, sigma=1, mode='constant', cval=0):
--------
>>> from skimage.feature import hessian_matrix
>>> square = np.zeros((5, 5))
>>> square[2, 2] = -1.0 / 1591.54943092
>>> square[2, 2] = 4
>>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
>>> Hxx
>>> Hxy
array([[ 0., 0., 0., 0., 0.],
[ 0., 1., 0., -1., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., -1., 0., 1., 0.],
[ 0., 0., 0., 0., 0.]])

"""

image = _prepare_grayscale_input_2D(image)

# Window extent which covers > 99% of the normal distribution.
window_ext = max(1, np.ceil(3 * sigma))

ky, kx = np.mgrid[-window_ext:window_ext + 1, -window_ext:window_ext + 1]

# Second derivative Gaussian kernels.
gaussian_exp = np.exp(-(kx ** 2 + ky ** 2) / (2 * sigma ** 2))
kernel_xx = 1 / (2 * np.pi * sigma ** 4) * (kx ** 2 / sigma ** 2 - 1)
kernel_xx *= gaussian_exp
kernel_xy = 1 / (2 * np.pi * sigma ** 6) * (kx * ky)
kernel_xy *= gaussian_exp
kernel_yy = kernel_xx.transpose()
image = img_as_float(image)

# Remove small kernel values.
eps = np.finfo(kernel_xx.dtype).eps
kernel_xx[np.abs(kernel_xx) < eps * np.abs(kernel_xx).max()] = 0
kernel_xy[np.abs(kernel_xy) < eps * np.abs(kernel_xy).max()] = 0
kernel_yy[np.abs(kernel_yy) < eps * np.abs(kernel_yy).max()] = 0
gaussian_filtered = ndi.gaussian_filter(image, sigma=sigma,
mode=mode, cval=cval)

Hxx = ndi.convolve(image, kernel_xx, mode=mode, cval=cval)
Hxy = ndi.convolve(image, kernel_xy, mode=mode, cval=cval)
Hyy = ndi.convolve(image, kernel_yy, mode=mode, cval=cval)
gradients = np.gradient(gaussian_filtered)
axes = range(image.ndim)
H_elems = [np.gradient(gradients[ax0], axis=ax1)
for ax0, ax1 in combinations_with_replacement(axes, 2)]

return Hxx, Hxy, Hyy
if image.ndim == 2:
Review thread on this line:

soupault (Member), Jul 17, 2016
Any plans to add a deprecation path for this?

(Member)
+1

jni (Member, Author)
There's so much code that depends on this... I would like to leave deprecation for a later PR. I'll raise an issue when this gets merged.

(Member)
@jni Fine with me.

# The legacy 2D code followed (x, y) convention, so we swap the axis
# order to maintain compatibility with old code
H_elems.reverse()
return H_elems
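
For context, the speedup comes from replacing the per-element second-derivative-of-Gaussian kernels above with one Gaussian smoothing pass followed by repeated np.gradient calls, one per axis pair, which also generalizes the function to n-dimensional input. A minimal standalone sketch of that pipeline follows; the helper name and the example array are mine, and the axis keyword of np.gradient is, to my knowledge, what motivates the NumPy >= 1.11 requirement bumps earlier in this diff.

from itertools import combinations_with_replacement

import numpy as np
from scipy import ndimage as ndi


def hessian_via_gradients(image, sigma=1.0, mode='constant', cval=0):
    # Smooth once, then build every Hessian element from repeated first
    # differences instead of convolving with one large kernel per element.
    smoothed = ndi.gaussian_filter(image, sigma=sigma, mode=mode, cval=cval)
    gradients = np.gradient(smoothed)  # one first-derivative array per axis
    axes = range(image.ndim)
    # Axis pairs (0, 0), (0, 1), (1, 1) for 2D; ndim * (ndim + 1) / 2 in general.
    return [np.gradient(gradients[ax0], axis=ax1)
            for ax0, ax1 in combinations_with_replacement(axes, 2)]


square = np.zeros((5, 5))
square[2, 2] = 4
Hrr, Hrc, Hcc = hessian_via_gradients(square, sigma=0.1)
# hessian_matrix reverses this list for 2D input, so callers still receive
# (Hxx, Hxy, Hyy) in the legacy (x, y) ordering.
Hxx, Hxy, Hyy = Hcc, Hrc, Hrr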


def hessian_matrix_det(image, sigma):
def hessian_matrix_det(image, sigma=1):
"""Computes the approximate Hessian Determinant over an image.

This method uses box filters over integral images to compute the
@@ -185,7 +175,7 @@ def hessian_matrix_det(image, sigma):
----------
image : array
The image over which to compute Hessian Determinant.
sigma : float
sigma : float, optional
Standard deviation used for the Gaussian kernel, used for the Hessian
matrix.

@@ -280,14 +270,14 @@ def hessian_matrix_eigvals(Hxx, Hxy, Hyy):
--------
>>> from skimage.feature import hessian_matrix, hessian_matrix_eigvals
>>> square = np.zeros((5, 5))
>>> square[2, 2] = -1 / 1591.54943092
>>> square[2, 2] = 4
>>> Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
>>> hessian_matrix_eigvals(Hxx, Hxy, Hyy)[0]
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
array([[ 0., 0., 2., 0., 0.],
[ 0., 1., 0., 1., 0.],
[ 2., 0., -2., 0., 2.],
[ 0., 1., 0., 1., 0.],
[ 0., 0., 2., 0., 0.]])

"""

71 changes: 43 additions & 28 deletions skimage/feature/tests/test_corner.py
@@ -40,23 +40,38 @@ def test_structure_tensor():

def test_hessian_matrix():
square = np.zeros((5, 5))
square[2, 2] = 1
square[2, 2] = 4
Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
assert_almost_equal(Hxx, np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, -1591.549431, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]))
assert_almost_equal(Hxy, np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]))
assert_almost_equal(Hyy, np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, -1591.549431, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]))
assert_almost_equal(Hxx, np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[2, 0, -2, 0, 2],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]))

assert_almost_equal(Hxy, np.array([[0, 0, 0, 0, 0],
[0, 1, 0, -1, 0],
[0, 0, 0, 0, 0],
[0, -1, 0, 1, 0],
[0, 0, 0, 0, 0]]))

assert_almost_equal(Hyy, np.array([[0, 0, 2, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, -2, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 2, 0, 0]]))


def test_hessian_matrix_3d():
cube = np.zeros((5, 5, 5))
cube[2, 2, 2] = 4
Hs = hessian_matrix(cube, sigma=0.1)
assert len(Hs) == 6, ("incorrect number of Hessian images (%i) for 3D" %
len(Hs))
assert_almost_equal(Hs[2][:, 2, :], np.array([[0, 0, 0, 0, 0],
[0, 1, 0, -1, 0],
[0, 0, 0, 0, 0],
[0, -1, 0, 1, 0],
[0, 0, 0, 0, 0]]))
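
The six returned elements follow from taking one image per unordered pair of axes, diagonal pairs included; a quick standalone sanity check (the helper name is mine):

from itertools import combinations_with_replacement


def n_hessian_elements(ndim):
    # One element per unordered axis pair: ndim * (ndim + 1) / 2.
    return len(list(combinations_with_replacement(range(ndim), 2)))


assert n_hessian_elements(2) == 3  # Hxx, Hxy, Hyy
assert n_hessian_elements(3) == 6  # matches the assertion in the 3D test above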


def test_structure_tensor_eigvals():
Expand All @@ -78,19 +93,19 @@ def test_structure_tensor_eigvals():

def test_hessian_matrix_eigvals():
square = np.zeros((5, 5))
square[2, 2] = 1
square[2, 2] = 4
Hxx, Hxy, Hyy = hessian_matrix(square, sigma=0.1)
l1, l2 = hessian_matrix_eigvals(Hxx, Hxy, Hyy)
assert_almost_equal(l1, np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, -1591.549431, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]))
assert_almost_equal(l2, np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, -1591.549431, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]))
assert_almost_equal(l1, np.array([[0, 0, 2, 0, 0],
[0, 1, 0, 1, 0],
[2, 0, -2, 0, 2],
[0, 1, 0, 1, 0],
[0, 0, 2, 0, 0]]))
assert_almost_equal(l2, np.array([[0, 0, 0, 0, 0],
[0, -1, 0, -1, 0],
[0, 0, -2, 0, 0],
[0, -1, 0, -1, 0],
[0, 0, 0, 0, 0]]))


@test_parallel()
@@ -262,7 +277,7 @@ def test_num_peaks():
img_corners = corner_harris(rgb2gray(data.astronaut()))

for i in range(20):
n = np.random.random_integers(20)
n = np.random.randint(1, 21)
results = peak_local_max(img_corners,
min_distance=10, threshold_rel=0, num_peaks=n)
assert (results.shape[0] == n)
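
A side note on the change just above: numpy deprecated random_integers in favor of randint, whose upper bound is exclusive, so the replacement draws from the same 1 to 20 range. A tiny standalone check:

import numpy as np

# random_integers(20) drew uniformly from [1, 20], both ends inclusive (now deprecated);
# randint's upper bound is exclusive, hence 21 here.
n = np.random.randint(1, 21)
assert 1 <= n <= 20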
4 changes: 2 additions & 2 deletions skimage/filters/tests/test_frangi.py
@@ -13,8 +13,8 @@ def test_null_matrix():


def test_energy_decrease():
a = np.zeros((3, 3))
a[1, 1] = 1.
a = np.zeros((5, 5))
a[2, 2] = 1.
assert frangi(a).std() < a.std()
assert frangi(a, black_ridges=False).std() < a.std()
assert hessian(a).std() > a.std()
30 changes: 17 additions & 13 deletions skimage/segmentation/tests/test_random_walker.py
@@ -3,7 +3,11 @@
from skimage.transform import resize
from skimage._shared._warnings import expected_warnings

# older versions of scipy raise a warning with new NumPy because they use
# numpy.rank() instead of arr.ndim or numpy.linalg.matrix_rank.
SCIPY_EXPECTED = 'numpy.linalg.matrix_rank|\A\Z'
PYAMG_EXPECTED_WARNING = 'pyamg|\A\Z'
PYAMG_SCIPY_EXPECTED = SCIPY_EXPECTED + '|' + PYAMG_EXPECTED_WARNING
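
As I read these patterns, expected_warnings searches each entry as a regular expression against the messages of the warnings that were raised, so the \A\Z alternative, which can only match an empty string, effectively makes the corresponding warning optional. A standalone illustration with plain re, using the pattern copied from above:

import re

scipy_expected = re.compile(r'numpy.linalg.matrix_rank|\A\Z')
# Matches when the old-scipy deprecation message is present...
assert scipy_expected.search('numpy.linalg.matrix_rank is deprecated')
# ...and also matches an empty string, i.e. the case where no warning is raised,
assert scipy_expected.search('')
# but not an unrelated warning message.
assert not scipy_expected.search('some unrelated warning')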


def make_2d_syntheticdata(lx, ly=None):
@@ -77,11 +81,11 @@ def test_2d_cg():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
with expected_warnings(['"cg" mode']):
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_cg = random_walker(data, labels, beta=90, mode='cg')
assert (labels_cg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
with expected_warnings(['"cg" mode']):
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
full_prob = random_walker(data, labels, beta=90, mode='cg',
return_full_prob=True)
assert (full_prob[1, 25:45, 40:60] >=
@@ -94,7 +98,7 @@ def test_2d_cg_mg():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
expected = 'scipy.sparse.sparsetools|%s' % PYAMG_EXPECTED_WARNING
expected = 'scipy.sparse.sparsetools|%s' % PYAMG_SCIPY_EXPECTED
with expected_warnings([expected]):
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
assert (labels_cg_mg[25:45, 40:60] == 2).all()
@@ -114,7 +118,7 @@ def test_types():
data, labels = make_2d_syntheticdata(lx, ly)
data = 255 * (data - data.min()) // (data.max() - data.min())
data = data.astype(np.uint8)
with expected_warnings([PYAMG_EXPECTED_WARNING]):
with expected_warnings([PYAMG_SCIPY_EXPECTED]):
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
assert (labels_cg_mg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
@@ -148,7 +152,7 @@ def test_3d():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
with expected_warnings(['"cg" mode']):
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels = random_walker(data, labels, mode='cg')
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
assert data.shape == labels.shape
@@ -162,7 +166,7 @@ def test_3d_inactive():
old_labels = np.copy(labels)
labels[5:25, 26:29, 26:29] = -1
after_labels = np.copy(labels)
with expected_warnings(['"cg" mode|CObject type']):
with expected_warnings(['"cg" mode|CObject type' + '|' + SCIPY_EXPECTED]):
labels = random_walker(data, labels, mode='cg')
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
assert data.shape == labels.shape
@@ -173,11 +177,11 @@ def test_multispectral_2d():
lx, ly = 70, 100
data, labels = make_2d_syntheticdata(lx, ly)
data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output
with expected_warnings(['"cg" mode']):
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
multi_labels = random_walker(data, labels, mode='cg',
multichannel=True)
assert data[..., 0].shape == labels.shape
with expected_warnings(['"cg" mode']):
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
single_labels = random_walker(data[..., 0], labels, mode='cg')
assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()
assert data[..., 0].shape == labels.shape
@@ -189,11 +193,11 @@ def test_multispectral_3d():
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output
with expected_warnings(['"cg" mode']):
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
multi_labels = random_walker(data, labels, mode='cg',
multichannel=True)
assert data[..., 0].shape == labels.shape
with expected_warnings(['"cg" mode']):
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
single_labels = random_walker(data[..., 0], labels, mode='cg')
assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
assert (single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
@@ -220,7 +224,7 @@ def test_spacing_0():
lz // 4 - small_l // 8] = 2

# Test with `spacing` kwarg
with expected_warnings(['"cg" mode']):
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
spacing=(1., 1., 0.5))

@@ -248,7 +252,7 @@ def test_spacing_1():

# Test with `spacing` kwarg
# First, anisotropic along Y
with expected_warnings(['"cg" mode']):
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
spacing=(1., 2., 1.))
assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()
@@ -268,7 +272,7 @@ def test_spacing_1():
lz // 2 - small_l // 4] = 2

# Anisotropic along X
with expected_warnings(['"cg" mode']):
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso2 = random_walker(data_aniso,
labels_aniso2,
mode='cg', spacing=(2., 1., 1.))
2 changes: 1 addition & 1 deletion tools/appveyor/requirements.txt
@@ -8,7 +8,7 @@
# fix the versions of binary packages to force the use of the whl
# of the rackspace folder instead of trying to install from PyPI
# wheels are preferred for a given version
numpy==1.8.1
numpy>=1.11
scipy==0.14.0
cython>=0.21
matplotlib==1.4.2
4 changes: 3 additions & 1 deletion tools/travis_before_install.sh
@@ -15,7 +15,7 @@ export DISPLAY=:99.0
export PYTHONWARNINGS="d,all:::skimage"
export TEST_ARGS="--exe --ignore-files=^_test -v --with-doctest \
--ignore-files=^setup.py$"
WHEELBINARIES="matplotlib numpy scipy pillow cython"
WHEELBINARIES="matplotlib scipy pillow cython"

retry () {
# https://gist.github.com/fungusakafungus/1026804
@@ -47,6 +47,8 @@ source ~/venv/bin/activate

pip install --upgrade pip
pip install --retries 3 -q wheel flake8 codecov nose
# install numpy from PyPI instead of our wheelhouse
pip install --retries 3 -q wheel numpy

# install wheels
for requirement in $WHEELBINARIES; do