Showing 12 of 72 files from the diff.
Other files ignored by Codecov
paper/paper.md has changed.
.coveragerc has changed.
requirements.txt was deleted.
paper/paper.bib has changed.
bin/README.rst has changed.
setup.cfg has changed.
codecov.yml has changed.
pytest.ini has changed.
README.md is new.
example.py has changed.
setup.py has changed.
.travis.yml was deleted.
bin/test has changed.

@@ -1,24 +1,61 @@
Loading
1 -
from collections import namedtuple
1 +
import sys
2 +
import dask
3 +
import dask.array as da
4 +
from dask.diagnostics import ProgressBar
5 +
import warnings
2 6
import numpy as np
7 +
from numba import njit, prange
8 +
from edt import edt
3 9
import operator as op
10 +
from tqdm import tqdm
4 11
import scipy.ndimage as spim
5 12
import scipy.spatial as sptl
6 -
import warnings
13 +
from collections import namedtuple
7 14
from scipy.signal import fftconvolve
8 -
from tqdm import tqdm
9 -
from numba import jit
10 -
from skimage.segmentation import clear_border
15 +
from skimage.segmentation import clear_border, watershed
11 16
from skimage.morphology import ball, disk, square, cube, diamond, octahedron
12 -
from skimage.morphology import reconstruction, watershed
17 +
from skimage.morphology import reconstruction
13 18
from porespy.tools import randomize_colors, fftmorphology
14 19
from porespy.tools import get_border, extend_slice, extract_subsection
15 -
from porespy.tools import ps_disk, ps_ball
16 20
from porespy.tools import _create_alias_map
21 +
from porespy.tools import ps_disk, ps_ball
22 +
23 +
24 +
def apply_padded(im, pad_width, func, pad_val=1, **kwargs):
25 +
    r"""
26 +
    Applies padding to an image before sending to ``func``, then extracts the
27 +
    result corresponding to the original image shape.
28 +
29 +
    Parameters
30 +
    ----------
31 +
    im : ND-image
32 +
        The image to which ``func`` should be applied
33 +
    pad_width : int or list of ints
34 +
        The amount of padding to apply to each axis.  Refer to ``numpy.pad``
35 +
        documentation for more details.
36 +
    pad_val : scalar
37 +
        The value to place into the padded voxels.  The default is 1 (or
38 +
        ``True``) which extends the pore space.
39 +
    func : function handle
40 +
        The function to apply to the padded image
41 +
    kwargs : additional keyword arguments
42 +
        All additional keyword arguments are collected and passed to ``func``.
43 +
44 +
    Notes
45 +
    -----
46 +
    A use case for this is when using ``skimage.morphology.skeletonize_3d``
47 +
    to ensure that the skeleton extends beyond the edges of the image.
48 +
    """
49 +
    padded = np.pad(im, pad_width=pad_width,
50 +
                    mode='constant', constant_values=pad_val)
51 +
    temp = func(padded, **kwargs)
52 +
    result = extract_subsection(im=temp, shape=im.shape)
53 +
    return result
17 54
18 55
19 56
def trim_small_clusters(im, size=1):
20 57
    r"""
21 -
    Remove isolated voxels or clusters smaller than a given size
58 +
    Remove isolated voxels or clusters of a given size or smaller
22 59
23 60
    Parameters
24 61
    ----------
@@ -36,16 +73,16 @@
Loading
36 73
        ``size`` removed.
37 74
38 75
    """
39 -
    if im.dims == 2:
76 +
    if im.ndim == 2:
40 77
        strel = disk(1)
41 -
    elif im.ndims == 3:
78 +
    elif im.ndim == 3:
42 79
        strel = ball(1)
43 80
    else:
44 -
        raise Exception('Only 2D or 3D images are accepted')
81 +
        raise Exception("Only 2D or 3D images are accepted")
45 82
    filtered_array = np.copy(im)
46 83
    labels, N = spim.label(filtered_array, structure=strel)
47 84
    id_sizes = np.array(spim.sum(im, labels, range(N + 1)))
48 -
    area_mask = (id_sizes <= size)
85 +
    area_mask = id_sizes <= size
49 86
    filtered_array[area_mask[labels]] = 0
50 87
    return filtered_array
51 88
@@ -61,12 +98,12 @@
Loading
61 98
    """
62 99
    A = im
63 100
    B = np.swapaxes(A, axis, -1)
64 -
    updown = np.empty((*B.shape[:-1], B.shape[-1]+1), B.dtype)
101 +
    updown = np.empty((*B.shape[:-1], B.shape[-1] + 1), B.dtype)
65 102
    updown[..., 0], updown[..., -1] = -1, -1
66 103
    np.subtract(B[..., 1:], B[..., :-1], out=updown[..., 1:-1])
67 104
    chnidx = np.where(updown)
68 105
    chng = updown[chnidx]
69 -
    pkidx, = np.where((chng[:-1] > 0) & (chng[1:] < 0) | (chnidx[-1][:-1] == 0))
106 +
    (pkidx,) = np.where((chng[:-1] > 0) & (chng[1:] < 0) | (chnidx[-1][:-1] == 0))
70 107
    pkidx = (*map(op.itemgetter(pkidx), chnidx),)
71 108
    out = np.zeros_like(A)
72 109
    aux = out.swapaxes(axis, -1)
@@ -76,7 +113,7 @@
Loading
76 113
    return result
77 114
78 115
79 -
def distance_transform_lin(im, axis=0, mode='both'):
116 +
def distance_transform_lin(im, axis=0, mode="both"):
80 117
    r"""
81 118
    Replaces each void voxel with the linear distance to the nearest solid
82 119
    voxel along the specified axis.
@@ -110,33 +147,38 @@
Loading
110 147
        the nearest background along the specified axis.
111 148
    """
112 149
    if im.ndim != im.squeeze().ndim:
113 -
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
114 -
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
115 -
                      ' unexpected behavior.')
116 -
    if mode in ['backward', 'reverse']:
150 +
        warnings.warn(
151 +
            "Input image conains a singleton axis:"
152 +
            + str(im.shape)
153 +
            + " Reduce dimensionality with np.squeeze(im) to avoid"
154 +
            + " unexpected behavior."
155 +
        )
156 +
    if mode in ["backward", "reverse"]:
117 157
        im = np.flip(im, axis)
118 -
        im = distance_transform_lin(im=im, axis=axis, mode='forward')
158 +
        im = distance_transform_lin(im=im, axis=axis, mode="forward")
119 159
        im = np.flip(im, axis)
120 160
        return im
121 -
    elif mode in ['both']:
122 -
        im_f = distance_transform_lin(im=im, axis=axis, mode='forward')
123 -
        im_b = distance_transform_lin(im=im, axis=axis, mode='backward')
161 +
    elif mode in ["both"]:
162 +
        im_f = distance_transform_lin(im=im, axis=axis, mode="forward")
163 +
        im_b = distance_transform_lin(im=im, axis=axis, mode="backward")
124 164
        return np.minimum(im_f, im_b)
125 165
    else:
126 166
        b = np.cumsum(im > 0, axis=axis)
127 -
        c = np.diff(b*(im == 0), axis=axis)
167 +
        c = np.diff(b * (im == 0), axis=axis)
128 168
        d = np.minimum.accumulate(c, axis=axis)
129 169
        if im.ndim == 1:
130 -
            e = np.pad(d, pad_width=[1, 0], mode='constant', constant_values=0)
170 +
            e = np.pad(d, pad_width=[1, 0], mode="constant", constant_values=0)
131 171
        elif im.ndim == 2:
132 172
            ax = [[[1, 0], [0, 0]], [[0, 0], [1, 0]]]
133 -
            e = np.pad(d, pad_width=ax[axis], mode='constant', constant_values=0)
173 +
            e = np.pad(d, pad_width=ax[axis], mode="constant", constant_values=0)
134 174
        elif im.ndim == 3:
135 -
            ax = [[[1, 0], [0, 0], [0, 0]],
136 -
                  [[0, 0], [1, 0], [0, 0]],
137 -
                  [[0, 0], [0, 0], [1, 0]]]
138 -
            e = np.pad(d, pad_width=ax[axis], mode='constant', constant_values=0)
139 -
        f = im*(b + e)
175 +
            ax = [
176 +
                [[1, 0], [0, 0], [0, 0]],
177 +
                [[0, 0], [1, 0], [0, 0]],
178 +
                [[0, 0], [0, 0], [1, 0]],
179 +
            ]
180 +
            e = np.pad(d, pad_width=ax[axis], mode="constant", constant_values=0)
181 +
        f = im * (b + e)
140 182
        return f
141 183
142 184
@@ -205,36 +247,36 @@
Loading
205 247
    using marker-based watershed segmenation".  Physical Review E. (2017)
206 248
207 249
    """
208 -
    tup = namedtuple('results', field_names=['im', 'dt', 'peaks', 'regions'])
209 -
    print('_'*60)
250 +
    tup = namedtuple("results", field_names=["im", "dt", "peaks", "regions"])
251 +
    print("-" * 60)
210 252
    print("Beginning SNOW Algorithm")
211 253
    im_shape = np.array(im.shape)
212 254
    if im.dtype is not bool:
213 -
        print('Converting supplied image (im) to boolean')
255 +
        print("Converting supplied image (im) to boolean")
214 256
        im = im > 0
215 257
    if dt is None:
216 -
        print('Peforming Distance Transform')
258 +
        print("Peforming Distance Transform")
217 259
        if np.any(im_shape == 1):
218 260
            ax = np.where(im_shape == 1)[0][0]
219 -
            dt = spim.distance_transform_edt(input=im.squeeze())
261 +
            dt = edt(im.squeeze())
220 262
            dt = np.expand_dims(dt, ax)
221 263
        else:
222 -
            dt = spim.distance_transform_edt(input=im)
264 +
            dt = edt(im)
223 265
224 266
    tup.im = im
225 267
    tup.dt = dt
226 268
227 269
    if sigma > 0:
228 -
        print('Applying Gaussian blur with sigma =', str(sigma))
270 +
        print("Applying Gaussian blur with sigma =", str(sigma))
229 271
        dt = spim.gaussian_filter(input=dt, sigma=sigma)
230 272
231 273
    peaks = find_peaks(dt=dt, r_max=r_max)
232 -
    print('Initial number of peaks: ', spim.label(peaks)[1])
274 +
    print("Initial number of peaks: ", spim.label(peaks)[1])
233 275
    peaks = trim_saddle_points(peaks=peaks, dt=dt, max_iters=500)
234 -
    print('Peaks after trimming saddle points: ', spim.label(peaks)[1])
276 +
    print("Peaks after trimming saddle points: ", spim.label(peaks)[1])
235 277
    peaks = trim_nearby_peaks(peaks=peaks, dt=dt)
236 278
    peaks, N = spim.label(peaks)
237 -
    print('Peaks after trimming nearby peaks: ', N)
279 +
    print("Peaks after trimming nearby peaks: ", N)
238 280
    tup.peaks = peaks
239 281
    if mask:
240 282
        mask_solid = im > 0
@@ -333,30 +375,32 @@
Loading
333 375
    combined_dt = 0
334 376
    combined_region = 0
335 377
    num = [0]
336 -
    for i in phases_num:
337 -
        print('_' * 60)
378 +
    for i, j in enumerate(phases_num):
379 +
        print("_" * 60)
338 380
        if alias is None:
339 -
            print('Processing Phase {}'.format(i))
340 -
        else:
341 -
            print('Processing Phase {}'.format(al[i]))
342 -
        phase_snow = snow_partitioning(im == i,
343 -
                                       dt=None, r_max=r_max, sigma=sigma,
344 -
                                       return_all=return_all, mask=mask,
345 -
                                       randomize=randomize)
346 -
        if len(phases_num) == 1 and phases_num == 1:
347 -
            combined_dt = phase_snow.dt
348 -
            combined_region = phase_snow.regions
381 +
            print("Processing Phase {}".format(j))
349 382
        else:
350 -
            combined_dt += phase_snow.dt
351 -
            phase_snow.regions *= phase_snow.im
352 -
            phase_snow.regions += num[i - 1]
353 -
            phase_ws = phase_snow.regions * phase_snow.im
354 -
            phase_ws[phase_ws == num[i - 1]] = 0
355 -
            combined_region += phase_ws
383 +
            print("Processing Phase {}".format(al[j]))
384 +
        phase_snow = snow_partitioning(
385 +
            im == j,
386 +
            dt=None,
387 +
            r_max=r_max,
388 +
            sigma=sigma,
389 +
            return_all=return_all,
390 +
            mask=mask,
391 +
            randomize=randomize,
392 +
        )
393 +
        combined_dt += phase_snow.dt
394 +
        phase_snow.regions *= phase_snow.im
395 +
        phase_snow.regions += num[i]
396 +
        phase_ws = phase_snow.regions * phase_snow.im
397 +
        phase_ws[phase_ws == num[i]] = 0
398 +
        combined_region += phase_ws
356 399
        num.append(np.amax(combined_region))
357 400
    if return_all:
358 -
        tup = namedtuple('results', field_names=['im', 'dt', 'phase_max_label',
359 -
                                                 'regions'])
401 +
        tup = namedtuple(
402 +
            "results", field_names=["im", "dt", "phase_max_label", "regions"]
403 +
        )
360 404
        tup.im = im
361 405
        tup.dt = combined_dt
362 406
        tup.phase_max_label = num[1:]
@@ -366,24 +410,22 @@
Loading
366 410
        return combined_region
367 411
368 412
369 -
def find_peaks(dt, r_max=4, footprint=None):
413 +
def find_peaks(dt, r_max=4, footprint=None, **kwargs):
370 414
    r"""
371 -
    Returns all local maxima in the distance transform
415 +
    Finds local maxima in the distance transform
372 416
373 417
    Parameters
374 418
    ----------
375 419
    dt : ND-array
376 420
        The distance transform of the pore space.  This may be calculated and
377 421
        filtered using any means desired.
378 -
379 422
    r_max : scalar
380 423
        The size of the structuring element used in the maximum filter.  This
381 424
        controls the localness of any maxima. The default is 4 voxels.
382 -
383 425
    footprint : ND-array
384 426
        Specifies the shape of the structuring element used to define the
385 -
        neighborhood when looking for peaks.  If none is specified then a
386 -
        spherical shape is used (or circular in 2D).
427 +
        neighborhood when looking for peaks.  If ``None`` (the default) is
428 +
        specified then a spherical shape is used (or circular in 2D).
387 429
388 430
    Returns
389 431
    -------
@@ -399,14 +441,15 @@
Loading
399 441
    ``peaks = peak_local_max(image=dt, min_distance=r, exclude_border=0,
400 442
    indices=False)``
401 443
402 -
    This automatically uses a square structuring element which is significantly
403 -
    faster than using a circular or spherical element.
444 +
    The *skimage* function automatically uses a square structuring element
445 +
    which is significantly faster than using a circular or spherical element.
404 446
    """
405 447
    im = dt > 0
406 448
    if im.ndim != im.squeeze().ndim:
407 -
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
408 -
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
409 -
                      ' unexpected behavior.')
449 +
        warnings.warn("Input image conains a singleton axis:"
450 +
                      + str(im.shape)
451 +
                      + " Reduce dimensionality with np.squeeze(im) to avoid"
452 +
                      + " unexpected behavior.")
410 453
    if footprint is None:
411 454
        if im.ndim == 2:
412 455
            footprint = disk
@@ -414,8 +457,17 @@
Loading
414 457
            footprint = ball
415 458
        else:
416 459
            raise Exception("only 2-d and 3-d images are supported")
417 -
    mx = spim.maximum_filter(dt + 2*(~im), footprint=footprint(r_max))
418 -
    peaks = (dt == mx)*im
460 +
    parallel = kwargs.pop('parallel', False)
461 +
    cores = kwargs.pop('cores', None)
462 +
    divs = kwargs.pop('cores', 2)
463 +
    if parallel:
464 +
        overlap = max(footprint(r_max).shape)
465 +
        peaks = chunked_func(func=find_peaks, overlap=overlap,
466 +
                             im_arg='dt', dt=dt, footprint=footprint,
467 +
                             cores=cores, divs=divs)
468 +
    else:
469 +
        mx = spim.maximum_filter(dt + 2 * (~im), footprint=footprint(r_max))
470 +
        peaks = (dt == mx) * im
419 471
    return peaks
420 472
421 473
@@ -427,14 +479,14 @@
Loading
427 479
    Parameters
428 480
    ----------
429 481
    peaks : ND-image
430 -
        An image containing True values indicating peaks in the distance
482 +
        An image containing ``True`` values indicating peaks in the distance
431 483
        transform
432 484
433 485
    Returns
434 486
    -------
435 487
    image : ND-array
436 488
        An array with the same number of isolated peaks as the original image,
437 -
        but fewer total voxels.
489 +
        but fewer total ``True`` voxels.
438 490
439 491
    Notes
440 492
    -----
@@ -447,9 +499,9 @@
Loading
447 499
    else:
448 500
        strel = cube
449 501
    markers, N = spim.label(input=peaks, structure=strel(3))
450 -
    inds = spim.measurements.center_of_mass(input=peaks,
451 -
                                            labels=markers,
452 -
                                            index=np.arange(1, N+1))
502 +
    inds = spim.measurements.center_of_mass(
503 +
        input=peaks, labels=markers, index=np.arange(1, N + 1)
504 +
    )
453 505
    inds = np.floor(inds).astype(int)
454 506
    # Centroid may not be on old pixel, so create a new peaks image
455 507
    peaks_new = np.zeros_like(peaks, dtype=bool)
@@ -457,7 +509,7 @@
Loading
457 509
    return peaks_new
458 510
459 511
460 -
def trim_saddle_points(peaks, dt, max_iters=10):
512 +
def trim_saddle_points(peaks, dt, max_iters=10, verbose=1):
461 513
    r"""
462 514
    Removes peaks that were mistakenly identified because they lied on a
463 515
    saddle or ridge in the distance transform that was not actually a true
@@ -499,26 +551,27 @@
Loading
499 551
    slices = spim.find_objects(labels)
500 552
    for i in range(N):
501 553
        s = extend_slice(s=slices[i], shape=peaks.shape, pad=10)
502 -
        peaks_i = labels[s] == i+1
554 +
        peaks_i = labels[s] == i + 1
503 555
        dt_i = dt[s]
504 556
        im_i = dt_i > 0
505 557
        iters = 0
506 558
        peaks_dil = np.copy(peaks_i)
507 559
        while iters < max_iters:
508 560
            iters += 1
509 -
            peaks_dil = spim.binary_dilation(input=peaks_dil,
510 -
                                             structure=cube(3))
511 -
            peaks_max = peaks_dil*np.amax(dt_i*peaks_dil)
512 -
            peaks_extended = (peaks_max == dt_i)*im_i
561 +
            peaks_dil = spim.binary_dilation(input=peaks_dil, structure=cube(3))
562 +
            peaks_max = peaks_dil * np.amax(dt_i * peaks_dil)
563 +
            peaks_extended = (peaks_max == dt_i) * im_i
513 564
            if np.all(peaks_extended == peaks_i):
514 565
                break  # Found a true peak
515 -
            elif np.sum(peaks_extended*peaks_i) == 0:
566 +
            elif np.sum(peaks_extended * peaks_i) == 0:
516 567
                peaks_i = False
517 568
                break  # Found a saddle point
518 569
        peaks[s] = peaks_i
519 -
        if iters >= max_iters:
520 -
            print('Maximum number of iterations reached, consider'
521 -
                  + 'running again with a larger value of max_iters')
570 +
        if iters >= max_iters and verbose:
571 +
            print(
572 +
                "Maximum number of iterations reached, consider "
573 +
                + "running again with a larger value of max_iters"
574 +
            )
522 575
    return peaks
523 576
524 577
@@ -561,7 +614,7 @@
Loading
561 614
        from skimage.morphology import cube
562 615
    peaks, N = spim.label(peaks, structure=cube(3))
563 616
    crds = spim.measurements.center_of_mass(peaks, labels=peaks,
564 -
                                            index=np.arange(1, N+1))
617 +
                                            index=np.arange(1, N + 1))
565 618
    crds = np.vstack(crds).astype(int)  # Convert to numpy array of ints
566 619
    # Get distance between each peak as a distance map
567 620
    tree = sptl.cKDTree(data=crds)
@@ -583,7 +636,7 @@
Loading
583 636
    slices = spim.find_objects(input=peaks)
584 637
    for s in drop_peaks:
585 638
        peaks[slices[s]] = 0
586 -
    return (peaks > 0)
639 +
    return peaks > 0
587 640
588 641
589 642
def find_disconnected_voxels(im, conn=None):
@@ -597,11 +650,10 @@
Loading
597 650
    im : ND-image
598 651
        A Boolean image, with True values indicating the phase for which
599 652
        disconnected voxels are sought.
600 -
601 653
    conn : int
602 654
        For 2D the options are 4 and 8 for square and diagonal neighbors, while
603 655
        for the 3D the options are 6 and 26, similarily for square and diagonal
604 -
        neighbors.  The default is max
656 +
        neighbors.  The default is the maximum option.
605 657
606 658
    Returns
607 659
    -------
@@ -618,25 +670,32 @@
Loading
618 670
619 671
    """
620 672
    if im.ndim != im.squeeze().ndim:
621 -
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
622 -
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
623 -
                      ' unexpected behavior.')
673 +
        warnings.warn(
674 +
            "Input image conains a singleton axis:"
675 +
            + str(im.shape)
676 +
            + " Reduce dimensionality with np.squeeze(im) to avoid"
677 +
            + " unexpected behavior."
678 +
        )
624 679
    if im.ndim == 2:
625 680
        if conn == 4:
626 681
            strel = disk(1)
627 682
        elif conn in [None, 8]:
628 683
            strel = square(3)
684 +
        else:
685 +
            raise Exception("Received conn is not valid")
629 686
    elif im.ndim == 3:
630 687
        if conn == 6:
631 688
            strel = ball(1)
632 689
        elif conn in [None, 26]:
633 690
            strel = cube(3)
691 +
        else:
692 +
            raise Exception("Received conn is not valid")
634 693
    labels, N = spim.label(input=im, structure=strel)
635 694
    holes = clear_border(labels=labels) > 0
636 695
    return holes
637 696
638 697
639 -
def fill_blind_pores(im):
698 +
def fill_blind_pores(im, conn=None):
640 699
    r"""
641 700
    Fills all pores that are not connected to the edges of the image.
642 701
@@ -649,6 +708,10 @@
Loading
649 708
    -------
650 709
    image : ND-array
651 710
        A version of ``im`` but with all the disconnected pores removed.
711 +
    conn : int
712 +
        For 2D the options are 4 and 8 for square and diagonal neighbors, while
713 +
        for the 3D the options are 6 and 26, similarily for square and diagonal
714 +
        neighbors.  The default is the maximum option.
652 715
653 716
    See Also
654 717
    --------
@@ -656,12 +719,12 @@
Loading
656 719
657 720
    """
658 721
    im = np.copy(im)
659 -
    holes = find_disconnected_voxels(im)
722 +
    holes = find_disconnected_voxels(im, conn=conn)
660 723
    im[holes] = False
661 724
    return im
662 725
663 726
664 -
def trim_floating_solid(im):
727 +
def trim_floating_solid(im, conn=None):
665 728
    r"""
666 729
    Removes all solid that that is not attached to the edges of the image.
667 730
@@ -669,6 +732,10 @@
Loading
669 732
    ----------
670 733
    im : ND-array
671 734
        The image of the porous material
735 +
    conn : int
736 +
        For 2D the options are 4 and 8 for square and diagonal neighbors, while
737 +
        for the 3D the options are 6 and 26, similarily for square and diagonal
738 +
        neighbors.  The default is the maximum option.
672 739
673 740
    Returns
674 741
    -------
@@ -681,12 +748,13 @@
Loading
681 748
682 749
    """
683 750
    im = np.copy(im)
684 -
    holes = find_disconnected_voxels(~im)
751 +
    holes = find_disconnected_voxels(~im, conn=conn)
685 752
    im[holes] = True
686 753
    return im
687 754
688 755
689 -
def trim_nonpercolating_paths(im, inlet_axis=0, outlet_axis=0):
756 +
def trim_nonpercolating_paths(im, inlet_axis=0, outlet_axis=0,
757 +
                              inlets=None, outlets=None):
690 758
    r"""
691 759
    Removes all nonpercolating paths between specified edges
692 760
@@ -699,16 +767,20 @@
Loading
699 767
    im : ND-array
700 768
        The image of the porous material with ```True`` values indicating the
701 769
        phase of interest
702 -
703 770
    inlet_axis : int
704 771
        Inlet axis of boundary condition. For three dimensional image the
705 772
        number ranges from 0 to 2. For two dimensional image the range is
706 -
        between 0 to 1.
707 -
773 +
        between 0 to 1. If ``inlets`` is given then this argument is ignored.
708 774
    outlet_axis : int
709 775
        Outlet axis of boundary condition. For three dimensional image the
710 776
        number ranges from 0 to 2. For two dimensional image the range is
711 -
        between 0 to 1.
777 +
        between 0 to 1. If ``outlets`` is given then this argument is ignored.
778 +
    inlets : ND-image (optional)
779 +
        A boolean mask indicating locations of inlets.  If this argument is
780 +
        supplied then ``inlet_axis`` is ignored.
781 +
    outlets : ND-image (optional)
782 +
        A boolean mask indicating locations of outlets. If this argument is
783 +
        supplied then ``outlet_axis`` is ignored.
712 784
713 785
    Returns
714 786
    -------
@@ -723,46 +795,50 @@
Loading
723 795
724 796
    """
725 797
    if im.ndim != im.squeeze().ndim:
726 -
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
727 -
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
728 -
                      ' unexpected behavior.')
798 +
        warnings.warn(
799 +
            "Input image conains a singleton axis:"
800 +
            + str(im.shape)
801 +
            + " Reduce dimensionality with np.squeeze(im) to avoid"
802 +
            + " unexpected behavior."
803 +
        )
729 804
    im = trim_floating_solid(~im)
730 805
    labels = spim.label(~im)[0]
731 -
    inlet = np.zeros_like(im, dtype=int)
732 -
    outlet = np.zeros_like(im, dtype=int)
733 -
    if im.ndim == 3:
734 -
        if inlet_axis == 0:
735 -
            inlet[0, :, :] = 1
736 -
        elif inlet_axis == 1:
737 -
            inlet[:, 0, :] = 1
738 -
        elif inlet_axis == 2:
739 -
            inlet[:, :, 0] = 1
740 -
741 -
        if outlet_axis == 0:
742 -
            outlet[-1, :, :] = 1
743 -
        elif outlet_axis == 1:
744 -
            outlet[:, -1, :] = 1
745 -
        elif outlet_axis == 2:
746 -
            outlet[:, :, -1] = 1
747 -
748 -
    if im.ndim == 2:
749 -
        if inlet_axis == 0:
750 -
            inlet[0, :] = 1
751 -
        elif inlet_axis == 1:
752 -
            inlet[:, 0] = 1
753 -
754 -
        if outlet_axis == 0:
755 -
            outlet[-1, :] = 1
756 -
        elif outlet_axis == 1:
757 -
            outlet[:, -1] = 1
758 -
    IN = np.unique(labels*inlet)
759 -
    OUT = np.unique(labels*outlet)
806 +
    if inlets is None:
807 +
        inlets = np.zeros_like(im, dtype=bool)
808 +
        if im.ndim == 3:
809 +
            if inlet_axis == 0:
810 +
                inlets[0, :, :] = True
811 +
            elif inlet_axis == 1:
812 +
                inlets[:, 0, :] = True
813 +
            elif inlet_axis == 2:
814 +
                inlets[:, :, 0] = True
815 +
        if im.ndim == 2:
816 +
            if inlet_axis == 0:
817 +
                inlets[0, :] = True
818 +
            elif inlet_axis == 1:
819 +
                inlets[:, 0] = True
820 +
    if outlets is None:
821 +
        outlets = np.zeros_like(im, dtype=bool)
822 +
        if im.ndim == 3:
823 +
            if outlet_axis == 0:
824 +
                outlets[-1, :, :] = True
825 +
            elif outlet_axis == 1:
826 +
                outlets[:, -1, :] = True
827 +
            elif outlet_axis == 2:
828 +
                outlets[:, :, -1] = True
829 +
        if im.ndim == 2:
830 +
            if outlet_axis == 0:
831 +
                outlets[-1, :] = True
832 +
            elif outlet_axis == 1:
833 +
                outlets[:, -1] = True
834 +
    IN = np.unique(labels * inlets)
835 +
    OUT = np.unique(labels * outlets)
760 836
    new_im = np.isin(labels, list(set(IN) ^ set(OUT)), invert=True)
761 837
    im[new_im == 0] = True
762 838
    return ~im
763 839
764 840
765 -
def trim_extrema(im, h, mode='maxima'):
841 +
def trim_extrema(im, h, mode="maxima"):
766 842
    r"""
767 843
    Trims local extrema in greyscale values by a specified amount.
768 844
@@ -790,37 +866,40 @@
Loading
790 866
791 867
    """
792 868
    result = im
793 -
    if mode in ['maxima', 'extrema']:
794 -
        result = reconstruction(seed=im - h, mask=im, method='dilation')
795 -
    elif mode in ['minima', 'extrema']:
796 -
        result = reconstruction(seed=im + h, mask=im, method='erosion')
869 +
    if mode in ["maxima", "extrema"]:
870 +
        result = reconstruction(seed=im - h, mask=im, method="dilation")
871 +
    elif mode in ["minima", "extrema"]:
872 +
        result = reconstruction(seed=im + h, mask=im, method="erosion")
797 873
    return result
798 874
799 875
800 -
@jit(forceobj=True)
801 -
def flood(im, regions=None, mode='max'):
876 +
def flood(im, regions=None, mode="max"):
802 877
    r"""
803 878
    Floods/fills each region in an image with a single value based on the
804 -
    specific values in that region.  The ``mode`` argument is used to
805 -
    determine how the value is calculated.
879 +
    specific values in that region.
880 +
881 +
    The ``mode`` argument is used to determine how the value is calculated.
806 882
807 883
    Parameters
808 884
    ----------
809 885
    im : array_like
810 -
        An ND image with isolated regions containing 0's elsewhere.
811 -
886 +
        An image with isolated regions with numerical values in each voxel,
887 +
        and 0's elsewhere.
812 888
    regions : array_like
813 -
        An array the same shape as ``im`` with each region labeled.  If None is
814 -
        supplied (default) then ``scipy.ndimage.label`` is used with its
815 -
        default arguments.
816 -
889 +
        An array the same shape as ``im`` with each region labeled.  If
890 +
        ``None`` is supplied (default) then ``scipy.ndimage.label`` is used
891 +
        with its default arguments.
817 892
    mode : string
818 893
        Specifies how to determine which value should be used to flood each
819 894
        region.  Options are:
820 895
821 -
        'max' - Floods each region with the local maximum in that region
896 +
        'maximum' - Floods each region with the local maximum in that region
897 +
898 +
        'minimum' - Floods each region the local minimum in that region
899 +
900 +
        'median' - Floods each region the local median in that region
822 901
823 -
        'min' - Floods each region the local minimum in that region
902 +
        'mean' - Floods each region the local mean in that region
824 903
825 904
        'size' - Floods each region with the size of that region
826 905
@@ -841,24 +920,16 @@
Loading
841 920
    else:
842 921
        labels = np.copy(regions)
843 922
        N = labels.max()
844 -
    I = im.flatten()
845 -
    L = labels.flatten()
846 -
    if mode.startswith('max'):
847 -
        V = np.zeros(shape=N+1, dtype=float)
848 -
        for i in range(len(L)):
849 -
            if V[L[i]] < I[i]:
850 -
                V[L[i]] = I[i]
851 -
    elif mode.startswith('min'):
852 -
        V = np.ones(shape=N+1, dtype=float)*np.inf
853 -
        for i in range(len(L)):
854 -
            if V[L[i]] > I[i]:
855 -
                V[L[i]] = I[i]
856 -
    elif mode.startswith('size'):
857 -
        V = np.zeros(shape=N+1, dtype=int)
858 -
        for i in range(len(L)):
859 -
            V[L[i]] += 1
860 -
    im_flooded = np.reshape(V[labels], newshape=im.shape)
861 -
    im_flooded = im_flooded*mask
923 +
    mode = "sum" if mode == "size" else mode
924 +
    mode = "maximum" if mode == "max" else mode
925 +
    mode = "minimum" if mode == "min" else mode
926 +
    if mode in ["mean", "median", "maximum", "minimum", "sum"]:
927 +
        f = getattr(spim, mode)
928 +
        vals = f(input=im, labels=labels, index=range(0, N + 1))
929 +
        im_flooded = vals[labels]
930 +
        im_flooded = im_flooded * mask
931 +
    else:
932 +
        raise Exception(mode + " is not a recognized mode")
862 933
    return im_flooded
863 934
864 935
@@ -886,10 +957,10 @@
Loading
886 957
        the image.  Obviously, voxels with a value of zero have no error.
887 958
888 959
    """
889 -
    temp = np.ones(shape=dt.shape)*np.inf
960 +
    temp = np.ones(shape=dt.shape) * np.inf
890 961
    for ax in range(dt.ndim):
891 962
        dt_lin = distance_transform_lin(np.ones_like(temp, dtype=bool),
892 -
                                        axis=ax, mode='both')
963 +
                                        axis=ax, mode="both")
893 964
        temp = np.minimum(temp, dt_lin)
894 965
    result = np.clip(dt - temp, a_min=0, a_max=np.inf)
895 966
    return result
@@ -913,6 +984,10 @@
Loading
913 984
        A copy of ``im`` with each voxel value indicating the size of the
914 985
        region to which it belongs.  This is particularly useful for finding
915 986
        chord sizes on the image produced by ``apply_chords``.
987 +
988 +
    See Also
989 +
    --------
990 +
    flood
916 991
    """
917 992
    if im.dtype == bool:
918 993
        im = spim.label(im)[0]
@@ -962,20 +1037,23 @@
Loading
962 1037
963 1038
    """
964 1039
    if im.ndim != im.squeeze().ndim:
965 -
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
966 -
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
967 -
                      ' unexpected behavior.')
1040 +
        warnings.warn(
1041 +
            "Input image conains a singleton axis:"
1042 +
            + str(im.shape)
1043 +
            + " Reduce dimensionality with np.squeeze(im) to avoid"
1044 +
            + " unexpected behavior."
1045 +
        )
968 1046
    if spacing < 0:
969 -
        raise Exception('Spacing cannot be less than 0')
1047 +
        raise Exception("Spacing cannot be less than 0")
970 1048
    if spacing == 0:
971 1049
        label = True
972 1050
    result = np.zeros(im.shape, dtype=int)  # Will receive chords at end
973 -
    slxyz = [slice(None, None, spacing*(axis != i) + 1) for i in [0, 1, 2]]
974 -
    slices = tuple(slxyz[:im.ndim])
1051 +
    slxyz = [slice(None, None, spacing * (axis != i) + 1) for i in [0, 1, 2]]
1052 +
    slices = tuple(slxyz[: im.ndim])
975 1053
    s = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]  # Straight-line structuring element
976 1054
    if im.ndim == 3:  # Make structuring element 3D if necessary
977 1055
        s = np.pad(np.atleast_3d(s), pad_width=((0, 0), (0, 0), (1, 1)),
978 -
                   mode='constant', constant_values=0)
1056 +
                   mode="constant", constant_values=0)
979 1057
    im = im[slices]
980 1058
    s = np.swapaxes(s, 0, axis)
981 1059
    chords = spim.label(im, structure=s)[0]
@@ -1025,25 +1103,28 @@
Loading
1025 1103
1026 1104
    """
1027 1105
    if im.ndim != im.squeeze().ndim:
1028 -
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
1029 -
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
1030 -
                      ' unexpected behavior.')
1106 +
        warnings.warn(
1107 +
            "Input image conains a singleton axis:"
1108 +
            + str(im.shape)
1109 +
            + " Reduce dimensionality with np.squeeze(im) to avoid"
1110 +
            + " unexpected behavior."
1111 +
        )
1031 1112
    if im.ndim < 3:
1032 -
        raise Exception('Must be a 3D image to use this function')
1113 +
        raise Exception("Must be a 3D image to use this function")
1033 1114
    if spacing < 0:
1034 -
        raise Exception('Spacing cannot be less than 0')
1115 +
        raise Exception("Spacing cannot be less than 0")
1035 1116
    ch = np.zeros_like(im, dtype=int)
1036 -
    ch[:, ::4+2*spacing, ::4+2*spacing] = 1  # X-direction
1037 -
    ch[::4+2*spacing, :, 2::4+2*spacing] = 2  # Y-direction
1038 -
    ch[2::4+2*spacing, 2::4+2*spacing, :] = 3  # Z-direction
1039 -
    chords = ch*im
1117 +
    ch[:, :: 4 + 2 * spacing, :: 4 + 2 * spacing] = 1       # X-direction
1118 +
    ch[:: 4 + 2 * spacing, :, 2::4 + 2 * spacing] = 2     # Y-direction
1119 +
    ch[2::4 + 2 * spacing, 2::4 + 2 * spacing, :] = 3   # Z-direction
1120 +
    chords = ch * im
1040 1121
    if trim_edges:
1041 1122
        temp = clear_border(spim.label(chords > 0)[0]) > 0
1042 -
        chords = temp*chords
1123 +
        chords = temp * chords
1043 1124
    return chords
1044 1125
1045 1126
1046 -
def local_thickness(im, sizes=25, mode='hybrid'):
1127 +
def local_thickness(im, sizes=25, mode="hybrid", **kwargs):
1047 1128
    r"""
1048 1129
    For each voxel, this function calculates the radius of the largest sphere
1049 1130
    that both engulfs the voxel and fits entirely within the foreground.
@@ -1104,19 +1185,20 @@
Loading
1104 1185
    function.  This is not needed in ``local_thickness`` however.
1105 1186
1106 1187
    """
1107 -
    im_new = porosimetry(im=im, sizes=sizes, access_limited=False, mode=mode)
1188 +
    im_new = porosimetry(im=im, sizes=sizes, access_limited=False, mode=mode,
1189 +
                         **kwargs)
1108 1190
    return im_new
1109 1191
1110 1192
1111 -
def porosimetry(im, sizes=25, inlets=None, access_limited=True,
1112 -
                mode='hybrid'):
1193 +
def porosimetry(im, sizes=25, inlets=None, access_limited=True, mode='hybrid',
1194 +
                fft=True, **kwargs):
1113 1195
    r"""
1114 -
    Performs a porosimetry simulution on the image
1196 +
    Performs a porosimetry simulution on an image
1115 1197
1116 1198
    Parameters
1117 1199
    ----------
1118 1200
    im : ND-array
1119 -
        An ND image of the porous material containing True values in the
1201 +
        An ND image of the porous material containing ``True`` values in the
1120 1202
        pore space.
1121 1203
1122 1204
    sizes : array_like or scalar
@@ -1125,7 +1207,7 @@
Loading
1125 1207
        the min and max of the distance transform are used.
1126 1208
1127 1209
    inlets : ND-array, boolean
1128 -
        A boolean mask with True values indicating where the invasion
1210 +
        A boolean mask with ``True`` values indicating where the invasion
1129 1211
        enters the image.  By default all faces are considered inlets,
1130 1212
        akin to a mercury porosimetry experiment.  Users can also apply
1131 1213
        solid boundaries to their image externally before passing it in,
@@ -1134,7 +1216,7 @@
Loading
1134 1216
1135 1217
    access_limited : Boolean
1136 1218
        This flag indicates if the intrusion should only occur from the
1137 -
        surfaces (``access_limited`` is True, which is the default), or
1219 +
        surfaces (``access_limited`` is ``True``, which is the default), or
1138 1220
        if the invading phase should be allowed to appear in the core of
1139 1221
        the image.  The former simulates experimental tools like mercury
1140 1222
        intrusion porosimetry, while the latter is useful for comparison
@@ -1159,6 +1241,12 @@
Loading
1159 1241
        included mostly for comparison purposes.  The morphological operations
1160 1242
        are done using fft-based method implementations.
1161 1243
1244 +
    fft : boolean (default is ``True``)
1245 +
        Indicates whether to use the ``fftmorphology`` function in
1246 +
        ``porespy.filters`` or to use the standard morphology functions in
1247 +
        ``scipy.ndimage``.  Always use ``fft=True`` unless you have a good
1248 +
        reason not to.
1249 +
1162 1250
    Returns
1163 1251
    -------
1164 1252
    image : ND-array
@@ -1168,7 +1256,7 @@
Loading
1168 1256
        capillary pressure by applying a boolean comparison:
1169 1257
        ``inv_phase = im > r`` where ``r`` is the radius (in voxels) of the
1170 1258
        invading sphere.  Of course, ``r`` can be converted to capillary
1171 -
        pressure using your favorite model.
1259 +
        pressure using a preferred model.
1172 1260
1173 1261
    Notes
1174 1262
    -----
@@ -1179,19 +1267,19 @@
Loading
1179 1267
1180 1268
    See Also
1181 1269
    --------
1182 -
    fftmorphology
1183 1270
    local_thickness
1184 1271
1185 1272
    """
1186 1273
    if im.ndim != im.squeeze().ndim:
1187 -
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
1188 -
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
1189 -
                      ' unexpected behavior.')
1274 +
        warnings.warn("Input image contains a singleton axis:"
1275 +
                      + str(im.shape)
1276 +
                      + " Reduce dimensionality with np.squeeze(im) to avoid"
1277 +
                      + " unexpected behavior.")
1190 1278
1191 -
    dt = spim.distance_transform_edt(im > 0)
1279 +
    dt = edt(im > 0)
1192 1280
1193 1281
    if inlets is None:
1194 -
        inlets = get_border(im.shape, mode='faces')
1282 +
        inlets = get_border(im.shape, mode="faces")
1195 1283
1196 1284
    if isinstance(sizes, int):
1197 1285
        sizes = np.logspace(start=np.log10(np.amax(dt)), stop=0, num=sizes)
@@ -1203,59 +1291,100 @@
Loading
1203 1291
    else:
1204 1292
        strel = ps_ball
1205 1293
1206 -
    if mode == 'mio':
1294 +
    # Parse kwargs for any parallelization arguments
1295 +
    parallel = kwargs.pop('parallel', False)
1296 +
    cores = kwargs.pop('cores', None)
1297 +
    divs = kwargs.pop('divs', 2)
1298 +
1299 +
    if mode == "mio":
1207 1300
        pw = int(np.floor(dt.max()))
1208 -
        impad = np.pad(im, mode='symmetric', pad_width=pw)
1209 -
        inletspad = np.pad(inlets, mode='symmetric', pad_width=pw)
1210 -
        inlets = np.where(inletspad)
1211 -
#        sizes = np.unique(np.around(sizes, decimals=0).astype(int))[-1::-1]
1301 +
        impad = np.pad(im, mode="symmetric", pad_width=pw)
1302 +
        inlets = np.pad(inlets, mode="symmetric", pad_width=pw)
1303 +
        # sizes = np.unique(np.around(sizes, decimals=0).astype(int))[-1::-1]
1212 1304
        imresults = np.zeros(np.shape(impad))
1213 -
        for r in tqdm(sizes):
1214 -
            imtemp = fftmorphology(impad, strel(r), mode='erosion')
1215 -
            if access_limited:
1216 -
                imtemp = trim_disconnected_blobs(imtemp, inlets)
1217 -
            imtemp = fftmorphology(imtemp, strel(r), mode='dilation')
1218 -
            if np.any(imtemp):
1219 -
                imresults[(imresults == 0)*imtemp] = r
1305 +
        with tqdm(sizes) as pbar:
1306 +
            for r in sizes:
1307 +
                pbar.update()
1308 +
                if parallel:
1309 +
                    imtemp = chunked_func(func=spim.binary_erosion,
1310 +
                                          input=impad, structure=strel(r),
1311 +
                                          overlap=int(2*r) + 1,
1312 +
                                          cores=cores, divs=divs)
1313 +
                elif fft:
1314 +
                    imtemp = fftmorphology(impad, strel(r), mode="erosion")
1315 +
                else:
1316 +
                    imtemp = spim.binary_erosion(input=impad,
1317 +
                                                 structure=strel(r))
1318 +
                if access_limited:
1319 +
                    imtemp = trim_disconnected_blobs(imtemp, inlets)
1320 +
                if parallel:
1321 +
                    imtemp = chunked_func(func=spim.binary_dilation,
1322 +
                                          input=imtemp, structure=strel(r),
1323 +
                                          overlap=int(2*r) + 1,
1324 +
                                          cores=cores, divs=divs)
1325 +
                elif fft:
1326 +
                    imtemp = fftmorphology(imtemp, strel(r), mode="dilation")
1327 +
                else:
1328 +
                    imtemp = spim.binary_dilation(input=imtemp,
1329 +
                                                  structure=strel(r))
1330 +
                if np.any(imtemp):
1331 +
                    imresults[(imresults == 0) * imtemp] = r
1220 1332
        imresults = extract_subsection(imresults, shape=im.shape)
1221 -
    elif mode == 'dt':
1222 -
        inlets = np.where(inlets)
1333 +
    elif mode == "dt":
1223 1334
        imresults = np.zeros(np.shape(im))
1224 -
        for r in tqdm(sizes):
1225 -
            imtemp = dt >= r
1226 -
            if access_limited:
1227 -
                imtemp = trim_disconnected_blobs(imtemp, inlets)
1228 -
            if np.any(imtemp):
1229 -
                imtemp = spim.distance_transform_edt(~imtemp) < r
1230 -
                imresults[(imresults == 0)*imtemp] = r
1231 -
    elif mode == 'hybrid':
1232 -
        inlets = np.where(inlets)
1335 +
        with tqdm(sizes) as pbar:
1336 +
            for r in sizes:
1337 +
                pbar.update()
1338 +
                imtemp = dt >= r
1339 +
                if access_limited:
1340 +
                    imtemp = trim_disconnected_blobs(imtemp, inlets)
1341 +
                if np.any(imtemp):
1342 +
                    imtemp = edt(~imtemp) < r
1343 +
                    imresults[(imresults == 0) * imtemp] = r
1344 +
    elif mode == "hybrid":
1233 1345
        imresults = np.zeros(np.shape(im))
1234 -
        for r in tqdm(sizes):
1235 -
            imtemp = dt >= r
1236 -
            if access_limited:
1237 -
                imtemp = trim_disconnected_blobs(imtemp, inlets)
1238 -
            if np.any(imtemp):
1239 -
                imtemp = fftconvolve(imtemp, strel(r), mode='same') > 0.0001
1240 -
                imresults[(imresults == 0)*imtemp] = r
1346 +
        with tqdm(sizes) as pbar:
1347 +
            for r in sizes:
1348 +
                pbar.update()
1349 +
                imtemp = dt >= r
1350 +
                if access_limited:
1351 +
                    imtemp = trim_disconnected_blobs(imtemp, inlets)
1352 +
                if np.any(imtemp):
1353 +
                    if parallel:
1354 +
                        imtemp = chunked_func(func=spim.binary_dilation,
1355 +
                                              input=imtemp, structure=strel(r),
1356 +
                                              overlap=int(2*r) + 1,
1357 +
                                              cores=cores, divs=divs)
1358 +
                    elif fft:
1359 +
                        imtemp = fftmorphology(imtemp, strel(r),
1360 +
                                               mode="dilation")
1361 +
                    else:
1362 +
                        imtemp = spim.binary_dilation(input=imtemp,
1363 +
                                                      structure=strel(r))
1364 +
                    imresults[(imresults == 0) * imtemp] = r
1241 1365
    else:
1242 -
        raise Exception('Unreckognized mode ' + mode)
1366 +
        raise Exception("Unrecognized mode " + mode)
1243 1367
    return imresults
1244 1368
1245 1369
1246 -
def trim_disconnected_blobs(im, inlets):
1370 +
def trim_disconnected_blobs(im, inlets, strel=None):
1247 1371
    r"""
1248 1372
    Removes foreground voxels not connected to specified inlets
1249 1373
1250 1374
    Parameters
1251 1375
    ----------
1252 1376
    im : ND-array
1253 -
        The array to be trimmed
1377 +
        The image containing the blobs to be trimmed
1254 1378
    inlets : ND-array or tuple of indices
1255 1379
        The locations of the inlets.  Can either be a boolean mask the same
1256 1380
        shape as ``im``, or a tuple of indices such as that returned by the
1257 1381
        ``where`` function.  Any voxels *not* connected directly to
1258 1382
        the inlets will be trimmed.
1383 +
    strel : ND-array
1384 +
        The neighborhood over which connectivity should be checked. It must
1385 +
        be symmetric and the same dimensionality as the image.  It is passed
1386 +
        directly to the ``scipy.ndimage.label`` function as the ``structure``
1387 +
        argument so refer to that docstring for additional info.
1259 1388
1260 1389
    Returns
1261 1390
    -------
@@ -1270,23 +1399,27 @@
Loading
1270 1399
    elif (inlets.shape == im.shape) and (inlets.max() == 1):
1271 1400
        inlets = inlets.astype(bool)
1272 1401
    else:
1273 -
        raise Exception('inlets not valid, refer to docstring for info')
1274 -
    labels = spim.label(inlets + (im > 0))[0]
1402 +
        raise Exception("inlets not valid, refer to docstring for info")
1403 +
    if im.ndim == 3:
1404 +
        strel = cube
1405 +
    else:
1406 +
        strel = square
1407 +
    labels = spim.label(inlets + (im > 0), structure=strel(3))[0]
1275 1408
    keep = np.unique(labels[inlets])
1276 1409
    keep = keep[keep > 0]
1277 1410
    if len(keep) > 0:
1278 1411
        im2 = np.reshape(np.in1d(labels, keep), newshape=im.shape)
1279 1412
    else:
1280 1413
        im2 = np.zeros_like(im)
1281 -
    im2 = im2*im
1414 +
    im2 = im2 * im
1282 1415
    return im2
1283 1416
1284 1417
1285 1418
def _get_axial_shifts(ndim=2, include_diagonals=False):
1286 -
    r'''
1419 +
    r"""
1287 1420
    Helper function to generate the axial shifts that will be performed on
1288 1421
    the image to identify bordering pixels/voxels
1289 -
    '''
1422 +
    """
1290 1423
    if ndim == 2:
1291 1424
        if include_diagonals:
1292 1425
            neighbors = square(3)
@@ -1311,40 +1444,37 @@
Loading
1311 1444
1312 1445
1313 1446
def _make_stack(im, include_diagonals=False):
1314 -
    r'''
1447 +
    r"""
1315 1448
    Creates a stack of images with one extra dimension to the input image
1316 1449
    with length equal to the number of borders to search + 1.
1317 1450
    Image is rolled along the axial shifts so that the border pixel is
1318 1451
    overlapping the original pixel. First image in stack is the original.
1319 1452
    Stacking makes direct vectorized array comparisons possible.
1320 -
    '''
1453 +
    """
1321 1454
    ndim = len(np.shape(im))
1322 1455
    axial_shift = _get_axial_shifts(ndim, include_diagonals)
1323 1456
    if ndim == 2:
1324 -
        stack = np.zeros([np.shape(im)[0],
1325 -
                          np.shape(im)[1],
1326 -
                          len(axial_shift)+1])
1457 +
        stack = np.zeros([np.shape(im)[0], np.shape(im)[1], len(axial_shift) + 1])
1327 1458
        stack[:, :, 0] = im
1328 1459
        for i in range(len(axial_shift)):
1329 1460
            ax0, ax1 = axial_shift[i]
1330 1461
            temp = np.roll(np.roll(im, ax0, 0), ax1, 1)
1331 -
            stack[:, :, i+1] = temp
1462 +
            stack[:, :, i + 1] = temp
1332 1463
        return stack
1333 1464
    elif ndim == 3:
1334 -
        stack = np.zeros([np.shape(im)[0],
1335 -
                          np.shape(im)[1],
1336 -
                          np.shape(im)[2],
1337 -
                          len(axial_shift)+1])
1465 +
        stack = np.zeros(
1466 +
            [np.shape(im)[0], np.shape(im)[1], np.shape(im)[2], len(axial_shift) + 1]
1467 +
        )
1338 1468
        stack[:, :, :, 0] = im
1339 1469
        for i in range(len(axial_shift)):
1340 1470
            ax0, ax1, ax2 = axial_shift[i]
1341 1471
            temp = np.roll(np.roll(np.roll(im, ax0, 0), ax1, 1), ax2, 2)
1342 -
            stack[:, :, :, i+1] = temp
1472 +
            stack[:, :, :, i + 1] = temp
1343 1473
        return stack
1344 1474
1345 1475
1346 1476
def nphase_border(im, include_diagonals=False):
1347 -
    r'''
1477 +
    r"""
1348 1478
    Identifies the voxels in regions that border *N* other regions.
1349 1479
1350 1480
    Useful for finding triple-phase boundaries.
@@ -1365,17 +1495,20 @@
Loading
1365 1495
    image : ND-array
1366 1496
        A copy of ``im`` with voxel values equal to the number of uniquely
1367 1497
        different bordering values
1368 -
    '''
1498 +
    """
1369 1499
    if im.ndim != im.squeeze().ndim:
1370 -
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
1371 -
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
1372 -
                      ' unexpected behavior.')
1500 +
        warnings.warn(
1501 +
            "Input image conains a singleton axis:"
1502 +
            + str(im.shape)
1503 +
            + " Reduce dimensionality with np.squeeze(im) to avoid"
1504 +
            + " unexpected behavior."
1505 +
        )
1373 1506
    # Get dimension of image
1374 1507
    ndim = len(np.shape(im))
1375 1508
    if ndim not in [2, 3]:
1376 1509
        raise NotImplementedError("Function only works for 2d and 3d images")
1377 1510
    # Pad image to handle edges
1378 -
    im = np.pad(im, pad_width=1, mode='edge')
1511 +
    im = np.pad(im, pad_width=1, mode="edge")
1379 1512
    # Stack rolled images for each neighbor to be inspected
1380 1513
    stack = _make_stack(im, include_diagonals)
1381 1514
    # Sort the stack along the last axis
@@ -1385,9 +1518,9 @@
Loading
1385 1518
    # Number of changes is number of unique bordering regions
1386 1519
    for k in range(np.shape(stack)[ndim])[1:]:
1387 1520
        if ndim == 2:
1388 -
            mask = stack[:, :, k] != stack[:, :, k-1]
1521 +
            mask = stack[:, :, k] != stack[:, :, k - 1]
1389 1522
        elif ndim == 3:
1390 -
            mask = stack[:, :, :, k] != stack[:, :, :, k-1]
1523 +
            mask = stack[:, :, :, k] != stack[:, :, :, k - 1]
1391 1524
        out += mask
1392 1525
    # Un-pad
1393 1526
    if ndim == 2:
@@ -1396,7 +1529,7 @@
Loading
1396 1529
        return out[1:-1, 1:-1, 1:-1].copy()
1397 1530
1398 1531
1399 -
def prune_branches(skel, branch_points=None, iterations=1):
1532 +
def prune_branches(skel, branch_points=None, iterations=1, **kwargs):
1400 1533
    r"""
1401 1534
    Removes all dangling ends or tails of a skeleton.
1402 1535
@@ -1421,20 +1554,28 @@
Loading
1421 1554
        from skimage.morphology import square as cube
1422 1555
    else:
1423 1556
        from skimage.morphology import cube
1557 +
    parallel = kwargs.pop('parallel', False)
1558 +
    divs = kwargs.pop('divs', 2)
1559 +
    cores = kwargs.pop('cores', None)
1424 1560
    # Create empty image to house results
1425 1561
    im_result = np.zeros_like(skel)
1426 1562
    # If branch points are not supplied, attempt to find them
1427 1563
    if branch_points is None:
1428 -
        branch_points = spim.convolve(skel*1.0, weights=cube(3)) > 3
1429 -
        branch_points = branch_points*skel
1564 +
        branch_points = spim.convolve(skel * 1.0, weights=cube(3)) > 3
1565 +
        branch_points = branch_points * skel
1430 1566
    # Store original branch points before dilating
1431 1567
    pts_orig = branch_points
1432 1568
    # Find arcs of skeleton by deleting branch points
1433 -
    arcs = skel*(~branch_points)
1569 +
    arcs = skel * (~branch_points)
1434 1570
    # Label arcs
1435 1571
    arc_labels = spim.label(arcs, structure=cube(3))[0]
1436 1572
    # Dilate branch points so they overlap with the arcs
1437 -
    branch_points = spim.binary_dilation(branch_points, structure=cube(3))
1573 +
    if parallel:
1574 +
        branch_points = chunked_func(func=spim.binary_dilation,
1575 +
                                     input=branch_points, structure=cube(3),
1576 +
                                     overlap=3, divs=divs, cores=cores)
1577 +
    else:
1578 +
        branch_points = spim.binary_dilation(branch_points, structure=cube(3))
1438 1579
    pts_labels = spim.label(branch_points, structure=cube(3))[0]
1439 1580
    # Now scan through each arc to see if it's connected to two branch points
1440 1581
    slices = spim.find_objects(arc_labels)
@@ -1442,18 +1583,750 @@
Loading
1442 1583
    for s in slices:
1443 1584
        label_num += 1
1444 1585
        # Find branch point labels the overlap current arc
1445 -
        hits = pts_labels[s]*(arc_labels[s] == label_num)
1586 +
        hits = pts_labels[s] * (arc_labels[s] == label_num)
1446 1587
        # If image contains 2 branch points, then it's not a tail.
1447 1588
        if len(np.unique(hits)) == 3:
1448 1589
            im_result[s] += arc_labels[s] == label_num
1449 1590
    # Add missing branch points back to arc image to make complete skeleton
1450 -
    im_result += skel*pts_orig
1591 +
    im_result += skel * pts_orig
1451 1592
    if iterations > 1:
1452 1593
        iterations -= 1
1453 1594
        im_temp = np.copy(im_result)
1454 1595
        im_result = prune_branches(skel=im_result,
1455 1596
                                   branch_points=None,
1456 -
                                   iterations=iterations)
1597 +
                                   iterations=iterations,
1598 +
                                   parallel=parallel,
1599 +
                                   divs=divs, cores=cores)
1457 1600
        if np.all(im_temp == im_result):
1458 1601
            iterations = 0
1459 1602
    return im_result
1603 +
1604 +
1605 +
def chunked_func(func,
1606 +
                 overlap=None,
1607 +
                 divs=2,
1608 +
                 cores=None,
1609 +
                 im_arg=["input", "image", "im"],
1610 +
                 strel_arg=["strel", "structure", "footprint"],
1611 +
                 **kwargs):
1612 +
    r"""
1613 +
    Performs the specfied operation "chunk-wise" in parallel using dask
1614 +
1615 +
    This can be used to save memory by doing one chunk at a time (``cores=1``)
1616 +
    or to increase computation speed by spreading the work across multiple
1617 +
    cores (e.g. ``cores=8``)
1618 +
1619 +
    This function can be used with any operation that applies a structuring
1620 +
    element of some sort, since this implies that the operation is local
1621 +
    and can be chunked.
1622 +
1623 +
    Parameters
1624 +
    ----------
1625 +
    func : function handle
1626 +
        The function which should be applied to each chunk, such as
1627 +
        ``spipy.ndimage.binary_dilation``.
1628 +
    overlap : scalar or list of scalars, optional
1629 +
        The amount of overlap to include when dividing up the image.  This
1630 +
        value will almost always be the size (i.e. diameter) of the
1631 +
        structuring element. If not specified then the amount of overlap is
1632 +
        inferred from the size of the structuring element, in which case the
1633 +
        ``strel_arg`` must be specified.
1634 +
    divs : scalar or list of scalars (default = [2, 2, 2])
1635 +
        The number of chunks to divide the image into in each direction.  The
1636 +
        default is 2 chunks in each direction, resulting in a quartering of
1637 +
        the image and 8 total chunks (in 3D).  A scalar is interpreted as
1638 +
        applying to all directions, while a list of scalars is interpreted
1639 +
        as applying to each individual direction.
1640 +
    cores : scalar
1641 +
        The number of cores which should be used.  By default, all cores will
1642 +
        be used, or as many are needed for the given number of chunks, which
1643 +
        ever is smaller.
1644 +
    im_arg : string
1645 +
        The keyword used by ``func`` for the image to be operated on.  By
1646 +
        default this function will look for ``image``, ``input``, and ``im``
1647 +
        which are commonly used by *scipy.ndimage* and *skimage*.
1648 +
    strel_arg : string
1649 +
        The keyword used by ``func`` for the structuring element to apply.
1650 +
        This is only needed if ``overlap`` is not specified. By default this
1651 +
        function will look for ``strel``, ``structure``, and ``footprint``
1652 +
        which are commonly used by *scipy.ndimage* and *skimage*.
1653 +
    kwargs : additional keyword arguments
1654 +
        All other arguments are passed to ``func`` as keyword arguments. Note
1655 +
        that PoreSpy will fetch the image from this list of keywords using the
1656 +
        value provided to ``im_arg``.
1657 +
1658 +
    Returns
1659 +
    -------
1660 +
    result : ND-image
1661 +
        An image the same size as the input image, with the specified filter
1662 +
        applied as though done on a single large image.  There should be *no*
1663 +
        difference.
1664 +
1665 +
    Notes
1666 +
    -----
1667 +
    This function divides the image into the specified number of chunks, but
1668 +
    also applies a padding to each chunk to create an overlap with neighboring
1669 +
    chunks.  This way the operation does not have any edge artifacts. The
1670 +
    amount of padding is usually equal to the radius of the structuring
1671 +
    element but some functions do not use one, such as the distance transform
1672 +
    and Gaussian blur.  In these cases the user can specify ``overlap``.
1673 +
1674 +
    See Also
1675 +
    --------
1676 +
    skikit-image.util.apply_parallel
1677 +
1678 +
    Examples
1679 +
    --------
1680 +
    >>> import scipy.ndimage as spim
1681 +
    >>> import porespy as ps
1682 +
    >>> from skimage.morphology import ball
1683 +
    >>> im = ps.generators.blobs(shape=[100, 100, 100])
1684 +
    >>> f = spim.binary_dilation
1685 +
    >>> im2 = ps.filters.chunked_func(func=f, overlap=7, im_arg='input',
1686 +
    ...                               input=im, structure=ball(3), cores=1)
1687 +
    >>> im3 = spim.binary_dilation(input=im, structure=ball(3))
1688 +
    >>> np.all(im2 == im3)
1689 +
    True
1690 +
1691 +
    """
1692 +
1693 +
    @dask.delayed
1694 +
    def apply_func(func, **kwargs):
1695 +
        # Apply function on sub-slice of overall image
1696 +
        return func(**kwargs)
1697 +
1698 +
    # Import the array_split methods
1699 +
    from array_split import shape_split, ARRAY_BOUNDS
1700 +
1701 +
    # Determine the value for im_arg
1702 +
    if type(im_arg) == str:
1703 +
        im_arg = [im_arg]
1704 +
    for item in im_arg:
1705 +
        if item in kwargs.keys():
1706 +
            im = kwargs[item]
1707 +
            im_arg = item
1708 +
            break
1709 +
    # Fetch image from the kwargs dict
1710 +
    im = kwargs[im_arg]
1711 +
    # Determine the number of divisions to create
1712 +
    divs = np.ones((im.ndim,), dtype=int) * np.array(divs)
1713 +
    # If overlap given then use it, otherwise search for strel in kwargs
1714 +
    if overlap is not None:
1715 +
        halo = overlap * (divs > 1)
1716 +
    else:
1717 +
        if type(strel_arg) == str:
1718 +
            strel_arg = [strel_arg]
1719 +
        for item in strel_arg:
1720 +
            if item in kwargs.keys():
1721 +
                strel = kwargs[item]
1722 +
                break
1723 +
        halo = np.array(strel.shape) * (divs > 1)
1724 +
    slices = np.ravel(
1725 +
        shape_split(
1726 +
            im.shape, axis=divs, halo=halo.tolist(), tile_bounds_policy=ARRAY_BOUNDS
1727 +
        )
1728 +
    )
1729 +
    # Apply func to each subsection of the image
1730 +
    res = []
1731 +
    # print('Image will be broken into the following chunks:')
1732 +
    for s in slices:
1733 +
        # Extract subsection from image and input into kwargs
1734 +
        kwargs[im_arg] = im[tuple(s)]
1735 +
        # print(kwargs[im_arg].shape)
1736 +
        res.append(apply_func(func=func, **kwargs))
1737 +
    # Have dask actually compute the function on each subsection in parallel
1738 +
    # with ProgressBar():
1739 +
    #    ims = dask.compute(res, num_workers=cores)[0]
1740 +
    ims = dask.compute(res, num_workers=cores)[0]
1741 +
    # Finally, put the pieces back together into a single master image, im2
1742 +
    im2 = np.zeros_like(im, dtype=im.dtype)
1743 +
    for i, s in enumerate(slices):
1744 +
        # Prepare new slice objects into main and sub-sliced image
1745 +
        a = []  # Slices into main image
1746 +
        b = []  # Slices into chunked image
1747 +
        for dim in range(im.ndim):
1748 +
            if s[dim].start == 0:
1749 +
                ax = bx = 0
1750 +
            else:
1751 +
                ax = s[dim].start + halo[dim]
1752 +
                bx = halo[dim]
1753 +
            if s[dim].stop == im.shape[dim]:
1754 +
                ay = by = im.shape[dim]
1755 +
            else:
1756 +
                ay = s[dim].stop - halo[dim]
1757 +
                by = s[dim].stop - s[dim].start - halo[dim]
1758 +
            a.append(slice(ax, ay, None))
1759 +
            b.append(slice(bx, by, None))
1760 +
        # Convert lists of slices to tuples
1761 +
        a = tuple(a)
1762 +
        b = tuple(b)
1763 +
        # Insert image chunk into main image
1764 +
        im2[a] = ims[i][b]
1765 +
    return im2
1766 +
1767 +
1768 +
def snow_partitioning_parallel(im,
                               overlap='dt',
                               divs=2,
                               mode='parallel',
                               num_workers=None,
                               crop=True,
                               zoom_factor=0.5,
                               r_max=5,
                               sigma=0.4,
                               return_all=False):
    r"""
    Performs the SNOW algorithm in parallel or serial mode to reduce time
    or memory usage respectively, by geometric domain decomposition of a
    large image.

    Parameters
    ----------
    im : ND-array
        A binary image of porous media with 'True' values indicating the
        phase of interest.

    overlap : float or int or str
        Overlapping thickness between two subdomains that is used to merge
        watershed segmented regions at the intersection of two or more
        subdomains.
        If 'dt' the overlap is calculated from the maximum of the distance
        transform of the whole image.
        If 'ws' the overlap is calculated by finding the maximum dimension
        of the bounding box of the largest segmented region on a version of
        the image scaled down by ``zoom_factor``.
        If any real number is provided then this value is used directly as
        the overlapping thickness.

    divs : list or int
        Number of domains each axis is divided into.
        If a scalar is provided it is applied to all axes.
        If a list is provided each axis is divided by its corresponding
        entry; for example [2, 3, 4] divides the z, y and x axes into
        2, 3 and 4 parts respectively.

    mode : str
        If 'parallel' all subdomains are processed on ``num_workers``
        cores.  If 'serial' all subdomains are processed one by one on a
        single core.

    num_workers : int or None
        Number of cores used to process the domains in parallel.  If None
        all cores are used; a smaller value can be given to limit memory
        usage.

    crop : bool
        If True the image is cropped to fit the specified division.

    zoom_factor : float or int
        The amount of zoom applied to the image when finding the overlap
        thickness with the 'ws' overlap mode.

    r_max : int
        Radius of the structuring element passed to the chunk-wise SNOW
        peak search.  Default is 5.

    sigma : float
        Standard deviation of the Gaussian filter applied to each chunk's
        distance transform.  Default is 0.4.

    return_all : boolean
        If set to ``True`` a named tuple is returned containing the
        original image, the distance transform, and the final pore
        regions.  The default is ``False``.

    Returns
    -------
    regions : ND-array
        Partitioned image of segmented regions with unique labels.  Each
        region corresponds to a pore body, while the intersections between
        regions correspond to throat areas.

    Raises
    ------
    Exception
        If ``mode`` is neither 'parallel' nor 'serial'.
    """
    # --------------------------------------------------------------------
    # Adjust image shape according to specified division
    tup = namedtuple("results", field_names=["im", "dt", "regions"])
    if isinstance(divs, int):
        divs = [divs for i in range(im.ndim)]
    shape = [divs[i] * (im.shape[i] // divs[i]) for i in range(im.ndim)]

    if shape != list(im.shape):
        if crop:
            for i in range(im.ndim):
                im = im.swapaxes(0, i)
                im = im[:shape[i], ...]
                im = im.swapaxes(i, 0)
            print(f'Image is cropped to shape {shape}.')
        else:
            print('-' * 80)
            print(f"Possible image shape for specified divisions is {shape}.")
            print("To crop the image please set crop argument to 'True'.")
            return
    # --------------------------------------------------------------------
    # Get overlap thickness from distance transform
    chunk_shape = (np.array(shape) / np.array(divs)).astype(int)
    print('# Beginning parallel SNOW algorithm...')
    print('=' * 80)
    print('Calculating overlap thickness')
    if overlap == 'dt':
        dt = edt((im > 0), parallel=0)
        overlap = dt.max()
    elif overlap == 'ws':
        # Estimate overlap from the largest region found on a zoomed-down
        # watershed of the image, rescaled back to full resolution
        rev = spim.interpolation.zoom(im, zoom=zoom_factor, order=0)
        rev = rev > 0
        dt = edt(rev, parallel=0)
        rev_snow = snow_partitioning(rev, dt=dt, r_max=r_max, sigma=sigma)
        labels, counts = np.unique(rev_snow, return_counts=True)
        node = np.where(counts == counts[1:].max())[0][0]
        slices = spim.find_objects(rev_snow)
        overlap = max(rev_snow[slices[node - 1]].shape) / (zoom_factor * 2.0)
        dt = edt((im > 0), parallel=0)
    else:
        overlap = overlap / 2.0
        dt = edt((im > 0), parallel=0)
    print('Overlap Thickness: ' + str(int(2.0 * overlap)) + ' voxels')
    # --------------------------------------------------------------------
    # Get overlap and trim depth for every image dimension
    depth = {}
    trim_depth = {}
    for i in range(im.ndim):
        depth[i] = int(2.0 * overlap)
        trim_depth[i] = int(2.0 * overlap) - 1

    # NOTE(review): attributes are set on the namedtuple *class*, not an
    # instance, so the class itself is returned when return_all is True.
    # Kept as-is for backward compatibility with existing callers.
    tup.im = im
    tup.dt = dt
    # --------------------------------------------------------------------
    # Apply snow to image chunks
    im = da.from_array(dt, chunks=chunk_shape)
    im = da.overlap.overlap(im, depth=depth, boundary='none')
    im = im.map_blocks(chunked_snow, r_max=r_max, sigma=sigma)
    im = da.overlap.trim_internal(im, trim_depth, boundary='none')
    if mode == 'serial':
        num_workers = 1
    elif mode != 'parallel':
        raise Exception('Mode of operation can either be parallel or serial')
    with ProgressBar():
        print('Applying snow to image chunks')
        regions = im.compute(num_workers=num_workers)
    # --------------------------------------------------------------------
    # Relabel watershed chunks so labels are unique across chunks
    print('Relabelling watershed chunks')
    regions = relabel_chunks(im=regions, chunk_shape=chunk_shape)
    # --------------------------------------------------------------------
    # Stitch watershed chunks back into one segmentation
    print('Stitching watershed chunks')
    regions = watershed_stitching(im=regions, chunk_shape=chunk_shape)
    print('=' * 80)
    if return_all:
        tup.regions = regions
        return tup
    return regions
1925 +
1926 +
1927 +
def chunked_snow(im, r_max=5, sigma=0.4):
    r"""
    Partitions the void space into pore regions using a marker-based
    watershed algorithm, with specially filtered peaks as markers.

    The SNOW network extraction algorithm (Sub-Network of an
    Over-segmented Watershed) was designed to handle the peculiarities of
    high porosity materials, but it applies well to other materials as
    well.

    Parameters
    ----------
    im : array_like
        Distance transform of the phase of interest in a binary image.
    r_max : int
        The radius of the spherical structuring element to use in the
        maximum filter stage that is used to find peaks.  The default is 5.
    sigma : float
        The standard deviation of the Gaussian filter used in step 1.  The
        default is 0.4.  If 0 is given then the filter is not applied,
        which is useful if a distance transform is supplied as the ``im``
        argument that has already been processed.

    Returns
    -------
    image : ND-array
        An image the same shape as ``im`` with the void space partitioned
        into pores using a marker based watershed with the peaks found by
        the SNOW algorithm [1].

    References
    ----------
    [1] Gostick, J. "A versatile and efficient network extraction
    algorithm using marker-based watershed segmenation".  Physical Review
    E. (2017)
    """
    # Smooth the distance transform, then find and filter the peaks that
    # will seed the watershed
    blurred = spim.gaussian_filter(input=im, sigma=sigma)
    markers = find_peaks(dt=blurred, r_max=r_max)
    markers = trim_saddle_points(peaks=markers, dt=blurred, max_iters=99,
                                 verbose=0)
    markers = trim_nearby_peaks(peaks=markers, dt=blurred)
    markers, N = spim.label(markers)
    void = im > 0
    regions = watershed(image=-blurred, markers=markers, mask=void)
    return regions * void
1970 +
1971 +
1972 +
def pad(im, pad_width=1, constant_value=0):
    r"""
    Pads the image with a constant value and width.

    Parameters
    ----------
    im : ND-array
        The image that requires padding.
    pad_width : int
        The number of voxels padded onto every edge.  Default value is 1.
    constant_value : int
        Pads with the specified constant value.  Default is 0.

    Returns
    -------
    output : ND-array
        Padded image (dtype ``np.uint32``) with the same number of
        dimensions as the provided image.
    """
    out_shape = np.array(im.shape) + 2 * pad_width
    temp = np.zeros(out_shape, dtype=np.uint32)
    if constant_value != 0:
        temp = temp + constant_value
    # Assign the original image into the interior.  Building the slice
    # tuple programmatically works for any dimensionality (the original
    # code only handled 1D, 2D and 3D explicitly).
    interior = (slice(pad_width, -pad_width),) * im.ndim
    temp[interior] = im

    return temp
2007 +
2008 +
2009 +
def relabel_chunks(im, chunk_shape):  # pragma: no cover
    r"""
    Assigns new labels to each chunk or sub-domain of the actual image.
    This prevents two or more regions from having the same label.

    Parameters
    ----------
    im : ND-array
        Actual image that contains repeating labels in its chunks or
        sub-domains.
    chunk_shape : tuple
        The shape of the chunk that is relabeled in the actual image.
        Note the chunk shape should be a multiple of the actual image
        shape otherwise some labels will not be relabeled.

    Returns
    -------
    output : ND-array
        Relabeled image with a unique label assigned to each region.
    """
    im = pad(im, pad_width=1)
    # After padding, each chunk carries a 1-voxel border on every face
    c = np.array(chunk_shape, dtype=np.uint32) + 2
    num = (np.array(im.shape, dtype=np.uint32) / c).astype(int)
    max_num = 0

    # np.ndindex iterates chunks in the same C order as the original
    # nested z/y/x loops, and generalizes to any dimensionality (the
    # original code only handled 2D and 3D explicitly)
    for idx in np.ndindex(*num):
        sl = tuple(slice(i * c[d], (i + 1) * c[d])
                   for d, i in enumerate(idx))
        chunk = im[sl]
        chunk += max_num
        # Keep the background at 0 after the offset was added
        chunk[chunk == max_num] = 0
        max_num = chunk.max()
        im[sl] = chunk

    return im
2061 +
2062 +
2063 +
def trim_internal_slice(im, chunk_shape):  # pragma: no cover
    r"""
    Deletes the extra slices from the image that were used to stitch two
    or more chunks together.

    Parameters
    ----------
    im : ND-array
        Image that contains extra slices in the x, y, z directions.
    chunk_shape : tuple
        The shape of the chunk from which the image was subdivided.

    Returns
    -------
    output : ND-array
        Image without the extra internal slices.  The shape of the image
        is the same as the input image provided for watershed
        segmentation.
    """
    # Each source chunk is 2 voxels bigger per axis than the target chunk
    c_in = np.array(chunk_shape, dtype=np.uint32) + 2
    c_out = np.array(chunk_shape, dtype=np.uint32)
    num = (np.array(im.shape, dtype=np.uint32) / c_in).astype(int)
    out = np.empty(num * c_out, dtype=np.uint32)
    # Strip one slice from every face of each chunk; works for any
    # dimensionality (the original code handled only 2D and 3D)
    trim = (slice(1, -1),) * im.ndim

    for idx in np.ndindex(*num):
        src = tuple(slice(i * c_in[d], (i + 1) * c_in[d])
                    for d, i in enumerate(idx))
        dst = tuple(slice(i * c_out[d], (i + 1) * c_out[d])
                    for d, i in enumerate(idx))
        out[dst] = im[src][trim]

    return out
2111 +
2112 +
2113 +
def watershed_stitching(im, chunk_shape):
    r"""
    Stitches individual sub-domains of a watershed segmentation into one
    big segmentation, with all boundary labels of each sub-domain
    relabeled to merge the boundary regions.

    Parameters
    ----------
    im : ND-array
        A worked image with watershed segmentation performed on all
        sub-domains individually.
    chunk_shape : tuple
        The shape of the sub-domain in which image segmentation is
        performed.

    Returns
    -------
    output : ND-array
        Stitched watershed segmentation with all sub-domains merged to
        form a single watershed segmentation.

    Raises
    ------
    Exception
        If the labels on the two sides of a chunk boundary cannot be
        paired one-to-one, which indicates an unsuitable overlapping
        thickness.
    """

    c_shape = np.array(chunk_shape)
    # Number of chunks along each axis
    cuts_num = (np.array(im.shape) / c_shape).astype(np.uint32)

    for axis, num in enumerate(cuts_num):
        keys = []
        values = []
        if num > 1:
            # Bring the axis being stitched to the front so plain slicing
            # can address the boundary planes
            im = im.swapaxes(0, axis)
            for i in range(1, num):
                # Plane index just past the i-th chunk boundary; the +3
                # offset accounts for the extra boundary slices per chunk
                # (NOTE(review): assumes the 1-voxel pad added by
                # relabel_chunks plus the overlap slice -- confirm against
                # the chunking pipeline)
                sl = i * (chunk_shape[axis] + 3) - (i - 1)
                # sl1/sl2 are the matching planes on either side of the
                # boundary; labels present in both are merged pairwise
                sl1 = im[sl - 3, ...]
                sl1_mask = sl1 > 0
                sl2 = im[sl - 1, ...] * sl1_mask
                sl1_labels = sl1.flatten()[sl1.flatten() > 0]
                sl2_labels = sl2.flatten()[sl2.flatten() > 0]
                if sl1_labels.size != sl2_labels.size:
                    raise Exception('The selected overlapping thickness is not '
                                    'suitable for input image. Change '
                                    'overlapping criteria '
                                    'or manually input value.')
                keys.append(sl1_labels)
                values.append(sl2_labels)
            # Replace all paired labels in one pass, then restore axes
            im = replace_labels(array=im, keys=keys, values=values)
            im = im.swapaxes(axis, 0)
    # Remove the extra boundary slices and make labels contiguous
    im = trim_internal_slice(im=im, chunk_shape=chunk_shape)
    im = resequence_labels(array=im)

    return im
2163 +
2164 +
2165 +
@njit(parallel=True)
def copy(im, output):  # pragma: no cover
    r"""
    Copies the input array into ``output``, an array allocated in a
    different memory space.  This is a numba version of numpy's copy
    function.  Because each element is copied using a parallel approach
    this implementation is faster than the numpy version of copy.

    Parameters
    ----------
    im : ND-array
        Array that needs to be copied.
    output : ND-array
        Pre-allocated array of the same shape as ``im`` that receives the
        copied values.

    Returns
    -------
    output : ND-array
        The copied array.
    """

    # Explicit per-dimension loops because numba's prange requires
    # literal nested loops to parallelize
    if im.ndim == 3:
        for i in prange(im.shape[0]):
            for j in prange(im.shape[1]):
                for k in prange(im.shape[2]):
                    output[i, j, k] = im[i, j, k]
    elif im.ndim == 2:
        for i in prange(im.shape[0]):
            for j in prange(im.shape[1]):
                output[i, j] = im[i, j]
    else:
        for i in prange(im.shape[0]):
            output[i] = im[i]

    return output
2198 +
2199 +
2200 +
@njit(parallel=True)
def _replace(array, keys, values, ind_sort):  # pragma: no cover
    r"""
    Replaces ``keys`` elements in the input array with the corresponding
    ``values`` elements, in place.  This function is used as the internal
    worker of ``replace_labels``.

    Parameters
    ----------
    array : 1D-array
        Flattened array whose labels are replaced in place.
    keys : 1D-array
        The unique labels that need to be replaced.
    values : 1D-array
        The unique values that will be assigned to the labels.
    ind_sort : 1D-array
        Indices that sort ``keys`` (computed by the caller so the
        argsort is done once, outside the jitted kernel).

    Returns
    -------
    None
        The replacement is performed in place on ``array``.
    """
    keys_sorted = keys[ind_sort]
    values_sorted = values[ind_sort]
    # Membership test against a set avoids a binary search for elements
    # that do not need replacing
    s_keys = set(keys)

    for i in prange(array.shape[0]):
        if array[i] in s_keys:
            ind = np.searchsorted(keys_sorted, array[i])
            array[i] = values_sorted[ind]
2231 +
def replace_labels(array, keys, values):
    r"""
    Replaces the labels listed in ``keys`` with the corresponding entries
    of ``values`` throughout ``array``.

    Parameters
    ----------
    array : ND-array
        Array which requires replacing labels.
    keys : 1D-array
        The unique labels that need to be replaced.
    values : 1D-array
        The unique values that will be assigned to labels.

    Returns
    -------
    array : ND-array
        Array with replaced labels.
    """
    original_shape = array.shape
    flat = array.flatten()
    # Merge the per-boundary key/value lists into single lookup arrays
    all_keys = np.concatenate(keys, axis=0)
    all_values = np.concatenate(values, axis=0)
    order = np.argsort(all_keys)
    _replace(flat, all_keys, all_values, order)

    return flat.reshape(original_shape)
2257 +
2258 +
2259 +
@njit()
def _sequence(array, count):  # pragma: no cover
    r"""
    Internal worker of the ``resequence_labels`` method.  Resequences the
    array elements in ascending order using a numba kernel, which is many
    folds faster than the make contiguous function.

    Parameters
    ----------
    array : 1D-array
        1D-array that needs resequencing; modified in place.
    count : 1D-array
        1D-array of zeros whose length exceeds the largest value in
        ``array``; used as a lookup table from old labels to new ones.

    Returns
    -------
    None
        ``array`` is resequenced in place.

    Notes
    -----
    The output of this function is not the same as the make_contigous or
    relabel_sequential function of scikit-image.  This function
    resequences and randomizes the regions while the other methods only do
    resequencing and output a sorted array.
    """
    a = 1  # next new label to hand out
    i = 0
    while i < (len(array)):
        data = array[i]
        if data != 0:
            if count[data] == 0:
                # First occurrence of this label: assign the next new one
                count[data] = a
                a += 1
        array[i] = count[data]
        i += 1
2292 +
2293 +
2294 +
@njit(parallel=True)
def amax(array):  # pragma: no cover
    r"""
    Finds the largest element in an array using a fast parallel numba
    reduction.

    Parameters
    ----------
    array : ND-array
        Array in which the largest element needs to be calculated.

    Returns
    -------
    scalar : float or int
        The largest element value in the input array.
    """

    return np.max(array)
2310 +
2311 +
2312 +
def resequence_labels(array):
    r"""
    Resequences the labels to make them contiguous.

    Parameters
    ----------
    array : ND-array
        Array that requires resequencing.

    Returns
    -------
    array : ND-array
        Resequenced array with the same shape as the input array.
    """
    original_shape = array.shape
    flat = array.ravel()
    # The lookup table must be able to hold every label value present
    table_size = amax(flat) + 1
    lookup = np.zeros(table_size, dtype=np.uint32)
    _sequence(flat, lookup)

    return flat.reshape(original_shape)

@@ -12,7 +12,6 @@
Loading
12 12
              voxel_size=1,
13 13
              boundary_faces=['top', 'bottom', 'left', 'right', 'front', 'back'],
14 14
              marching_cubes_area=False):
15 -
16 15
    r"""
17 16
    Analyzes an image that has been partitioned into void and solid regions
18 17
    and extracts the void and solid phase geometry as well as network

@@ -1,11 +1,12 @@
Loading
1 -
import numpy as np
2 1
import scipy as sp
2 +
import numpy as np
3 3
import scipy.ndimage as spim
4 4
import warnings
5 +
from edt import edt
5 6
from collections import namedtuple
6 7
from skimage.morphology import ball, disk
7 8
from skimage.measure import marching_cubes_lewiner
8 -
from array_split import shape_split
9 +
from array_split import shape_split, ARRAY_BOUNDS
9 10
from scipy.signal import fftconvolve
10 11
11 12
@@ -27,9 +28,9 @@
Loading
27 28
        Returns a copy of ``im`` rotated accordingly.
28 29
    """
29 30
    if im.ndim != im.squeeze().ndim:
30 -
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
31 -
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
32 -
                      ' unexpected behavior.')
31 +
        warnings.warn(f'Input image conains a singleton axis: {im.shape}'
32 +
                      + ' Reduce dimensionality with np.squeeze(im) to avoid'
33 +
                      + ' unexpected behavior.')
33 34
    im = np.copy(im)
34 35
    if im.ndim == 2:
35 36
        im = (np.swapaxes(im, 1, 0))
@@ -112,9 +113,9 @@
Loading
112 113
        return t
113 114
114 115
    if im.ndim != im.squeeze().ndim:
115 -
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
116 -
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
117 -
                      ' unexpected behavior.')
116 +
        warnings.warn(f'Input image conains a singleton axis: {im.shape}'
117 +
                      + ' Reduce dimensionality with np.squeeze(im) to avoid'
118 +
                      + ' unexpected behavior.')
118 119
119 120
    # Perform erosion and dilation
120 121
    # The array must be padded with 0's so it works correctly at edges
@@ -141,7 +142,7 @@
Loading
141 142
    return result
142 143
143 144
144 -
def subdivide(im, divs=2):
145 +
def subdivide(im, divs=2, overlap=0, flatten=False):
145 146
    r"""
146 147
    Returns slices into an image describing the specified number of sub-arrays.
147 148
@@ -158,48 +159,53 @@
Loading
158 159
        The number of sub-divisions to create in each axis of the image.  If a
159 160
        scalar is given it is assumed this value applies in all dimensions.
160 161
162 +
    overlap : scalar or array_like
163 +
        The amount of overlap to use when dividing along each axis.  If a
164 +
        scalar is given it is assumed this value applies in all dimensions.
165 +
166 +
    flatten : boolean
167 +
        If set to ``True`` then the slice objects are returned as a flat
168 +
        list, while if ``False`` they are returned in a ND-array where each
169 +
        subdivision is accessed using row-col or row-col-layer indexing.
170 +
161 171
    Returns
162 172
    -------
163 -
    slices : 1D-array
164 -
        A 1-D array containing slice objects for indexing into ``im`` that
165 -
        extract the sub-divided arrays.
173 +
    slices : ND-array
174 +
        An ND-array containing sets of slice objects for indexing into ``im``
175 +
        that extract subdivisions of an image.  If ``flatten`` was ``True``,
176 +
        then this array is flat, suitable  for iterating.  If ``flatten`` was
177 +
        ``False`` then the slice objects must be accessed by row, col, layer
178 +
        indices.  An ND-array is the preferred container since it's shape can
179 +
        be easily queried.
166 180
167 181
    Notes
168 182
    -----
169 183
    This method uses the
170 184
    `array_split package <https://github.com/array-split/array_split>`_ which
171 185
    offers the same functionality as the ``split`` method of Numpy's ND-array,
172 -
    but supports the splitting multidimensional arrays in all dimensions.
186 +
    but supports the splitting of multidimensional arrays in all dimensions.
187 +
188 +
    See Also
189 +
    --------
190 +
    chunked_func
173 191
174 192
    Examples
175 193
    --------
176 194
    >>> import porespy as ps
177 195
    >>> import matplotlib.pyplot as plt
178 196
    >>> im = ps.generators.blobs(shape=[200, 200])
179 -
    >>> s = ps.tools.subdivide(im, divs=[2, 2])
180 -
181 -
    ``s`` contains an array with the shape given by ``divs``.  To access the
182 -
    first and last quadrants of ``im`` use:
183 -
    >>> print(im[tuple(s[0, 0])].shape)
184 -
    (100, 100)
185 -
    >>> print(im[tuple(s[1, 1])].shape)
186 -
    (100, 100)
187 -
188 -
    It can be easier to index the array with the slices by applying ``flatten``
189 -
    first:
190 -
    >>> s_flat = s.flatten()
191 -
    >>> for i in s_flat:
192 -
    ...     print(im[i].shape)
193 -
    (100, 100)
194 -
    (100, 100)
195 -
    (100, 100)
196 -
    (100, 100)
197 +
    >>> s = ps.tools.subdivide(im, divs=[2, 2], flatten=True)
198 +
    >>> print(len(s))
199 +
    4
200 +
197 201
    """
198 -
    # Expand scalar divs
199 -
    if isinstance(divs, int):
200 -
        divs = [divs for i in range(im.ndim)]
201 -
    s = shape_split(im.shape, axis=divs)
202 -
    return s
202 +
    divs = np.ones((im.ndim,), dtype=int) * np.array(divs)
203 +
    halo = overlap * (divs > 1)
204 +
    slices = shape_split(im.shape, axis=divs, halo=halo.tolist(),
205 +
                         tile_bounds_policy=ARRAY_BOUNDS).astype(object)
206 +
    if flatten is True:
207 +
        slices = np.ravel(slices)
208 +
    return slices
203 209
204 210
205 211
def bbox_to_slices(bbox):
@@ -265,16 +271,16 @@
Loading
265 271
266 272
    """
267 273
    if r == 0:
268 -
        dt = spim.distance_transform_edt(input=im)
274 +
        dt = edt(input=im)
269 275
        r = int(np.amax(dt)) * 2
270 276
    im_padded = np.pad(array=im, pad_width=r, mode='constant',
271 277
                       constant_values=True)
272 -
    dt = spim.distance_transform_edt(input=im_padded)
278 +
    dt = edt(input=im_padded)
273 279
    seeds = (dt >= r) + get_border(shape=im_padded.shape)
274 280
    # Remove seeds not connected to edges
275 281
    labels = spim.label(seeds)[0]
276 282
    mask = labels == 1  # Assume label of 1 on edges, assured by adding border
277 -
    dt = spim.distance_transform_edt(~mask)
283 +
    dt = edt(~mask)
278 284
    outer_region = dt < r
279 285
    outer_region = extract_subsection(im=outer_region, shape=im.shape)
280 286
    return outer_region
@@ -314,7 +320,7 @@
Loading
314 320
    dim = [range(int(-s / 2), int(s / 2) + s % 2) for s in im.shape]
315 321
    inds = np.meshgrid(*dim, indexing='ij')
316 322
    inds[axis] = inds[axis] * 0
317 -
    d = np.sqrt(np.sum(np.square(inds), axis=0))
323 +
    d = np.sqrt(np.sum(sp.square(inds), axis=0))
318 324
    mask = d < r
319 325
    im_temp = im*mask
320 326
    return im_temp
@@ -341,7 +347,7 @@
Loading
341 347
342 348
    Examples
343 349
    --------
344 -
    >>> import numpy as np
350 +
    >>> import scipy as sp
345 351
    >>> from porespy.tools import extract_subsection
346 352
    >>> im = np.array([[1, 1, 1, 1], [1, 2, 2, 2], [1, 2, 3, 3], [1, 2, 3, 4]])
347 353
    >>> print(im)
@@ -505,9 +511,9 @@
Loading
505 511
    Examples
506 512
    --------
507 513
    >>> import porespy as ps
508 -
    >>> import numpy as np
509 -
    >>> np.random.seed(0)
510 -
    >>> im = np.random.randint(low=0, high=5, size=[4, 4])
514 +
    >>> import scipy as sp
515 +
    >>> sp.random.seed(0)
516 +
    >>> im = sp.random.randint(low=0, high=5, size=[4, 4])
511 517
    >>> print(im)
512 518
    [[4 0 3 3]
513 519
     [3 1 3 2]
@@ -529,7 +535,7 @@
Loading
529 535
    keep_vals = np.array(keep_vals)
530 536
    swap_vals = ~np.in1d(im_flat, keep_vals)
531 537
    im_vals = np.unique(im_flat[swap_vals])
532 -
    new_vals = np.random.permutation(im_vals)
538 +
    new_vals = sp.random.permutation(im_vals)
533 539
    im_map = np.zeros(shape=[np.amax(im_vals) + 1, ], dtype=int)
534 540
    im_map[im_vals] = new_vals
535 541
    im_new = im_map[im_flat]
@@ -566,7 +572,7 @@
Loading
566 572
    Example
567 573
    -------
568 574
    >>> import porespy as ps
569 -
    >>> import numpy as np
575 +
    >>> import scipy as sp
570 576
    >>> im = np.array([[0, 2, 9], [6, 8, 3]])
571 577
    >>> im = ps.tools.make_contiguous(im)
572 578
    >>> print(im)
@@ -632,7 +638,7 @@
Loading
632 638
    Examples
633 639
    --------
634 640
    >>> import porespy as ps
635 -
    >>> import numpy as np
641 +
    >>> import scipy as sp
636 642
    >>> mask = ps.tools.get_border(shape=[3, 3], mode='corners')
637 643
    >>> print(mask)
638 644
    [[ True False  True]
@@ -669,7 +675,7 @@
Loading
669 675
            border[0::, t:-t, 0::] = False
670 676
            border[0::, 0::, t:-t] = False
671 677
    if return_indices:
672 -
        border = np.where(border)
678 +
        border = sp.where(border)
673 679
    return border
674 680
675 681
@@ -732,7 +738,7 @@
Loading
732 738
    return im
733 739
734 740
735 -
def functions_to_table(mod, colwidth=[27, 48]):
741 +
def _functions_to_table(mod, colwidth=[27, 48]):
736 742
    r"""
737 743
    Given a module of functions, returns a ReST formatted text string that
738 744
    outputs a table when printed.
@@ -800,9 +806,9 @@
Loading
800 806
    """
801 807
    im = region
802 808
    if im.ndim != im.squeeze().ndim:
803 -
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
804 -
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
805 -
                      ' unexpected behavior.')
809 +
        warnings.warn(f'Input image conains a singleton axis: {im.shape}'
810 +
                      + ' Reduce dimensionality with np.squeeze(im) to avoid'
811 +
                      + ' unexpected behavior.')
806 812
    if strel is None:
807 813
        if region.ndim == 3:
808 814
            strel = ball(1)
@@ -842,7 +848,7 @@
Loading
842 848
    rad = int(np.ceil(radius))
843 849
    other = np.ones((2 * rad + 1, 2 * rad + 1), dtype=bool)
844 850
    other[rad, rad] = False
845 -
    disk = spim.distance_transform_edt(other) < radius
851 +
    disk = edt(other) < radius
846 852
    return disk
847 853
848 854
@@ -863,7 +869,7 @@
Loading
863 869
    rad = int(np.ceil(radius))
864 870
    other = np.ones((2 * rad + 1, 2 * rad + 1, 2 * rad + 1), dtype=bool)
865 871
    other[rad, rad, rad] = False
866 -
    ball = spim.distance_transform_edt(other) < radius
872 +
    ball = edt(other) < radius
867 873
    return ball
868 874
869 875
@@ -901,7 +907,7 @@
Loading
901 907
    return im1
902 908
903 909
904 -
def insert_sphere(im, c, r):
910 +
def insert_sphere(im, c, r, v=True, overwrite=True):
    r"""
    Inserts a sphere of a specified radius into a given image

    Parameters
    ----------
    im : ND-array
        The image into which the sphere should be inserted
    c : array_like
        The [x, y, z] coordinate indicating the center of the sphere
    r : int
        The radius of sphere to insert
    v : int
        The value to put into the sphere voxels.  The default is ``True``
        which corresponds to inserting spheres into a Boolean image.  If
        a numerical value is given, ``im`` is converted to the same type as
        ``v``.
    overwrite : boolean
        If ``True`` (default) then the sphere overwrites whatever values are
        present in ``im``.  If ``False`` then the sphere values are only
        inserted into locations that are 0 or ``False``.

    Returns
    -------
    image : ND-array
        The original image with a sphere inserted at the specified location
    """
    # Convert image to same type as v for eventual insertion
    if im.dtype != type(v):
        im = im.astype(type(v))
    # Parse the arguments.  BUG FIX: the original called ``sp.around`` but
    # ``scipy`` is not imported as ``sp`` in this module, raising NameError.
    r = int(np.around(r, decimals=0))
    if r == 0:
        return im
    c = np.array(c, dtype=int)
    if c.size != im.ndim:
        raise Exception('Coordinates do not match dimensionality of image')
    # Define a bounding box around inserted sphere, minding image boundaries
    lower = [np.clip(c[i] - r, 0, im.shape[i]) for i in range(im.ndim)]
    upper = [np.clip(c[i] + r, 0, im.shape[i]) for i in range(im.ndim)]
    bbox = np.ravel(lower + upper)
    # Obtain slices into image
    s = bbox_to_slices(bbox)
    # Generate sphere template within image boundaries
    blank = np.ones_like(im[s], dtype=int)
    blank[tuple(c - bbox[0:im.ndim])] = 0
    sph = edt(blank) < r
    if overwrite:  # Clear voxels under sphere to be zero
        temp = im[s]*sph > 0
        im[s][temp] = 0
    else:  # Clear portions of sphere to prevent overwriting
        sph *= im[s] == 0
    im[s] = im[s] + sph*v
    return im
937 965
938 966
@@ -983,7 +1011,7 @@
Loading
983 1011
    else:
984 1012
        xyz_line_in_template_coords = [xyz_line[i] - xyz_min[i] for i in range(3)]
985 1013
        template[tuple(xyz_line_in_template_coords)] = 1
986 -
        template = spim.distance_transform_edt(template == 0) <= r
1014 +
        template = edt(template == 0) <= r
987 1015
988 1016
    im[xyz_min[0]:xyz_max[0]+1,
989 1017
       xyz_min[1]:xyz_max[1]+1,
@@ -1023,9 +1051,9 @@
Loading
1023 1051
    add_boundary_regions
1024 1052
    """
1025 1053
    if im.ndim != im.squeeze().ndim:
1026 -
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
1027 -
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
1028 -
                      ' unexpected behavior.')
1054 +
        warnings.warn(f'Input image conains a singleton axis: {im.shape}'
1055 +
                      + ' Reduce dimensionality with np.squeeze(im) to avoid'
1056 +
                      + ' unexpected behavior.')
1029 1057
    f = faces
1030 1058
    if f is not None:
1031 1059
        if im.ndim == 2:
@@ -1070,18 +1098,37 @@
Loading
1070 1098
    """
1071 1099
    # -------------------------------------------------------------------------
1072 1100
    # Get alias if provided by user
1073 -
    phases_num = np.unique(im * 1)
1101 +
    phases_num = np.unique(im).astype(int)
1074 1102
    phases_num = np.trim_zeros(phases_num)
1075 1103
    al = {}
1104 +
    wrong_labels = []
1076 1105
    for values in phases_num:
1077 1106
        al[values] = 'phase{}'.format(values)
1078 1107
    if alias is not None:
1079 1108
        alias_sort = dict(sorted(alias.items()))
1080 1109
        phase_labels = np.array([*alias_sort])
1081 1110
        al = alias
1082 -
        if set(phase_labels) != set(phases_num):
1083 -
            raise Exception('Alias labels does not match with image labels '
1084 -
                            'please provide correct image labels')
1111 +
        for i in phase_labels:
1112 +
            if i == 0:
1113 +
                raise Exception("Label 0 is not allowed in alias. "
1114 +
                                + "Please specify alias with a positive "
1115 +
                                  "integer")
1116 +
            elif i not in phases_num:
1117 +
                wrong_labels.append(i)
1118 +
        if wrong_labels:
1119 +
            raise Exception("Alias label(s) {} does not "
1120 +
                            "match with image "
1121 +
                            "label(s).".format(wrong_labels)
1122 +
                            + "Please provide correct labels from image.")
1123 +
        if phase_labels.size < phases_num.size:
1124 +
            missed_labels = np.setdiff1d(phases_num, phase_labels)
1125 +
            for i in missed_labels:
1126 +
                warnings.warn(
1127 +
                    "label_{} alias is not provided although it "
1128 +
                    "exists in the input image.".format(i)
1129 +
                    + "The default label alias phase{} is assigned to "
1130 +
                      "label_{}".format(i, i))
1131 +
                al[i] = 'phase{}'.format(i)
1085 1132
    return al
1086 1133
1087 1134
@@ -1110,7 +1157,7 @@
Loading
1110 1157
        labels = [labels]
1111 1158
    s = spim.find_objects(regions)
1112 1159
    im_new = np.zeros_like(regions)
1113 -
    x_min, y_min, z_min = np.inf, np.inf, np.inf
1160 +
    x_min, y_min, z_min = sp.inf, sp.inf, sp.inf
1114 1161
    x_max, y_max, z_max = 0, 0, 0
1115 1162
    for i in labels:
1116 1163
        im_new[s[i-1]] = regions[s[i-1]] == i
@@ -1125,3 +1172,81 @@
Loading
1125 1172
            bbox = bbox_to_slices([x_min, y_min, x_max, y_max])
1126 1173
        im_new = im_new[bbox]
1127 1174
    return im_new
1175 +
1176 +
1177 +
def size_to_seq(size, bins=None):
    r"""
    Converts an image of invasion size values into sequence values.

    This is meant to accept the output of the ``porosimetry`` function.

    Parameters
    ----------
    size : ND-image
        The image containing invasion size values in each voxel.  Voxels
        with a value of 0 are treated as solid.
    bins : array_like or int (optional)
        The bins to use when converting sizes to sequence.  The default is
        to create 1 bin for each unique value in ``size``.  If an **int**
        is supplied it is interpreted as the number of bins between 0 and the
        maximum value in ``size``.  If an array is supplied it is used as
        the bins directly.

    Returns
    -------
    seq : ND-image
        An ND-image the same shape as ``size`` with invasion size values
        replaced by the invasion sequence.  This assumes that the invasion
        process occurs via increasing pressure steps, such as produced by
        the ``porosimetry`` function.

    """
    solid = size == 0
    if bins is None:
        bins = np.unique(size)
    elif isinstance(bins, int):
        bins = np.linspace(0, size.max(), bins)
    vals = np.digitize(size, bins=bins, right=True)
    # Invert the vals so smallest size has largest sequence, and zero-out
    # the solid voxels
    vals = -(vals - vals.max() - 1)*~solid
    # In case too many bins are given, remove empty ones
    vals = make_contiguous(vals)
    return vals
1219 +
1220 +
1221 +
def seq_to_satn(seq):
    r"""
    Converts an image of invasion sequence values to saturation values.

    Parameters
    ----------
    seq : ND-image
        The image containing invasion sequence values in each voxel.
        Note that the invasion steps must be positive integers, solid voxels
        indicated by 0, and uninvaded voxels indicated by -1.

    Returns
    -------
    satn : ND-image
        An ND-image the same size as ``seq`` but with sequence values
        replaced by the fraction of pores invaded at or below the sequence
        number.  Solid voxels and uninvaded voxels are represented by 0 and
        -1 respectively.

    """
    seq_im = np.copy(seq).astype(int)
    # Remember which voxels are solid (0) or uninvaded (-1) before remapping
    solid_mask = seq_im == 0
    uninvaded_mask = seq_im == -1
    # Fold uninvaded voxels into the solid label, then renumber sequences
    seq_im = make_contiguous(np.clip(seq_im, a_min=0, a_max=None))
    # Count voxels invaded at each step, ignoring the solid/uninvaded bin
    counts = np.bincount(seq_im.flatten())
    counts[0] = 0
    cum_counts = np.cumsum(counts)
    # Saturation is cumulative invaded volume over total invadable volume
    denom = (seq_im > 0).sum() + uninvaded_mask.sum()
    satn = cum_counts[seq_im]/denom
    satn[solid_mask] = 0.0
    satn[uninvaded_mask] = -1.0
    return satn

@@ -3,6 +3,81 @@
Loading
3 3
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
4 4
5 5
6 +
def bar(tup, h='pdf', **kwargs):
    r"""
    Convenience wrapper for matplotlib's ``bar``.

    This automatically:
        * fetches the ``bin_centers``
        * fetches the bin heights from the specified ``h``
        * sets the bin widths
        * sets the edges to black

    Parameters
    ----------
    tup : named-tuple
        The named tuple returned by various functions in the
        ``porespy.metrics`` submodule, such as ``chord_length_distribution``.
    h : str
        The attribute of ``tup`` to use for bin heights.  The default is
        ``pdf``, but ``cdf`` is another option.  Depending on the function
        the named-tuple may have different options.
    kwargs : keyword arguments
        All other keyword arguments are passed to ``bar``, including
        ``edgecolor`` if you wish to overwrite the default black.

    Returns
    -------
    fig : Matplotlib figure handle
    """
    # Default to black bar edges unless the caller asked for something else
    kwargs.setdefault('edgecolor', 'k')
    heights = getattr(tup, h)
    return plt.bar(x=tup.bin_centers, height=heights,
                   width=tup.bin_widths, **kwargs)
38 +
39 +
40 +
def imshow(*im, ind=None, axis=None):
41 +
    r"""
42 +
    Convenience wrapper for matplotlib's ``imshow``.
43 +
44 +
    This automatically:
45 +
        * slices a 3D image in the middle of the last axis
46 +
        * uses a masked array to make 0's white
47 +
        * sets the origin to 'lower' so bottom-left corner is [0, 0]
48 +
49 +
    Parameters
50 +
    ----------
51 +
    im : ND-array
52 +
        The 2D or 3D image (or images) to show.  If 2D then all other
53 +
        arguments are ignored.
54 +
    ind : int
55 +
        The slice to show if ``im`` is 3D.  If not given then the middle of
56 +
        the image is used.
57 +
    axis : int
58 +
        The axis to show if ``im`` is 3D.  If not given, then the last
59 +
        axis of the image is used, so an 'lower' slice is shown.
60 +
61 +
    Note
62 +
    ----
63 +
    ``im`` can also be a series of unnamed arguments, in which case all
64 +
    received images will be shown using ``subplot``.
65 +
    """
66 +
    if not isinstance(im, tuple):
67 +
        im = tuple([im])
68 +
    for i, image in enumerate(im):
69 +
        if image.ndim == 3:
70 +
            if axis is None:
71 +
                axis = 2
72 +
            if ind is None:
73 +
                ind = int(image.shape[axis]/2)
74 +
            image = image.take(indices=ind, axis=axis)
75 +
        image = np.ma.array(image, mask=image == 0)
76 +
        fig = plt.subplot(1, len(im), i+1)
77 +
        plt.imshow(image, origin='lower')
78 +
    return fig
79 +
80 +
6 81
def show_mesh(mesh):
7 82
    r"""
8 83
    Visualizes the mesh of a region as obtained by ``get_mesh`` function in

@@ -1,3 +1,4 @@
Loading
1 +
import sys
1 2
import numpy as np
2 3
import openpnm as op
3 4
from tqdm import tqdm
@@ -35,7 +36,7 @@
Loading
35 36
    directly to an OpenPNM network object using the ``update`` command.
36 37
37 38
    """
38 -
    print('_'*60)
39 +
    print('-'*60)
39 40
    print('Extracting pore and throat information from image')
40 41
    from skimage.morphology import disk, ball
41 42
    struc_elem = disk if im.ndim == 2 else ball
@@ -67,7 +68,7 @@
Loading
67 68
    # dt_shape = np.array(dt.shape)
68 69
69 70
    # Start extracting size information for pores and throats
70 -
    for i in tqdm(Ps):
71 +
    for i in tqdm(Ps, file=sys.stdout):
71 72
        pore = i - 1
72 73
        if slices[pore] is None:
73 74
            continue

@@ -0,0 +1,103 @@
Loading
1 +
import numpy as np
2 +
import openpnm as op
3 +
from porespy.filters import trim_nonpercolating_paths
4 +
import collections
5 +
6 +
7 +
def tortuosity(im, axis, return_im=False, **kwargs):
    r"""
    Calculates tortuosity of given image in specified direction

    Parameters
    ----------
    im : ND-image
        The binary image to analyze with ``True`` indicating phase of interest
    axis : int
        The axis along which to apply boundary conditions
    return_im : boolean
        If ``True`` then the resulting tuple contains a copy of the input
        image with the concentration profile.
    kwargs : keyword arguments
        If ``solver_family`` is included these settings are passed directly
        to the OpenPNM solver; otherwise a default solver is chosen.

    Returns
    -------
    results : tuple
        A named-tuple containing:
        * ``tortuosity`` calculated as
        :math:`(D_{AB}/D_{eff}) \cdot \varepsilon`, using the
        ``effective_porosity``.
        * ``effective_porosity`` of the image after applying
        ``trim_nonpercolating_paths``.  This removes disconnected
        voxels which cause singular matrices.
        * ``original_porosity`` of the image as given
        * ``formation_factor`` found as :math:`D_{AB}/D_{eff}`.
        * ``image`` containing the concentration values from the simulation,
        or ``None`` if ``return_im`` is ``False``.

    """
    if axis > (im.ndim - 1):
        raise Exception("Axis argument is too high")
    # Obtain original porosity
    porosity_orig = im.sum()/im.size
    # removing floating pores
    im = trim_nonpercolating_paths(im, inlet_axis=axis, outlet_axis=axis)
    # porosity is changed because of trimming floating pores
    porosity_true = im.sum()/im.size
    if porosity_true < porosity_orig:
        print('Caution, True porosity is:', porosity_true,
              'and volume fraction filled:',
              abs(porosity_orig-porosity_true)*100, '%')
    # cubic network generation
    net = op.network.CubicTemplate(template=im, spacing=1)
    # adding phase
    water = op.phases.Water(network=net)
    water['throat.diffusive_conductance'] = 1  # dummy value
    # running Fickian Diffusion
    fd = op.algorithms.FickianDiffusion(network=net, phase=water)
    # choosing axis of concentration gradient
    inlets = net['pore.coords'][:, axis] <= 1
    outlets = net['pore.coords'][:, axis] >= im.shape[axis]-1
    # boundary conditions on concentration
    C_in = 1.0
    C_out = 0.0
    fd.set_value_BC(pores=inlets, values=C_in)
    fd.set_value_BC(pores=outlets, values=C_out)
    # Use specified solver if given
    if 'solver_family' in kwargs.keys():
        fd.settings.update(kwargs)
    else:  # Use pyamg otherwise, if present
        try:
            import pyamg  # noqa — imported only to check availability
            fd.settings['solver_family'] = 'pyamg'
        except ModuleNotFoundError:  # Use scipy cg as last resort
            fd.settings['solver_family'] = 'scipy'
            fd.settings['solver_type'] = 'cg'
    op.utils.tic()
    fd.run()
    op.utils.toc()
    # calculating molar flow rate, effective diffusivity and tortuosity
    rate_out = fd.rate(pores=outlets)[0]
    rate_in = fd.rate(pores=inlets)[0]
    if not np.allclose(-rate_out, rate_in):
        raise Exception('Something went wrong, inlet and outlet rate do not match')
    delta_C = C_in - C_out
    L = im.shape[axis]
    A = np.prod(im.shape)/L
    N_A = A/L*delta_C
    Deff = rate_in/N_A
    tau = porosity_true/Deff
    # BUG FIX: the original assigned result fields onto the namedtuple
    # *class* instead of instantiating it, so callers received a class
    # object rather than a tuple.  Build a proper instance; attribute
    # access is unchanged for existing callers.
    tortuosity_result = collections.namedtuple(
        'tortuosity_result',
        ['tortuosity', 'effective_porosity', 'original_porosity',
         'formation_factor', 'image'])
    if return_im:
        conc = np.zeros([im.size, ], dtype=float)
        conc[net['pore.template_indices']] = fd['pore.concentration']
        conc = np.reshape(conc, newshape=im.shape)
    else:
        conc = None
    return tortuosity_result(tortuosity=tau,
                             effective_porosity=porosity_true,
                             original_porosity=porosity_orig,
                             formation_factor=1/Deff,
                             image=conc)

@@ -3,9 +3,9 @@
Loading
3 3
4 4
def set_mpl_style():
5 5
6 -
    sfont = 25
7 -
    mfont = 35
8 -
    lfont = 45
6 +
    sfont = 15
7 +
    mfont = 15
8 +
    lfont = 15
9 9
10 10
    line_props = {'linewidth': 4,
11 11
                  'markersize': 10}

@@ -85,32 +85,32 @@
Loading
85 85
    return new_im
86 86
87 87
88 -
def sem(im, direction='X'):
88 +
def sem(im, axis=0):
89 89
    r"""
90 -
    Simulates an SEM photograph looking into the porous material in the
91 -
    specified direction.  Features are colored according to their depth into
92 -
    the image, so darker features are further away.
90 +
    Simulates an SEM photograph looking into the porous material.
91 +
92 +
    Features are colored according to their depth into the image, so
93 +
    darker features are further away.
93 94
94 95
    Parameters
95 96
    ----------
96 97
    im : array_like
97 98
        ND-image of the porous material with the solid phase marked as 1 or
98 99
        True
99 -
100 -
    direction : string
101 -
        Specify the axis along which the camera will point.  Options are
102 -
        'X', 'Y', and 'Z'.
100 +
    axis : int
101 +
        Specifes the axis along which the camera will point.
103 102
104 103
    Returns
105 104
    -------
106 105
    image : 2D-array
107 -
        A 2D greyscale image suitable for use in matplotlib\'s ```imshow```
106 +
        A 2D greyscale image suitable for use in matplotlib's ``imshow``
108 107
        function.
108 +
109 109
    """
110 110
    im = np.array(~im, dtype=int)
111 -
    if direction in ['Y', 'y']:
111 +
    if axis == 1:
112 112
        im = np.transpose(im, axes=[1, 0, 2])
113 -
    if direction in ['Z', 'z']:
113 +
    if axis == 2:
114 114
        im = np.transpose(im, axes=[2, 1, 0])
115 115
    t = im.shape[0]
116 116
    depth = np.reshape(np.arange(0, t), [t, 1, 1])
@@ -119,12 +119,12 @@
Loading
119 119
    return im
120 120
121 121
122 -
def xray(im, direction='X'):
122 +
def xray(im, axis=0):
    r"""
    Simulates an X-ray radiograph looking through the porous material.

    The resulting image is colored according to the amount of attenuation an
    X-ray would experience, so regions with more solid will appear darker.

    Parameters
    ----------
    im : array_like
        ND-image of the porous material with the solid phase marked as 1 or
        True
    axis : int
        Specifies the axis along which the camera will point.

    Returns
    -------
    image : 2D-array
        A 2D greyscale image suitable for use in matplotlib's ``imshow``
        function.
    """
    # Invert so solid voxels contribute 1 to the attenuation sum
    solid = np.array(~im, dtype=int)
    # Bring the viewing axis to the front before collapsing it
    if axis == 1:
        solid = np.transpose(solid, axes=[1, 0, 2])
    elif axis == 2:
        solid = np.transpose(solid, axes=[2, 1, 0])
    return np.sum(solid, axis=0)

@@ -391,23 +391,23 @@
Loading
391 391
    num = [0, *num]
392 392
    phases_num = np.unique(im * 1)
393 393
    phases_num = np.trim_zeros(phases_num)
394 -
    for i in phases_num:
395 -
        loc1 = np.logical_and(conns1 >= num[i - 1], conns1 < num[i])
396 -
        loc2 = np.logical_and(conns2 >= num[i - 1], conns2 < num[i])
397 -
        loc3 = np.logical_and(label >= num[i - 1], label < num[i])
398 -
        net['throat.{}'.format(al[i])] = loc1 * loc2
399 -
        net['pore.{}'.format(al[i])] = loc3
400 -
        if i == phases_num[-1]:
394 +
    for i0, i1 in enumerate(phases_num):
395 +
        loc1 = np.logical_and(conns1 >= num[i0], conns1 < num[i0 + 1])
396 +
        loc2 = np.logical_and(conns2 >= num[i0], conns2 < num[i0 + 1])
397 +
        loc3 = np.logical_and(label >= num[i0], label < num[i0 + 1])
398 +
        net['throat.{}'.format(al[i1])] = loc1 * loc2
399 +
        net['pore.{}'.format(al[i1])] = loc3
400 +
        if i1 == phases_num[-1]:
401 401
            loc4 = np.logical_and(conns1 < num[-1], conns2 >= num[-1])
402 402
            loc5 = label >= num[-1]
403 403
            net['throat.boundary'] = loc4
404 404
            net['pore.boundary'] = loc5
405 -
        for j in phases_num:
406 -
            if j > i:
407 -
                pi_pj_sa = np.zeros_like(label)
408 -
                loc6 = np.logical_and(conns2 >= num[j - 1], conns2 < num[j])
405 +
        for j0, j1 in enumerate(phases_num):
406 +
            if j0 > i0:
407 +
                pi_pj_sa = np.zeros_like(label, dtype=float)
408 +
                loc6 = np.logical_and(conns2 >= num[j0], conns2 < num[j0 + 1])
409 409
                pi_pj_conns = loc1 * loc6
410 -
                net['throat.{}_{}'.format(al[i], al[j])] = pi_pj_conns
410 +
                net['throat.{}_{}'.format(al[i1], al[j1])] = pi_pj_conns
411 411
                if any(pi_pj_conns):
412 412
                    # ---------------------------------------------------------
413 413
                    # Calculates phase[i] interfacial area that connects with
@@ -434,9 +434,7 @@
Loading
434 434
                        s_pa_c = np.trim_zeros(s_pa_c)
435 435
                        pi_pj_sa[i_index] = p_sa_c
436 436
                        pi_pj_sa[j_index] = s_pa_c
437 -
                    net['pore.{}_{}_area'.format(al[i],
438 -
                                                 al[j])] = (pi_pj_sa *
439 -
                                                            voxel_size ** 2)
437 +
                    net[f'pore.{al[i1]}_{al[j1]}_area'] = pi_pj_sa * voxel_size ** 2
440 438
    return net
441 439
442 440
@@ -479,10 +477,12 @@
Loading
479 477
            dic['bottom'] = 1
480 478
        for i in f:
481 479
            if i in ['left', 'front', 'bottom']:
482 -
                network['pore.{}'.format(i)] = (coords[:, dic[i]] <
483 -
                                                min(condition[:, dic[i]]))
480 +
                network['pore.{}'.format(i)] = (
481 +
                    coords[:, dic[i]] < min(condition[:, dic[i]])
482 +
                )
484 483
            elif i in ['right', 'back', 'top']:
485 -
                network['pore.{}'.format(i)] = (coords[:, dic[i]] >
486 -
                                                max(condition[:, dic[i]]))
484 +
                network['pore.{}'.format(i)] = (
485 +
                    coords[:, dic[i]] > max(condition[:, dic[i]])
486 +
                )
487 487
488 488
    return network

@@ -1,10 +1,11 @@
Loading
1 +
import sys
1 2
import numpy as np
2 -
import scipy.ndimage as spim
3 3
from tqdm import tqdm
4 +
import scipy.ndimage as spim
4 5
from porespy.tools import extract_subsection, bbox_to_slices
5 -
from skimage.measure import regionprops
6 6
from skimage.measure import mesh_surface_area, marching_cubes_lewiner
7 7
from skimage.morphology import skeletonize_3d, ball
8 +
from skimage.measure import regionprops
8 9
from pandas import DataFrame
9 10
10 11
@@ -176,53 +177,54 @@
Loading
176 177
    which may be helpful.
177 178
178 179
    """
179 -
    print('_'*60)
180 +
    print('-'*60)
180 181
    print('Calculating regionprops')
181 182
182 -
    results = regionprops(im, coordinates='xy')
183 -
    for i in tqdm(range(len(results))):
184 -
        mask = results[i].image
185 -
        mask_padded = np.pad(mask, pad_width=1, mode='constant')
186 -
        temp = spim.distance_transform_edt(mask_padded)
187 -
        dt = extract_subsection(temp, shape=mask.shape)
188 -
        # ---------------------------------------------------------------------
189 -
        # Slice indices
190 -
        results[i].slice = results[i]._slice
191 -
        # ---------------------------------------------------------------------
192 -
        # Volume of regions in voxels
193 -
        results[i].volume = results[i].area
194 -
        # ---------------------------------------------------------------------
195 -
        # Volume of bounding box, in voxels
196 -
        results[i].bbox_volume = np.prod(mask.shape)
197 -
        # ---------------------------------------------------------------------
198 -
        # Create an image of the border
199 -
        results[i].border = dt == 1
200 -
        # ---------------------------------------------------------------------
201 -
        # Create an image of the maximal inscribed sphere
202 -
        r = dt.max()
203 -
        inv_dt = spim.distance_transform_edt(dt < r)
204 -
        results[i].inscribed_sphere = inv_dt < r
205 -
        # ---------------------------------------------------------------------
206 -
        # Find surface area using marching cubes and analyze the mesh
207 -
        tmp = np.pad(np.atleast_3d(mask), pad_width=1, mode='constant')
208 -
        tmp = spim.convolve(tmp, weights=ball(1))/5
209 -
        verts, faces, norms, vals = marching_cubes_lewiner(volume=tmp, level=0)
210 -
        results[i].surface_mesh_vertices = verts
211 -
        results[i].surface_mesh_simplices = faces
212 -
        area = mesh_surface_area(verts, faces)
213 -
        results[i].surface_area = area
214 -
        # ---------------------------------------------------------------------
215 -
        # Find sphericity
216 -
        vol = results[i].volume
217 -
        r = (3/4/np.pi*vol)**(1/3)
218 -
        a_equiv = 4*np.pi*(r)**2
219 -
        a_region = results[i].surface_area
220 -
        results[i].sphericity = a_equiv/a_region
221 -
        # ---------------------------------------------------------------------
222 -
        # Find skeleton of region
223 -
        results[i].skeleton = skeletonize_3d(mask)
224 -
        # ---------------------------------------------------------------------
225 -
        # Volume of convex image, equal to area in 2D, so just translating
226 -
        results[i].convex_volume = results[i].convex_area
183 +
    results = regionprops(im)
184 +
    with tqdm(range(len(results))) as pbar:
185 +
        for i in range(len(results)):
186 +
            pbar.update()
187 +
            mask = results[i].image
188 +
            mask_padded = np.pad(mask, pad_width=1, mode='constant')
189 +
            temp = spim.distance_transform_edt(mask_padded)
190 +
            dt = extract_subsection(temp, shape=mask.shape)
191 +
            # Slice indices
192 +
            results[i].slice = results[i]._slice
193 +
            # ----------------------------------------------------------------
194 +
            # Volume of regions in voxels
195 +
            results[i].volume = results[i].area
196 +
            # ----------------------------------------------------------------
197 +
            # Volume of bounding box, in voxels
198 +
            results[i].bbox_volume = np.prod(mask.shape)
199 +
            # ----------------------------------------------------------------
200 +
            # Create an image of the border
201 +
            results[i].border = dt == 1
202 +
            # ----------------------------------------------------------------
203 +
            # Create an image of the maximal inscribed sphere
204 +
            r = dt.max()
205 +
            inv_dt = spim.distance_transform_edt(dt < r)
206 +
            results[i].inscribed_sphere = inv_dt < r
207 +
            # ----------------------------------------------------------------
208 +
            # Find surface area using marching cubes and analyze the mesh
209 +
            tmp = np.pad(np.atleast_3d(mask), pad_width=1, mode='constant')
210 +
            tmp = spim.convolve(tmp, weights=ball(1))/5
211 +
            verts, faces, norms, vals = marching_cubes_lewiner(volume=tmp, level=0)
212 +
            results[i].surface_mesh_vertices = verts
213 +
            results[i].surface_mesh_simplices = faces
214 +
            area = mesh_surface_area(verts, faces)
215 +
            results[i].surface_area = area
216 +
            # ----------------------------------------------------------------
217 +
            # Find sphericity
218 +
            vol = results[i].volume
219 +
            r = (3/4/np.pi*vol)**(1/3)
220 +
            a_equiv = 4*np.pi*(r)**2
221 +
            a_region = results[i].surface_area
222 +
            results[i].sphericity = a_equiv/a_region
223 +
            # ----------------------------------------------------------------
224 +
            # Find skeleton of region
225 +
            results[i].skeleton = skeletonize_3d(mask)
226 +
            # ----------------------------------------------------------------
227 +
            # Volume of convex image, equal to area in 2D, so just translating
228 +
            results[i].convex_volume = results[i].convex_area
227 229
228 230
    return results

@@ -1,10 +1,15 @@
Loading
1 -
import porespy as ps
2 1
import numpy as np
2 +
from edt import edt
3 +
import porespy as ps
3 4
import scipy.spatial as sptl
4 5
import scipy.ndimage as spim
5 -
from porespy.tools import norm_to_uniform, ps_ball, ps_disk
6 +
from numba import njit, jit
7 +
from porespy.tools import norm_to_uniform, ps_ball, ps_disk, get_border
8 +
from porespy.tools import insert_sphere, fftmorphology
6 9
from typing import List
7 -
10 +
from numpy import array
11 +
from tqdm import tqdm
12 +
from skimage.morphology import ball, disk
8 13
9 14
10 15
def insert_shape(im, element, center=None, corner=None, value=1,
@@ -56,8 +61,8 @@
Loading
56 61
        for dim in range(im.ndim):
57 62
            r, d = np.divmod(element.shape[dim], 2)
58 63
            if d == 0:
59 -
                raise Exception('Cannot specify center point when element ' +
60 -
                                'has one or more even dimension')
64 +
                raise Exception('Cannot specify center point when element '
65 +
                                + 'has one or more even dimension')
61 66
            lower_im = np.amax((center[dim] - r, 0))
62 67
            upper_im = np.amin((center[dim] + r + 1, im.shape[dim]))
63 68
            s_im.append(slice(lower_im, upper_im))
@@ -87,13 +92,17 @@
Loading
87 92
    return im
88 93
89 94
90 -
def RSA(im: np.array, radius: int, volume_fraction: int = 1,
91 -
        mode: str = 'extended'):
95 +
def RSA(im: array, radius: int, volume_fraction: int = 1, n_max: int = None,
96 +
        mode: str = 'contained'):
92 97
    r"""
93 98
    Generates a sphere or disk packing using Random Sequential Addition
94 99
95 -
    This which ensures that spheres do not overlap but does not guarantee they
96 -
    are tightly packed.
100 +
    This algorithm ensures that spheres do not overlap but does not
101 +
    guarantee they are tightly packed.
102 +
103 +
    This function adds spheres to the background of the received ``im``, which
104 +
    allows iteratively adding spheres of different radii to the unfilled space,
105 +
by repeatedly passing in the result of previous calls to RSA.
97 106
98 107
    Parameters
99 108
    ----------
@@ -101,97 +110,176 @@
Loading
101 110
        The image into which the spheres should be inserted.  By accepting an
102 111
        image rather than a shape, it allows users to insert spheres into an
103 112
        already existing image.  To begin the process, start with an array of
104 -
        zero such as ``im = np.zeros([200, 200], dtype=bool)``.
113 +
        zeros such as ``im = np.zeros([200, 200, 200], dtype=bool)``.
105 114
    radius : int
106 115
        The radius of the disk or sphere to insert.
107 -
    volume_fraction : scalar
116 +
    volume_fraction : scalar (default is 1.0)
108 117
        The fraction of the image that should be filled with spheres.  The
109 -
        spheres are addeds 1's, so each sphere addition increases the
110 -
        ``volume_fraction`` until the specified limit is reach.
111 -
    mode : string
118 +
        spheres are added as 1's, so each sphere addition increases the
119 +
        ``volume_fraction`` until the specified limit is reached.  Note that if
120 +
        ``n_max`` is reached first, then ``volume_fraction`` will not be
121 +
        achieved.
122 +
    n_max : int (default is 10,000)
123 +
        The maximum number of spheres to add.  By default the value of
124 +
        ``n_max`` is high so that the addition of spheres will go indefinitely
125 +
        until ``volume_fraction`` is met, but specifying a smaller value
126 +
        will halt addition after the given number of spheres are added.
127 +
    mode : string (default is 'contained')
112 128
        Controls how the edges of the image are handled.  Options are:
113 129
114 -
        'extended' - Spheres are allowed to extend beyond the edge of the image
115 -
116 130
        'contained' - Spheres are all completely within the image
117 131
118 -
        'periodic' - The portion of a sphere that extends beyond the image is
119 -
        inserted into the opposite edge of the image (Not Implemented Yet!)
132 +
        'extended' - Spheres are allowed to extend beyond the edge of the
133 +
        image.  In this mode the volume fraction will be less than requested
134 +
        since some spheres extend beyond the image, but their entire volume
135 +
        is counted as added for computational efficiency.
120 136
121 137
    Returns
122 138
    -------
123 139
    image : ND-array
124 -
        A copy of ``im`` with spheres of specified radius *added* to the
125 -
        background.
140 +
        A handle to the input ``im`` with spheres of specified radius
141 +
        *added* to the background.
126 142
127 143
    Notes
128 144
    -----
129 -
    Each sphere is filled with 1's, but the center is marked with a 2.  This
130 -
    allows easy boolean masking to extract only the centers, which can be
131 -
    converted to coordinates using ``scipy.where`` and used for other purposes.
132 -
    The obtain only the spheres, use``im = im == 1``.
133 -
134 -
    This function adds spheres to the background of the received ``im``, which
135 -
    allows iteratively adding spheres of different radii to the unfilled space.
145 +
    This function uses Numba to speed up the search for valid sphere insertion
146 +
    points.  It seems that Numba does not look at the state of the scipy
147 +
    random number generator, so setting the seed to a known value has no
148 +
    effect on the output of this function. Each call to this function will
149 +
    produce a unique image.  If you wish to use the same realization multiple
150 +
    times you must save the array (e.g. ``numpy.save``).
136 151
137 152
    References
138 153
    ----------
139 154
    [1] Random Heterogeneous Materials, S. Torquato (2001)
140 155
141 156
    """
142 -
    # Note: The 2D vs 3D splitting of this just me being lazy...I can't be
143 -
    # bothered to figure it out programmatically right now
144 -
    # TODO: Ideally the spheres should be added periodically
145 -
    print(78*'―')
146 -
    print('RSA: Adding spheres of size ' + str(radius))
147 -
    d2 = len(im.shape) == 2
148 -
    mrad = 2*radius
149 -
    if d2:
150 -
        im_strel = ps_disk(radius)
151 -
        mask_strel = ps_disk(mrad)
152 -
    else:
153 -
        im_strel = ps_ball(radius)
154 -
        mask_strel = ps_ball(mrad)
155 -
    if np.any(im > 0):
156 -
        # Dilate existing objects by im_strel to remove pixels near them
157 -
        # from consideration for sphere placement
158 -
        mask = ps.tools.fftmorphology(im > 0, im_strel > 0, mode='dilate')
159 -
        mask = mask.astype(int)
157 +
    print(80*'-')
158 +
    print(f'RSA: Adding spheres of size {radius}')
159 +
    im = im.astype(bool)
160 +
    if n_max is None:
161 +
        n_max = 10000
162 +
    vf_final = volume_fraction
163 +
    vf_start = im.sum()/im.size
164 +
    print('Initial volume fraction:', vf_start)
165 +
    if im.ndim == 2:
166 +
        template_lg = ps_disk(radius*2)
167 +
        template_sm = ps_disk(radius)
160 168
    else:
161 -
        mask = np.zeros_like(im)
169 +
        template_lg = ps_ball(radius*2)
170 +
        template_sm = ps_ball(radius)
171 +
    vf_template = template_sm.sum()/im.size
172 +
    # Pad image by the radius of large template to enable insertion near edges
173 +
    im = np.pad(im, pad_width=2*radius, mode='edge')
174 +
    # Depending on mode, adjust mask to remove options around edge
162 175
    if mode == 'contained':
163 -
        mask = _remove_edge(mask, radius)
176 +
        border = get_border(im.shape, thickness=2*radius, mode='faces')
164 177
    elif mode == 'extended':
165 -
        pass
166 -
    elif mode == 'periodic':
167 -
        raise Exception('Periodic edges are not implemented yet')
178 +
        border = get_border(im.shape, thickness=radius+1, mode='faces')
168 179
    else:
169 -
        raise Exception('Unrecognized mode: ' + mode)
170 -
    vf = im.sum()/im.size
171 -
    free_spots = np.argwhere(mask == 0)
180 +
        raise Exception('Unrecognized mode: ', mode)
181 +
    # Remove border pixels
182 +
    im[border] = True
183 +
    # Dilate existing objects by strel to remove pixels near them
184 +
    # from consideration for sphere placement
185 +
    print('Dilating foreground features by sphere radius')
186 +
    dt = edt(im == 0)
187 +
    options_im = (dt >= radius)
188 +
    # ------------------------------------------------------------------------
189 +
    # Begin inserting the spheres
190 +
    vf = vf_start
191 +
    free_sites = np.flatnonzero(options_im)
172 192
    i = 0
173 -
    while vf <= volume_fraction and len(free_spots) > 0:
174 -
        choice = np.random.randint(0, len(free_spots), size=1)
175 -
        if d2:
176 -
            [x, y] = free_spots[choice].flatten()
177 -
            im = _fit_strel_to_im_2d(im, im_strel, radius, x, y)
178 -
            mask = _fit_strel_to_im_2d(mask, mask_strel, mrad, x, y)
179 -
            im[x, y] = 2
180 -
        else:
181 -
            [x, y, z] = free_spots[choice].flatten()
182 -
            im = _fit_strel_to_im_3d(im, im_strel, radius, x, y, z)
183 -
            mask = _fit_strel_to_im_3d(mask, mask_strel, mrad, x, y, z)
184 -
            im[x, y, z] = 2
185 -
        free_spots = np.argwhere(mask == 0)
186 -
        vf = im.sum()/im.size
193 +
    while (vf <= vf_final) and (i < n_max) and (len(free_sites) > 0):
194 +
        c, count = _make_choice(options_im, free_sites=free_sites)
195 +
        # The 100 below is arbitrary and may change performance
196 +
        if count > 100:
197 +
            # Regenerate list of free_sites
198 +
            print('Regenerating free_sites after', i, 'iterations')
199 +
            free_sites = np.flatnonzero(options_im)
200 +
        if all(np.array(c) == -1):
201 +
            break
202 +
        s_sm = tuple([slice(x - radius, x + radius + 1, None) for x in c])
203 +
        s_lg = tuple([slice(x - 2*radius, x + 2*radius + 1, None) for x in c])
204 +
        im[s_sm] += template_sm  # Add ball to image
205 +
        options_im[s_lg][template_lg] = False  # Update extended region
206 +
        vf += vf_template
187 207
        i += 1
188 -
    if vf > volume_fraction:
189 -
        print('Volume Fraction', volume_fraction, 'reached')
190 -
    if len(free_spots) == 0:
191 -
        print('No more free spots', 'Volume Fraction', vf)
208 +
    print('Number of spheres inserted:', i)
209 +
    # ------------------------------------------------------------------------
210 +
    # Get slice into returned image to retain original size
211 +
    s = tuple([slice(2*radius, d-2*radius, None) for d in im.shape])
212 +
    im = im[s]
213 +
    vf = im.sum()/im.size
214 +
    print('Final volume fraction:', vf)
192 215
    return im
193 216
194 217
218 +
@njit
219 +
def _make_choice(options_im, free_sites):
220 +
    r"""
221 +
    This function is called by _begin_inserting to find valid insertion points
222 +
223 +
    Parameters
224 +
    ----------
225 +
    options_im : ND-array
226 +
        An array with ``True`` at all valid locations and ``False`` at all
227 +
        locations where a sphere already exists PLUS a region of radius R
228 +
        around each sphere since these points are also invalid insertion
229 +
        points.
230 +
    free_sites : array_like
231 +
        A 1D array containing valid insertion indices.  This list is used to
232 +
        select insertion points from a limited list which occasionally gets
233 +
        smaller.
234 +
235 +
    Returns
236 +
    -------
237 +
    coords : list
238 +
        The XY or XYZ coordinates of the next insertion point
239 +
    count : int
240 +
        The number of attempts that were needed to find a valid point.  If
241 +
        this value gets too high, a short list of ``free_sites`` should be
242 +
        generated in the calling function.
243 +
244 +
    """
245 +
    choice = False
246 +
    count = 0
247 +
    upper_limit = len(free_sites)
248 +
    max_iters = upper_limit*20
249 +
    if options_im.ndim == 2:
250 +
        coords = [-1, -1]
251 +
        Nx, Ny = options_im.shape
252 +
        while not choice:
253 +
            if count >= max_iters:
254 +
                coords = [-1, -1]
255 +
                break
256 +
            ind = np.random.randint(0, upper_limit)
257 +
            # This numpy function is not supported by numba yet
258 +
            # c1, c2 = np.unravel_index(free_sites[ind], options_im.shape)
259 +
            # So using manual unraveling
260 +
            coords[1] = free_sites[ind] % Ny
261 +
            coords[0] = (free_sites[ind] // Ny) % Nx
262 +
            choice = options_im[coords[0], coords[1]]
263 +
            count += 1
264 +
    if options_im.ndim == 3:
265 +
        coords = [-1, -1, -1]
266 +
        Nx, Ny, Nz = options_im.shape
267 +
        while not choice:
268 +
            if count >= max_iters:
269 +
                coords = [-1, -1, -1]
270 +
                break
271 +
            ind = np.random.randint(0, upper_limit)
272 +
            # This numpy function is not supported by numba yet
273 +
            # c1, c2, c3 = np.unravel_index(free_sites[ind], options_im.shape)
274 +
            # So using manual unraveling
275 +
            coords[2] = free_sites[ind] % Nz
276 +
            coords[1] = (free_sites[ind] // Nz) % Ny
277 +
            coords[0] = (free_sites[ind] // (Nz * Ny)) % Nx
278 +
            choice = options_im[coords[0], coords[1], coords[2]]
279 +
            count += 1
280 +
    return coords, count
281 +
282 +
195 283
def bundle_of_tubes(shape: List[int], spacing: int):
196 284
    r"""
197 285
    Create a 3D image of a bundle of tubes, in the form of a rectangular
@@ -351,7 +439,7 @@
Loading
351 439
        if np.all(pts >= 0) and np.all(pts < im.shape):
352 440
            line_pts = line_segment(pts[0], pts[1])
353 441
            im[tuple(line_pts)] = True
354 -
    im = spim.distance_transform_edt(~im) > radius
442 +
    im = edt(~im) > radius
355 443
    return im
356 444
357 445
@@ -492,7 +580,7 @@
Loading
492 580
                          s+r:im.shape[1]-r:2*s,
493 581
                          s:im.shape[2]-r:2*s]
494 582
        im[coords[0], coords[1], coords[2]] = 1
495 -
    im = ~(spim.distance_transform_edt(~im) < r)
583 +
    im = ~(edt(~im) < r)
496 584
    return im
497 585
498 586
@@ -543,8 +631,12 @@
Loading
543 631
    im = np.random.random(size=shape)
544 632
545 633
    # Helper functions for calculating porosity: phi = g(f(N))
546 -
    f = lambda N: spim.distance_transform_edt(im > N/bulk_vol) < radius
547 -
    g = lambda im: 1 - im.sum() / np.prod(shape)
634 +
    def f(N):
635 +
        return edt(im > N/bulk_vol) < radius
636 +
637 +
    def g(im):
638 +
        r"""Returns fraction of 0s, given a binary image"""
639 +
        return 1 - im.sum() / np.prod(shape)
548 640
549 641
    # # Newton's method for getting image porosity match the given
550 642
    # w = 1.0                         # Damping factor
@@ -574,92 +666,187 @@
Loading
574 666
    return ~f(N)
575 667
576 668
577 -
def generate_noise(shape: List[int], porosity=None, octaves: int = 3,
578 -
                   frequency: int = 32, mode: str = 'simplex'):
669 +
def perlin_noise(shape: List[int], porosity=None, octaves: int = 3,
670 +
                 frequency: List[int] = 2, persistence: float = 0.5):
579 671
    r"""
580 -
    Generate a field of spatially correlated random noise using the Perlin
581 -
    noise algorithm, or the updated Simplex noise algorithm.
672 +
    Generate a Perlin noise field
582 673
583 674
    Parameters
584 675
    ----------
585 676
    shape : array_like
586 -
        The size of the image to generate in [Nx, Ny, Nz] where N is the
587 -
        number of voxels.
588 -
677 +
        The shape of the desired image
678 +
    frequency : array_like
679 +
        Controls the frequency of the noise, with higher values leading to
680 +
        smaller features or more tightly spaced undulations in the brightness.
589 681
    porosity : float
590 -
        If specified, this will threshold the image to the specified value
591 -
        prior to returning.  If no value is given (the default), then the
592 -
        scalar noise field is returned.
593 -
682 +
        If specified, the returned image will be thresholded to the specified
683 +
        porosity.  If not provided, the greyscale noise is returned (default).
594 684
    octaves : int
595 -
        Controls the *texture* of the noise, with higher octaves giving more
596 -
        complex features over larger length scales.
597 -
598 -
    frequency : array_like
599 -
        Controls the relative sizes of the features, with higher frequencies
600 -
        giving larger features.  A scalar value will apply the same frequency
601 -
        in all directions, given an isotropic field; a vector value will
602 -
        apply the specified values along each axis to create anisotropy.
603 -
604 -
    mode : string
605 -
        Which noise algorithm to use, either ``'simplex'`` (default) or
606 -
        ``'perlin'``.
685 +
        Controls the texture of the noise, with higher values giving more
686 +
        complex features of larger length scales.
687 +
    persistence : float
688 +
        Controls how prominent each successive octave is.  Should be a number
689 +
        less than 1.
607 690
608 691
    Returns
609 692
    -------
610 -
    image : ND-array
611 -
        If porosity is given, then a boolean array with ``True`` values
612 -
        denoting the pore space is returned.  If not, then normally
613 -
        distributed and spatially correlated randomly noise is returned.
693 +
    An ND-array of the specified ``shape``.  If ``porosity`` is not given
694 +
    then the array contains greyscale values distributed normally about 0.
695 +
    Use ``porespy.tools.norm_to_uniform`` to create a well-scaled image for
696 +
    thresholding.  If ``porosity`` is given then these steps are done
697 +
    internally and a boolean image is returned.
614 698
615 699
    Notes
616 700
    -----
617 -
    This method depends the a package called 'noise' which must be
618 -
    compiled. It is included in the Anaconda distribution, or a platform
619 -
    specific binary can be downloaded.
701 +
    The implementation used here is a bit fussy about the values of
702 +
    ``frequency`` and ``octaves``.  (1) the image ``shape`` must be an integer
703 +
    multiple of ``frequency`` in each direction, and (2) ``frequency`` to the
704 +
    power of ``octaves`` must be less than or equal to the ``shape`` in each
705 +
    direction.  Exceptions are thrown if these conditions are not met.
620 706
621 -
    See Also
622 -
    --------
623 -
    porespy.tools.norm_to_uniform
707 +
    References
708 +
    ----------
709 +
    This implementation is taken from Pierre Vigier's
710 +
    `Github repo <https://github.com/pvigier/perlin-numpy>`_
624 711
625 712
    """
626 -
    try:
627 -
        import noise
628 -
    except ModuleNotFoundError:
629 -
        raise Exception("The noise package must be installed")
713 +
    # Parse args
630 714
    shape = np.array(shape)
631 -
    if np.size(shape) == 1:
632 -
        Lx, Ly, Lz = np.full((3, ), int(shape))
633 -
    elif len(shape) == 2:
634 -
        Lx, Ly = shape
635 -
        Lz = 1
636 -
    elif len(shape) == 3:
637 -
        Lx, Ly, Lz = shape
638 -
    if mode == 'simplex':
639 -
        f = noise.snoise3
640 -
    else:
641 -
        f = noise.pnoise3
642 -
    frequency = np.atleast_1d(frequency)
643 -
    if frequency.size == 1:
644 -
        freq = np.full(shape=[3, ], fill_value=frequency[0])
645 -
    elif frequency.size == 2:
646 -
        freq = np.concatenate((frequency, [1]))
647 -
    else:
648 -
        freq = np.array(frequency)
649 -
    im = np.zeros(shape=[Lx, Ly, Lz], dtype=float)
650 -
    for x in range(Lx):
651 -
        for y in range(Ly):
652 -
            for z in range(Lz):
653 -
                im[x, y, z] = f(x=x/freq[0], y=y/freq[1], z=z/freq[2],
654 -
                                octaves=octaves)
655 -
    im = im.squeeze()
656 -
    if porosity:
657 -
        im = norm_to_uniform(im, scale=[0, 1])
658 -
        im = im < porosity
659 -
    return im
660 -
661 -
662 -
def blobs(shape: List[int], porosity: float = 0.5, blobiness: int = 1):
715 +
    if shape.size == 1:  # Assume 3D
716 +
        shape = np.ones(3, dtype=int)*shape
717 +
    res = np.array(frequency)
718 +
    if res.size == 1:  # Assume shape as shape
719 +
        res = np.ones(shape.size, dtype=int)*res
720 +
721 +
    # Check inputs for various sins
722 +
    if res.size != shape.size:
723 +
        raise Exception('shape and res must have same dimensions')
724 +
    if np.any(np.mod(shape, res) > 0):
725 +
        raise Exception('res must be a multiple of shape along each axis')
726 +
    if np.any(shape/res**octaves < 1):
727 +
        raise Exception('(res[i])**octaves must be <= shape[i]')
728 +
    check = shape/(res**octaves)
729 +
    if np.any(check % 1):
730 +
        raise Exception("Image size must be factor of res**octaves")
731 +
732 +
    # Generate noise
733 +
    noise = np.zeros(shape)
734 +
    frequency = 1
735 +
    amplitude = 1
736 +
    for _ in tqdm(range(octaves)):
737 +
        if noise.ndim == 2:
738 +
            noise += amplitude * _perlin_noise_2D(shape, frequency*res)
739 +
        elif noise.ndim == 3:
740 +
            noise += amplitude * _perlin_noise_3D(shape, frequency*res)
741 +
        frequency *= 2
742 +
        amplitude *= persistence
743 +
744 +
    if porosity is not None:
745 +
        noise = norm_to_uniform(noise, scale=[0, 1])
746 +
        noise = noise > porosity
747 +
748 +
    return noise
749 +
750 +
751 +
def _perlin_noise_3D(shape, res):
752