Showing 1 of 5 files from the diff.

@@ -1,7 +1,10 @@
Loading
1 1
import sys
2 2
import dask
3 +
import dask.array as da
4 +
from dask.diagnostics import ProgressBar
3 5
import warnings
4 6
import numpy as np
7 +
from numba import njit, prange
5 8
from edt import edt
6 9
import operator as op
7 10
from tqdm import tqdm
@@ -9,9 +12,9 @@
Loading
9 12
import scipy.spatial as sptl
10 13
from collections import namedtuple
11 14
from scipy.signal import fftconvolve
12 -
from skimage.segmentation import clear_border
15 +
from skimage.segmentation import clear_border, watershed
13 16
from skimage.morphology import ball, disk, square, cube, diamond, octahedron
14 -
from skimage.morphology import reconstruction, watershed
17 +
from skimage.morphology import reconstruction
15 18
from porespy.tools import randomize_colors, fftmorphology
16 19
from porespy.tools import get_border, extend_slice, extract_subsection
17 20
from porespy.tools import _create_alias_map
@@ -506,7 +509,7 @@
Loading
506 509
    return peaks_new
507 510
508 511
509 -
def trim_saddle_points(peaks, dt, max_iters=10):
512 +
def trim_saddle_points(peaks, dt, max_iters=10, verbose=1):
510 513
    r"""
511 514
    Removes peaks that were mistakenly identified because they lied on a
512 515
    saddle or ridge in the distance transform that was not actually a true
@@ -564,7 +567,7 @@
Loading
564 567
                peaks_i = False
565 568
                break  # Found a saddle point
566 569
        peaks[s] = peaks_i
567 -
        if iters >= max_iters:
570 +
        if iters >= max_iters and verbose:
568 571
            print(
569 572
                "Maximum number of iterations reached, consider "
570 573
                + "running again with a larger value of max_iters"
@@ -1760,3 +1763,570 @@
Loading
1760 1763
        # Insert image chunk into main image
1761 1764
        im2[a] = ims[i][b]
1762 1765
    return im2
1766 +
1767 +
1768 +
def snow_partitioning_parallel(im,
                               overlap='dt',
                               divs=2,
                               mode='parallel',
                               num_workers=None,
                               crop=True,
                               zoom_factor=0.5,
                               r_max=5,
                               sigma=0.4,
                               return_all=False):
    r"""
    Perform the SNOW algorithm in parallel or serial mode to reduce time or
    memory usage respectively, by geometric domain decomposition of a large
    image.

    Parameters
    ----------
    im : ND-array
        A binary image of porous media with 'True' values indicating the
        phase of interest.

    overlap : float or int or str
        Overlapping thickness between two subdomains, used to merge
        watershed-segmented regions at the intersection of two or more
        subdomains.
        If 'dt' the overlap is calculated from the maximum of the distance
        transform of the whole image.
        If 'ws' the overlap is calculated by finding the maximum dimension
        of the bounding box of the largest segmented region in a version of
        the image scaled down by ``zoom_factor``.
        If any real number is provided, that value is used as the
        overlapping thickness.

    divs : list or int
        Number of subdomains each axis is divided into.  A scalar is
        applied to all axes; a list assigns each entry to the corresponding
        axis, e.g. [2, 3, 4] divides the z, y and x axes into 2, 3 and 4
        parts respectively.

    mode : str
        If 'parallel', subdomains are processed on ``num_workers`` cores.
        If 'serial', subdomains are processed one by one on a single core.

    num_workers : int or None
        Number of cores used to process subdomains in parallel.  If None,
        all cores are used; pass an integer to limit memory usage.

    crop : bool
        If True the image is cropped to fit the specified divisions.

    zoom_factor : float or int
        The zoom applied to the image to find the overlap thickness when
        using the 'ws' overlap mode.

    return_all : bool
        If ``True`` a named tuple containing the original image, the
        distance transform, and the final pore regions is returned.  The
        default is ``False``.

    Returns
    -------
    regions : ND-array
        Partitioned image of segmented regions with unique labels.  Each
        region corresponds to a pore body, while intersections with other
        regions correspond to throat areas.
    """
    # --------------------------------------------------------------------
    # Adjust the image shape so each axis is an exact multiple of divs
    tup = namedtuple("results", field_names=["im", "dt", "regions"])
    if isinstance(divs, int):
        divs = [divs for i in range(im.ndim)]
    shape = []
    for i in range(im.ndim):
        shape.append(divs[i] * (im.shape[i] // divs[i]))

    if shape != list(im.shape):
        if crop:
            for i in range(im.ndim):
                im = im.swapaxes(0, i)
                im = im[:shape[i], ...]
                im = im.swapaxes(i, 0)
            print(f'Image is cropped to shape {shape}.')
        else:
            print('-' * 80)
            print(f"Possible image shape for specified divisions is {shape}.")
            print("To crop the image please set crop argument to 'True'.")
            return
    # --------------------------------------------------------------------
    # Get overlap thickness from the distance transform
    chunk_shape = (np.array(shape) / np.array(divs)).astype(int)
    print('# Beginning parallel SNOW algorithm...')
    print('=' * 80)
    print('Calculating overlap thickness')
    if overlap == 'dt':
        dt = edt((im > 0), parallel=0)
        overlap = dt.max()
    elif overlap == 'ws':
        # Run SNOW on a zoomed-down copy and size the overlap from the
        # largest region's bounding box, rescaled back to full resolution.
        rev = spim.interpolation.zoom(im, zoom=zoom_factor, order=0)
        rev = rev > 0
        dt = edt(rev, parallel=0)
        rev_snow = snow_partitioning(rev, dt=dt, r_max=r_max, sigma=sigma)
        labels, counts = np.unique(rev_snow, return_counts=True)
        node = np.where(counts == counts[1:].max())[0][0]
        slices = spim.find_objects(rev_snow)
        overlap = max(rev_snow[slices[node - 1]].shape) / (zoom_factor * 2.0)
        dt = edt((im > 0), parallel=0)
    else:
        # A user-supplied total thickness; halved because 2 * overlap is
        # applied below.
        overlap = overlap / 2.0
        dt = edt((im > 0), parallel=0)
    print('Overlap Thickness: ' + str(int(2.0 * overlap)) + ' voxels')
    # --------------------------------------------------------------------
    # Overlap depth added to every chunk and trimmed back after watershed
    depth = {}
    trim_depth = {}
    for i in range(im.ndim):
        depth[i] = int(2.0 * overlap)
        trim_depth[i] = int(2.0 * overlap) - 1

    # --------------------------------------------------------------------
    # Apply SNOW to each image chunk lazily via dask
    da_im = da.from_array(dt, chunks=chunk_shape)
    da_im = da.overlap.overlap(da_im, depth=depth, boundary='none')
    da_im = da_im.map_blocks(chunked_snow, r_max=r_max, sigma=sigma)
    da_im = da.overlap.trim_internal(da_im, trim_depth, boundary='none')
    if mode == 'serial':
        num_workers = 1
    elif mode != 'parallel':
        # 'parallel' keeps num_workers as given (None means all cores)
        raise Exception('Mode of operation can either be parallel or serial')
    with ProgressBar():
        print('Applying snow to image chunks')
        regions = da_im.compute(num_workers=num_workers)
    # --------------------------------------------------------------------
    # Relabel chunks so labels are unique across the whole image
    print('Relabelling watershed chunks')
    regions = relabel_chunks(im=regions, chunk_shape=chunk_shape)
    # --------------------------------------------------------------------
    # Merge labels across chunk boundaries
    print('Stitching watershed chunks')
    regions = watershed_stitching(im=regions, chunk_shape=chunk_shape)
    print('=' * 80)
    if return_all:
        # Return a namedtuple *instance* (the original assigned attributes
        # onto the class itself); field access is unchanged for callers.
        return tup(im=im, dt=dt, regions=regions)
    return regions
1925 +
1926 +
1927 +
def chunked_snow(im, r_max=5, sigma=0.4):
    r"""
    Partition the void space into pore regions using a marker-based
    watershed, with specially filtered peaks as markers.

    The SNOW network extraction algorithm (Sub-Network of an Over-segmented
    Watershed) was designed to handle the peculiarities of high-porosity
    materials, but it applies well to other materials too.

    Parameters
    ----------
    im : array_like
        Distance transform of the phase of interest in a binary image.
    r_max : int
        Radius of the spherical structuring element used in the maximum
        filter stage that finds peaks.  The default is 5.
    sigma : float
        Standard deviation of the Gaussian filter used in step 1.  The
        default is 0.4.  If 0 is given the filter is not applied, which is
        useful when ``im`` is a distance transform that has already been
        processed.

    Returns
    -------
    image : ND-array
        An image the same shape as ``im`` with the void space partitioned
        into pores using a marker-based watershed seeded with the peaks
        found by the SNOW algorithm [1].

    References
    ----------
    [1] Gostick, J. "A versatile and efficient network extraction algorithm
    using marker-based watershed segmentation".  Physical Review E. (2017)
    """
    void = im > 0
    smoothed = spim.gaussian_filter(input=im, sigma=sigma)
    markers = find_peaks(dt=smoothed, r_max=r_max)
    markers = trim_saddle_points(peaks=markers, dt=smoothed,
                                 max_iters=99, verbose=0)
    markers = trim_nearby_peaks(peaks=markers, dt=smoothed)
    markers, _ = spim.label(markers)
    labeled = watershed(image=-smoothed, markers=markers, mask=void)
    # Zero out any labels that leaked outside the void space.
    return labeled * void
1970 +
1971 +
1972 +
def pad(im, pad_width=1, constant_value=0):
    r"""
    Pad the image with a constant value and width.

    Parameters
    ----------
    im : ND-array
        The image that requires padding.
    pad_width : int
        The number of voxels padded onto every edge.  Default is 1.
    constant_value : int
        The constant value to pad with.  Default is 0.

    Returns
    -------
    output : ND-array
        Padded ``uint32`` image, with ``2 * pad_width`` added to every
        dimension of the input image.
    """
    padded_shape = np.array(im.shape) + 2 * pad_width
    # np.full replaces the zeros-then-add dance of the original.
    temp = np.full(padded_shape, constant_value, dtype=np.uint32)
    # One interior slice per axis generalizes to any ndim; the original
    # branched on ndim and silently mishandled arrays with ndim > 3.
    interior = (slice(pad_width, -pad_width),) * im.ndim
    temp[interior] = im
    return temp
2007 +
2008 +
2009 +
def relabel_chunks(im, chunk_shape):  # pragma: no cover
    r"""
    Assign a fresh label range to every chunk (sub-domain) of the image so
    that no two regions in different chunks share a label.

    Parameters
    ----------
    im : ND-array
        Image whose chunks contain repeating labels.

    chunk_shape : tuple
        Shape of the chunks to relabel.  The chunk shape should be a
        multiple of the image shape, otherwise some labels will not be
        relabeled.

    Returns
    -------
    output : ND-array
        Relabeled image with a unique label assigned to each region.
    """
    im = pad(im, pad_width=1)
    c = np.array(chunk_shape, dtype=np.uint32) + 2
    num = (np.array(im.shape, dtype=np.uint32) / c).astype(int)
    offset = 0

    if im.ndim == 3:
        for z, y, x in np.ndindex(num[0], num[1], num[2]):
            view = im[z * c[0]: (z + 1) * c[0],
                      y * c[1]: (y + 1) * c[1],
                      x * c[2]: (x + 1) * c[2]]
            view += offset            # shift labels past all earlier chunks
            view[view == offset] = 0  # restore background voxels to zero
            offset = view.max()
    else:
        for y, x in np.ndindex(num[0], num[1]):
            view = im[y * c[0]: (y + 1) * c[0],
                      x * c[1]: (x + 1) * c[1]]
            view += offset
            view[view == offset] = 0
            offset = view.max()

    return im
2061 +
2062 +
2063 +
def trim_internal_slice(im, chunk_shape):  # pragma: no cover
    r"""
    Remove the extra boundary slices that were added to stitch two or more
    chunks together.

    Parameters
    ----------
    im : ND-array
        Image containing extra slices in the x, y, z directions.

    chunk_shape : tuple
        Shape of the chunks from which the image was subdivided.

    Returns
    -------
    output : ND-array
        Image without the extra internal slices; its shape matches the
        image originally provided for watershed segmentation.
    """
    padded = np.array(chunk_shape, dtype=np.uint32) + 2
    plain = np.array(chunk_shape, dtype=np.uint32)
    num = (np.array(im.shape, dtype=np.uint32) / padded).astype(int)
    out = np.empty(num * plain, dtype=np.uint32)

    if im.ndim == 3:
        for z, y, x in np.ndindex(num[0], num[1], num[2]):
            block = im[z * padded[0]: (z + 1) * padded[0],
                       y * padded[1]: (y + 1) * padded[1],
                       x * padded[2]: (x + 1) * padded[2]]
            # Drop the one-voxel shell around the chunk when copying out.
            out[z * plain[0]: (z + 1) * plain[0],
                y * plain[1]: (y + 1) * plain[1],
                x * plain[2]: (x + 1) * plain[2]] = block[1:-1, 1:-1, 1:-1]
    else:
        for y, x in np.ndindex(num[0], num[1]):
            block = im[y * padded[0]: (y + 1) * padded[0],
                       x * padded[1]: (x + 1) * padded[1]]
            out[y * plain[0]: (y + 1) * plain[0],
                x * plain[1]: (x + 1) * plain[1]] = block[1:-1, 1:-1]

    return out
2111 +
2112 +
2113 +
def watershed_stitching(im, chunk_shape):
    r"""
    Stitch individual sub-domains of a watershed segmentation into one big
    segmentation, relabeling the boundary labels of each sub-domain so that
    regions spanning a boundary are merged.

    Parameters
    ----------
    im : ND-array
        Image with watershed segmentation performed on all sub-domains
        individually.

    chunk_shape : tuple
        Shape of the sub-domain in which the segmentation was performed.

    Returns
    -------
    output : ND-array
        Stitched watershed segmentation with all sub-domains merged to form
        a single consistent labeling.
    """

    c_shape = np.array(chunk_shape)
    # Number of chunks along each axis.
    cuts_num = (np.array(im.shape) / c_shape).astype(np.uint32)

    for axis, num in enumerate(cuts_num):
        keys = []
        values = []
        if num > 1:
            # Bring the working axis to position 0 so the slicing below is
            # uniform for every axis.
            im = im.swapaxes(0, axis)
            for i in range(1, num):
                # Position of the seam between chunk i-1 and chunk i.
                # NOTE(review): the "+ 3 ... - (i - 1)" arithmetic assumes
                # each chunk carries the pad/overlap slices added upstream
                # by relabel_chunks — confirm against that convention.
                sl = i * (chunk_shape[axis] + 3) - (i - 1)
                # Matching slices on either side of the seam: labels at the
                # same voxel position belong to the same physical region.
                sl1 = im[sl - 3, ...]
                sl1_mask = sl1 > 0
                sl2 = im[sl - 1, ...] * sl1_mask
                sl1_labels = sl1.flatten()[sl1.flatten() > 0]
                sl2_labels = sl2.flatten()[sl2.flatten() > 0]
                # Each foreground voxel must pair one-to-one across the
                # seam, otherwise the overlap thickness was wrong.
                if sl1_labels.size != sl2_labels.size:
                    raise Exception('The selected overlapping thickness is not '
                                    'suitable for input image. Change '
                                    'overlapping criteria '
                                    'or manually input value.')
                keys.append(sl1_labels)
                values.append(sl2_labels)
            # Rewrite the labels from one side of every seam with their
            # partners from the other side.
            im = replace_labels(array=im, keys=keys, values=values)
            im = im.swapaxes(axis, 0)
    # Drop the helper slices and compact the surviving label values.
    im = trim_internal_slice(im=im, chunk_shape=chunk_shape)
    im = resequence_labels(array=im)

    return im
2163 +
2164 +
2165 +
@njit(parallel=True)
def copy(im, output):  # pragma: no cover
    r"""
    Copy the input array into ``output``, an array allocated in a different
    memory space.  This is a numba version of numpy's copy function;
    because each element is copied in parallel, this implementation is
    faster than the numpy version.

    Parameters
    ----------
    im : ND-array
        Array that needs to be copied.
    output : ND-array
        Pre-allocated array of the same shape as ``im`` that receives the
        copied values.

    Returns
    -------
    output : ND-array
        The copied array.
    """

    # Explicit per-ndim loops: numba parallelizes the prange iterations.
    if im.ndim == 3:
        for i in prange(im.shape[0]):
            for j in prange(im.shape[1]):
                for k in prange(im.shape[2]):
                    output[i, j, k] = im[i, j, k]
    elif im.ndim == 2:
        for i in prange(im.shape[0]):
            for j in prange(im.shape[1]):
                output[i, j] = im[i, j]
    else:
        for i in prange(im.shape[0]):
            output[i] = im[i]

    return output
2198 +
2199 +
2200 +
@njit(parallel=True)
def _replace(array, keys, values, ind_sort):  # pragma: no cover
    r"""
    Replace ``keys`` elements of ``array`` in place with the corresponding
    ``values`` elements.  Internal helper of ``replace_labels``.

    Parameters
    ----------
    array : 1D-array
        Flattened array whose labels are replaced in place.
    keys : 1D-array
        The labels that need to be replaced.
    values : 1D-array
        The values that will be assigned to the matching labels.
    ind_sort : 1D-array
        Indices that sort ``keys``; precomputed by the caller so the sort
        happens once, outside this compiled function.

    Notes
    -----
    Mutates ``array`` in place and returns nothing.
    """
    keys_sorted = keys[ind_sort]
    values_sorted = values[ind_sort]
    # Set membership is the cheap rejection test; searchsorted then locates
    # the replacement only for elements that actually are keys.
    s_keys = set(keys)

    for i in prange(array.shape[0]):
        if array[i] in s_keys:
            ind = np.searchsorted(keys_sorted, array[i])
            array[i] = values_sorted[ind]
2229 +
2230 +
2231 +
def replace_labels(array, keys, values):
    r"""
    Replace the labels listed in ``keys`` with the corresponding entries of
    ``values`` throughout ``array``.

    Parameters
    ----------
    array : ND-array
        Array which requires replacing labels.
    keys : list of 1D-arrays
        The labels that need to be replaced.
    values : list of 1D-arrays
        The values that will be assigned to the matching labels.

    Returns
    -------
    array : ND-array
        Array with replaced labels.
    """
    original_shape = array.shape
    flat = array.flatten()
    all_keys = np.concatenate(keys, axis=0)
    all_values = np.concatenate(values, axis=0)
    # Sort once here so the compiled helper can binary-search the keys.
    order = np.argsort(all_keys)
    _replace(flat, all_keys, all_values, order)
    return flat.reshape(original_shape)
2257 +
2258 +
2259 +
@njit()
def _sequence(array, count):  # pragma: no cover
    r"""
    Internal helper of the ``resequence_labels`` method.  Resequences the
    array elements in place using numba, which is many folds faster than
    the make_contiguous function.

    Parameters
    ----------
    array : 1d-array
        Flattened 1d-array that needs resequencing.
    count : 1d-array
        Zero-initialized scratch array with one slot per possible label;
        ``count[label]`` records the new value assigned to ``label``.

    Notes
    -----
    The output of this function is not the same as make_contiguous or
    scikit-image's relabel_sequential: labels are renumbered in order of
    first appearance (which also shuffles the regions), while those methods
    return a sorted resequencing.
    """
    a = 1  # next new label to hand out
    i = 0
    while i < (len(array)):
        data = array[i]
        if data != 0:  # zero is background and stays zero
            if count[data] == 0:
                # First time this label is seen: assign it the next value.
                count[data] = a
                a += 1
        array[i] = count[data]
        i += 1
2292 +
2293 +
2294 +
@njit(parallel=True)
def amax(array):  # pragma: no cover
    r"""
    Find the largest element in an array using a fast parallel numba
    reduction.

    Parameters
    ----------
    array : ND-array
        Array in which the largest element needs to be calculated.

    Returns
    -------
    scalar : float or int
        The largest element value in the input array.
    """

    return np.max(array)
2310 +
2311 +
2312 +
def resequence_labels(array):
    r"""
    Resequence the labels of ``array`` so that they form a contiguous
    range.

    Parameters
    ----------
    array : ND-array
        Array that requires resequencing.

    Returns
    -------
    array : ND-array
        Resequenced array with the same shape as the input array.
    """
    original_shape = array.shape
    flat = array.ravel()
    # One scratch slot per possible label value, all starting at zero.
    slots = np.zeros(amax(flat) + 1, dtype=np.uint32)
    _sequence(flat, slots)
    return flat.reshape(original_shape)
Files Coverage
porespy 88.0%
Project Totals (16 files) 88.0%
codecov-umbrella
Build #221390128 -
unittests
1
codecov:
2
  branch: dev
3

4
coverage:
5
  precision: 1
6
  round: down
7
  range: "50...100"
8

9
  status:
10
    project:
11
      default:
12
        target: auto
13
        threshold: 0.5%
14
        branches: null
15

16
    patch:
17
      default:
18
        target: auto
19
        threshold: 0.5%
20
        branches: null
21

22
comment:
23
  layout: "header, diff, changes, sunburst, uncovered"
24
  branches: null
25
  behavior: default
Sunburst
The inner-most circle is the entire project, moving away from the center are folders then, finally, a single file. The size and color of each slice is representing the number of statements and the coverage, respectively.
Icicle
The top section represents the entire project. Proceeding with folders and finally individual files. The size and color of each slice is representing the number of statements and the coverage, respectively.
Grid
Each block represents a single file in the project. The size and color of each block is represented by the number of statements and the coverage, respectively.
Loading