1442 |
1583 |
|
for s in slices: |
1443 |
1584 |
|
label_num += 1 |
1444 |
1585 |
|
# Find branch point labels the overlap current arc |
1445 |
|
- |
hits = pts_labels[s]*(arc_labels[s] == label_num) |
|
1586 |
+ |
hits = pts_labels[s] * (arc_labels[s] == label_num) |
1446 |
1587 |
|
# If image contains 2 branch points, then it's not a tail. |
1447 |
1588 |
|
if len(np.unique(hits)) == 3: |
1448 |
1589 |
|
im_result[s] += arc_labels[s] == label_num |
1449 |
1590 |
|
# Add missing branch points back to arc image to make complete skeleton |
1450 |
|
- |
im_result += skel*pts_orig |
|
1591 |
+ |
im_result += skel * pts_orig |
1451 |
1592 |
|
if iterations > 1: |
1452 |
1593 |
|
iterations -= 1 |
1453 |
1594 |
|
im_temp = np.copy(im_result) |
1454 |
1595 |
|
im_result = prune_branches(skel=im_result, |
1455 |
1596 |
|
branch_points=None, |
1456 |
|
- |
iterations=iterations) |
|
1597 |
+ |
iterations=iterations, |
|
1598 |
+ |
parallel=parallel, |
|
1599 |
+ |
divs=divs, cores=cores) |
1457 |
1600 |
|
if np.all(im_temp == im_result): |
1458 |
1601 |
|
iterations = 0 |
1459 |
1602 |
|
return im_result |
|
1603 |
+ |
|
|
1604 |
+ |
|
|
1605 |
+ |
def chunked_func(func,
                 overlap=None,
                 divs=2,
                 cores=None,
                 im_arg=["input", "image", "im"],
                 strel_arg=["strel", "structure", "footprint"],
                 **kwargs):
    r"""
    Performs the specified operation "chunk-wise" in parallel using dask.

    This can be used to save memory by doing one chunk at a time (``cores=1``)
    or to increase computation speed by spreading the work across multiple
    cores (e.g. ``cores=8``).

    This function can be used with any operation that applies a structuring
    element of some sort, since this implies that the operation is local
    and can be chunked.

    Parameters
    ----------
    func : function handle
        The function which should be applied to each chunk, such as
        ``scipy.ndimage.binary_dilation``.
    overlap : scalar or list of scalars, optional
        The amount of overlap to include when dividing up the image. This
        value will almost always be the size (i.e. diameter) of the
        structuring element. If not specified then the amount of overlap is
        inferred from the size of the structuring element, in which case the
        ``strel_arg`` must be specified.
    divs : scalar or list of scalars (default = [2, 2, 2])
        The number of chunks to divide the image into in each direction. The
        default is 2 chunks in each direction, resulting in a quartering of
        the image and 8 total chunks (in 3D). A scalar is interpreted as
        applying to all directions, while a list of scalars is interpreted
        as applying to each individual direction.
    cores : scalar
        The number of cores which should be used. By default, all cores will
        be used, or as many are needed for the given number of chunks, which
        ever is smaller.
    im_arg : string
        The keyword used by ``func`` for the image to be operated on. By
        default this function will look for ``image``, ``input``, and ``im``
        which are commonly used by *scipy.ndimage* and *skimage*.
    strel_arg : string
        The keyword used by ``func`` for the structuring element to apply.
        This is only needed if ``overlap`` is not specified. By default this
        function will look for ``strel``, ``structure``, and ``footprint``
        which are commonly used by *scipy.ndimage* and *skimage*.
    kwargs : additional keyword arguments
        All other arguments are passed to ``func`` as keyword arguments. Note
        that PoreSpy will fetch the image from this list of keywords using the
        value provided to ``im_arg``.

    Returns
    -------
    result : ND-image
        An image the same size as the input image, with the specified filter
        applied as though done on a single large image. There should be *no*
        difference.

    Notes
    -----
    This function divides the image into the specified number of chunks, but
    also applies a padding to each chunk to create an overlap with neighboring
    chunks. This way the operation does not have any edge artifacts. The
    amount of padding is usually equal to the radius of the structuring
    element but some functions do not use one, such as the distance transform
    and Gaussian blur. In these cases the user can specify ``overlap``.

    See Also
    --------
    scikit-image.util.apply_parallel

    Examples
    --------
    >>> import scipy.ndimage as spim
    >>> import porespy as ps
    >>> from skimage.morphology import ball
    >>> im = ps.generators.blobs(shape=[100, 100, 100])
    >>> f = spim.binary_dilation
    >>> im2 = ps.filters.chunked_func(func=f, overlap=7, im_arg='input',
    ...                               input=im, structure=ball(3), cores=1)
    >>> im3 = spim.binary_dilation(input=im, structure=ball(3))
    >>> np.all(im2 == im3)
    True

    """

    @dask.delayed
    def apply_func(func, **kwargs):
        # Apply function on sub-slice of overall image
        return func(**kwargs)

    # Import the array_split methods
    from array_split import shape_split, ARRAY_BOUNDS

    # Determine the value for im_arg: accept a single keyword or a list of
    # candidate keywords and pick the first one present in kwargs
    if type(im_arg) == str:
        im_arg = [im_arg]
    for item in im_arg:
        if item in kwargs.keys():
            im = kwargs[item]
            im_arg = item
            break
    # Fetch image from the kwargs dict
    # NOTE(review): if none of the candidate keywords was found, im_arg is
    # still a list here and this lookup raises — confirm intended behavior
    im = kwargs[im_arg]
    # Determine the number of divisions to create (one entry per image axis)
    divs = np.ones((im.ndim,), dtype=int) * np.array(divs)
    # If overlap given then use it, otherwise infer it from the shape of the
    # structuring element found in kwargs; halo is 0 along undivided axes
    if overlap is not None:
        halo = overlap * (divs > 1)
    else:
        if type(strel_arg) == str:
            strel_arg = [strel_arg]
        for item in strel_arg:
            if item in kwargs.keys():
                strel = kwargs[item]
                break
        halo = np.array(strel.shape) * (divs > 1)
    # Divide the image shape into overlapping tiles (list of per-axis slices)
    slices = np.ravel(
        shape_split(
            im.shape, axis=divs, halo=halo.tolist(), tile_bounds_policy=ARRAY_BOUNDS
        )
    )
    # Apply func to each subsection of the image (lazily, via dask.delayed)
    res = []
    for s in slices:
        # Extract subsection from image and input into kwargs
        kwargs[im_arg] = im[tuple(s)]
        res.append(apply_func(func=func, **kwargs))
    # Have dask actually compute the function on each subsection in parallel
    ims = dask.compute(res, num_workers=cores)[0]
    # Finally, put the pieces back together into a single master image, im2
    im2 = np.zeros_like(im, dtype=im.dtype)
    for i, s in enumerate(slices):
        # Prepare new slice objects into main and sub-sliced image, trimming
        # the halo except at the outer borders of the full image
        a = []  # Slices into main image
        b = []  # Slices into chunked image
        for dim in range(im.ndim):
            if s[dim].start == 0:
                ax = bx = 0
            else:
                ax = s[dim].start + halo[dim]
                bx = halo[dim]
            if s[dim].stop == im.shape[dim]:
                ay = by = im.shape[dim]
            else:
                ay = s[dim].stop - halo[dim]
                by = s[dim].stop - s[dim].start - halo[dim]
            a.append(slice(ax, ay, None))
            b.append(slice(bx, by, None))
        # Convert lists of slices to tuples
        a = tuple(a)
        b = tuple(b)
        # Insert image chunk into main image
        im2[a] = ims[i][b]
    return im2
|
1766 |
+ |
|
|
1767 |
+ |
|
|
1768 |
+ |
def snow_partitioning_parallel(im,
                               overlap='dt',
                               divs=2,
                               mode='parallel',
                               num_workers=None,
                               crop=True,
                               zoom_factor=0.5,
                               r_max=5,
                               sigma=0.4,
                               return_all=False):
    r"""
    Perform the SNOW algorithm in parallel or serial mode to reduce time and
    memory usage respectively, by geometric domain decomposition of a large
    image.

    Parameters
    ----------
    im: ND_array
        A binary image of porous media with 'True' values indicating phase of
        interest.
    overlap: float or int or str
        Overlapping thickness between two subdomains that is used to merge
        watershed segmented regions at the intersection of two or more
        subdomains.
        If 'dt' the overlap is calculated from the maximum distance
        transform value of the whole image.
        If 'ws' the overlap is calculated by finding the maximum dimension
        of the bounding box of the largest segmented region, on an image
        scaled down by ``zoom_factor``.
        If any real number is provided then this value is used as the
        overlapping thickness.
    divs: list or int
        Number of domains each axis will be divided into.
        If a scalar is provided then it is applied to all axes.
        If a list is provided then each respective axis is divided by its
        corresponding number in the list. For example [2, 3, 4] divides
        the z, y and x axes into 2, 3, and 4 parts respectively.
    mode: str
        If 'parallel' then all subdomains are processed using ``num_workers``
        cores; if 'serial' then all subdomains are processed one by one on a
        single core.
    num_workers: int or None
        Number of cores used to process domains in parallel. If None then
        all cores are used, but any integer can be specified to control
        memory usage.
    crop: bool
        If True the image shape is cropped to fit the specified division.
    zoom_factor: float or int
        The amount of zoom applied to the image to find the overlap
        thickness in the 'ws' overlap mode.
    return_all : boolean
        If set to ``True`` a named tuple is returned containing the original
        image, the distance transform, and the final pore regions. The
        default is ``False``.

    Returns
    -------
    regions: ND_array
        Partitioned image of segmented regions with unique labels. Each
        region corresponds to a pore body, while intersections with other
        regions correspond to throat areas.
    """
    # --------------------------------------------------------------------------
    # Adjust image shape according to specified dimension
    tup = namedtuple("results", field_names=["im", "dt", "regions"])
    if isinstance(divs, int):
        divs = [divs for i in range(im.ndim)]
    # Largest shape along each axis that divides evenly into `divs` chunks
    shape = []
    for i in range(im.ndim):
        shape.append(divs[i] * (im.shape[i] // divs[i]))

    if shape != list(im.shape):
        if crop:
            for i in range(im.ndim):
                im = im.swapaxes(0, i)
                im = im[:shape[i], ...]
                im = im.swapaxes(i, 0)
            print(f'Image is cropped to shape {shape}.')
        else:
            print('-' * 80)
            print(f"Possible image shape for specified divisions is {shape}.")
            print("To crop the image please set crop argument to 'True'.")
            return
    # --------------------------------------------------------------------------
    # Get overlap thickness from distance transform
    chunk_shape = (np.array(shape) / np.array(divs)).astype(int)
    print('# Beginning parallel SNOW algorithm...')
    print('=' * 80)
    print('Calculating overlap thickness')
    if overlap == 'dt':
        dt = edt((im > 0), parallel=0)
        overlap = dt.max()
    elif overlap == 'ws':
        # Estimate overlap from the largest region of a coarse segmentation
        rev = spim.interpolation.zoom(im, zoom=zoom_factor, order=0)
        rev = rev > 0
        dt = edt(rev, parallel=0)
        rev_snow = snow_partitioning(rev, dt=dt, r_max=r_max, sigma=sigma)
        labels, counts = np.unique(rev_snow, return_counts=True)
        # Index of the most common non-background label
        node = np.where(counts == counts[1:].max())[0][0]
        slices = spim.find_objects(rev_snow)
        # Rescale the largest region's extent back to the full-size image;
        # overlap is stored as a half-thickness throughout (hence the / 2)
        overlap = max(rev_snow[slices[node - 1]].shape) / (zoom_factor * 2.0)
        dt = edt((im > 0), parallel=0)
    else:
        # User supplied the full thickness; store it as a half-thickness
        overlap = overlap / 2.0
        dt = edt((im > 0), parallel=0)
    print('Overlap Thickness: ' + str(int(2.0 * overlap)) + ' voxels')
    # --------------------------------------------------------------------------
    # Get overlap and trim depth of all image dimensions
    depth = {}
    trim_depth = {}
    for i in range(im.ndim):
        depth[i] = int(2.0 * overlap)
        trim_depth[i] = int(2.0 * overlap) - 1

    # NOTE(review): attributes are set on the namedtuple *class*, not on an
    # instance; attribute access works but this is unconventional — confirm.
    tup.im = im
    tup.dt = dt
    # --------------------------------------------------------------------------
    # Applying snow to image chunks
    im = da.from_array(dt, chunks=chunk_shape)
    im = da.overlap.overlap(im, depth=depth, boundary='none')
    im = im.map_blocks(chunked_snow, r_max=r_max, sigma=sigma)
    im = da.overlap.trim_internal(im, trim_depth, boundary='none')
    # Validate mode; in 'parallel' mode num_workers is used as given
    if mode == 'serial':
        num_workers = 1
    elif mode != 'parallel':
        raise Exception('Mode of operation can either be parallel or serial')
    with ProgressBar():
        print('Applying snow to image chunks')
        regions = im.compute(num_workers=num_workers)
    # --------------------------------------------------------------------------
    # Relabelling watershed chunks
    print('Relabelling watershed chunks')
    regions = relabel_chunks(im=regions, chunk_shape=chunk_shape)
    # --------------------------------------------------------------------------
    # Stitching watershed chunks
    print('Stitching watershed chunks')
    regions = watershed_stitching(im=regions, chunk_shape=chunk_shape)
    print('=' * 80)
    if return_all:
        tup.regions = regions
        return tup
    return regions
|
1925 |
+ |
|
|
1926 |
+ |
|
|
1927 |
+ |
def chunked_snow(im, r_max=5, sigma=0.4):
    r"""
    Partitions the void space into pore regions using a marker-based
    watershed algorithm, with specially filtered peaks as markers.

    The SNOW network extraction algorithm (Sub-Network of an Over-segmented
    Watershed) was designed to handle the peculiarities of high porosity
    materials, but it applies well to other materials too.

    Parameters
    ----------
    im : array_like
        Distance transform of the phase of interest in a binary image.
    r_max : int
        Radius of the spherical structuring element used in the maximum
        filter stage that finds peaks. Default is 5.
    sigma : float
        Standard deviation of the Gaussian filter applied first. Default is
        0.4; a value of 0 skips the smoothing, which is useful if an
        already-processed distance transform is supplied as ``im``.

    Returns
    -------
    image : ND-array
        An image the same shape as ``im`` with the void space partitioned
        into pores using a marker-based watershed seeded with the peaks
        found by the SNOW algorithm [1].

    References
    ----------
    [1] Gostick, J. "A versatile and efficient network extraction algorithm
    using marker-based watershed segmenation". Physical Review E. (2017)
    """
    mask = im > 0
    # Smooth the distance transform, then find and filter the marker peaks
    blurred = spim.gaussian_filter(input=im, sigma=sigma)
    markers = find_peaks(dt=blurred, r_max=r_max)
    markers = trim_saddle_points(peaks=markers, dt=blurred, max_iters=99, verbose=0)
    markers = trim_nearby_peaks(peaks=markers, dt=blurred)
    markers, _ = spim.label(markers)
    # Flood the negated distance transform from the markers
    regions = watershed(image=-blurred, markers=markers, mask=mask)
    return regions * mask
|
1970 |
+ |
|
|
1971 |
+ |
|
|
1972 |
+ |
def pad(im, pad_width=1, constant_value=0):
    r"""
    Pad the image with a constant value and width.

    Parameters
    ----------
    im : ND-array
        The image that requires padding.
    pad_width : int
        The number of voxels padded onto each edge of every axis. Default
        is 1.
    constant_value : int
        The constant value used to fill the padded border. Default is 0.

    Returns
    -------
    output : ND-array
        Padded image of dtype ``np.uint32`` whose shape is the input shape
        plus ``2 * pad_width`` along every axis.
    """
    if pad_width == 0:
        # A zero-width slice like [0:-0] selects nothing, so short-circuit
        return im.astype(np.uint32)
    shape = np.array(im.shape) + 2 * pad_width
    # Fill the whole output with the border value, then drop the image in
    temp = np.full(shape, constant_value, dtype=np.uint32)
    # Works for any dimensionality (not just 1D/2D/3D)
    core = tuple(slice(pad_width, -pad_width) for _ in range(im.ndim))
    temp[core] = im

    return temp
|
2007 |
+ |
|
|
2008 |
+ |
|
|
2009 |
+ |
def relabel_chunks(im, chunk_shape):  # pragma: no cover
    r"""
    Assign new labels to each chunk (sub-domain) of the image so that no two
    chunks share a label value.

    Parameters
    ----------
    im: ND-array
        Image that contains repeating labels in its chunks or sub-domains.
    chunk_shape: tuple
        The shape of the chunk that will be relabeled in the image. Note
        the (padded) image shape should be a multiple of the (padded) chunk
        shape, otherwise some labels will not be relabeled.

    Returns
    -------
    output : ND-array
        The padded image with a unique label range assigned to each chunk.
    """
    im = pad(im, pad_width=1)
    im_shape = np.array(im.shape, dtype=np.uint32)
    # Each chunk grows by the 1-voxel pad on both sides
    c = np.array(chunk_shape, dtype=np.uint32) + 2
    num = (im_shape / c).astype(int)
    max_num = 0

    # Iterate over chunks in any dimensionality (previously only 2D/3D)
    for idx in np.ndindex(*num):
        sl = tuple(slice(i * c[d], (i + 1) * c[d]) for d, i in enumerate(idx))
        chunk = im[sl]
        # Shift labels past the highest label used so far, then restore the
        # background voxels (which were shifted to exactly max_num) to zero
        chunk += max_num
        chunk[chunk == max_num] = 0
        # An all-background chunk has max() == 0; don't let it reset the
        # running counter (doing so caused label collisions downstream)
        max_num = max(max_num, int(chunk.max()))
        im[sl] = chunk

    return im
|
2061 |
+ |
|
|
2062 |
+ |
|
|
2063 |
+ |
def trim_internal_slice(im, chunk_shape):  # pragma: no cover
    r"""
    Delete the extra slices from the image that were used to stitch two or
    more chunks together.

    Parameters
    ----------
    im : ND-array
        Image that contains extra padded slices along every axis.
    chunk_shape : tuple
        The shape of the chunk from which the image was subdivided.

    Returns
    -------
    output : ND-array
        Image without the extra internal slices. Its shape matches the
        input image originally provided for watershed segmentation.
    """
    im_shape = np.array(im.shape, dtype=np.uint32)
    c1 = np.array(chunk_shape, dtype=np.uint32) + 2  # chunk incl. 1-voxel pad
    c2 = np.array(chunk_shape, dtype=np.uint32)      # chunk without the pad
    num = (im_shape / c1).astype(int)
    out = np.empty(tuple(num * c2), dtype=np.uint32)
    # Drop the first and last plane of every axis of each chunk
    trim = tuple(slice(1, -1) for _ in range(im.ndim))

    # Iterate over chunks in any dimensionality (previously only 2D/3D)
    for idx in np.ndindex(*num):
        src = tuple(slice(i * c1[d], (i + 1) * c1[d]) for d, i in enumerate(idx))
        dst = tuple(slice(i * c2[d], (i + 1) * c2[d]) for d, i in enumerate(idx))
        out[dst] = im[src][trim]

    return out
|
2111 |
+ |
|
|
2112 |
+ |
|
|
2113 |
+ |
def watershed_stitching(im, chunk_shape):
    r"""
    Stitch individual sub-domains of a watershed segmentation into one big
    segmentation, relabelling the boundary labels of each sub-domain so that
    regions split across a boundary are merged.

    Parameters
    ----------
    im : ND-array
        An image with watershed segmentation performed on all sub-domains
        individually.
    chunk_shape: tuple
        The shape of the sub-domain in which the image segmentation was
        performed.

    Returns
    -------
    output : ND-array
        Stitched watershed segmentation with all sub-domains merged to form
        a single watershed segmentation.
    """

    c_shape = np.array(chunk_shape)
    # Number of chunks along each axis.
    # NOTE(review): im includes per-chunk padding, so this division relies
    # on integer truncation — confirm against relabel_chunks' output shape.
    cuts_num = (np.array(im.shape) / c_shape).astype(np.uint32)

    for axis, num in enumerate(cuts_num):
        keys = []
        values = []
        if num > 1:
            # Move the current axis to the front so seams are plain rows
            im = im.swapaxes(0, axis)
            for i in range(1, num):
                # Index just past the padded seam between chunk i-1 and i
                sl = i * (chunk_shape[axis] + 3) - (i - 1)
                # NOTE(review): sl-3 and sl-1 appear to select matching
                # planes contributed by the two overlapping chunks — confirm
                sl1 = im[sl - 3, ...]
                sl1_mask = sl1 > 0
                sl2 = im[sl - 1, ...] * sl1_mask
                # Foreground labels of each plane, in the same voxel order
                sl1_labels = sl1.flatten()[sl1.flatten() > 0]
                sl2_labels = sl2.flatten()[sl2.flatten() > 0]
                # The planes must pair up voxel-for-voxel; a mismatch means
                # the chosen overlap did not isolate regions correctly
                if sl1_labels.size != sl2_labels.size:
                    raise Exception('The selected overlapping thickness is not '
                                    'suitable for input image. Change '
                                    'overlapping criteria '
                                    'or manually input value.')
                keys.append(sl1_labels)
                values.append(sl2_labels)
            # Rename labels on one side of every seam to match the other
            im = replace_labels(array=im, keys=keys, values=values)
            im = im.swapaxes(axis, 0)
    # Drop the extra padded slices and renumber labels contiguously
    im = trim_internal_slice(im=im, chunk_shape=chunk_shape)
    im = resequence_labels(array=im)

    return im
|
2163 |
+ |
|
|
2164 |
+ |
|
|
2165 |
+ |
@njit(parallel=True)
def copy(im, output):  # pragma: no cover
    r"""
    Copy the input array into ``output``, an array allocated in a different
    memory space. This is a numba version of numpy's copy function; each
    element is copied using a parallel approach, making this implementation
    faster than the numpy version.

    Parameters
    ----------
    im: ND-array
        Array that needs to be copied.
    output: ND-array
        Pre-allocated array of the same shape as ``im`` that receives the
        copy.

    Returns
    -------
    output: ND-array
        The copied array.
    """
    # Explicit per-dimensionality loops with prange; presumably written this
    # way because numba parallelises explicit prange loops — confirm.
    if im.ndim == 3:
        for i in prange(im.shape[0]):
            for j in prange(im.shape[1]):
                for k in prange(im.shape[2]):
                    output[i, j, k] = im[i, j, k]
    elif im.ndim == 2:
        for i in prange(im.shape[0]):
            for j in prange(im.shape[1]):
                output[i, j] = im[i, j]
    else:
        for i in prange(im.shape[0]):
            output[i] = im[i]

    return output
|
2198 |
+ |
|
|
2199 |
+ |
|
|
2200 |
+ |
@njit(parallel=True)
def _replace(array, keys, values, ind_sort):  # pragma: no cover
    r"""
    Replace ``keys`` elements in the input array with the corresponding
    ``values`` elements, in place. Internal helper of ``replace_labels``.

    Parameters
    ----------
    array : 1D-array
        Flattened array whose labels require replacing; modified in place.
    keys : 1D-array
        The unique labels that need to be replaced.
    values : 1D-array
        The unique values that will be assigned to the labels.
    ind_sort : 1D-array
        Indices that sort ``keys``, precomputed by the caller.
    """
    # ind_sort = np.argsort(keys)
    keys_sorted = keys[ind_sort]
    values_sorted = values[ind_sort]
    # Set membership is a fast pre-filter before the binary search below
    s_keys = set(keys)

    for i in prange(array.shape[0]):
        if array[i] in s_keys:
            # Binary search in the sorted keys locates the replacement value
            ind = np.searchsorted(keys_sorted, array[i])
            array[i] = values_sorted[ind]
|
2229 |
+ |
|
|
2230 |
+ |
|
|
2231 |
+ |
def replace_labels(array, keys, values):
    r"""
    Replace the labels listed in ``keys`` with the corresponding ``values``.

    Parameters
    ----------
    array : ND-array
        Array which requires replacing labels.
    keys : 1D-array
        The unique labels that need to be replaced.
    values : 1D-array
        The unique values that will be assigned to the labels.

    Returns
    -------
    array : ND-array
        Array with replaced labels, same shape as the input.
    """
    original_shape = array.shape
    flat = array.flatten()
    # Merge the per-seam key/value arrays into single lookup arrays
    all_keys = np.concatenate(keys, axis=0)
    all_values = np.concatenate(values, axis=0)
    # Sorting order is computed here so the jitted helper can binary-search
    order = np.argsort(all_keys)
    _replace(flat, all_keys, all_values, order)
    return flat.reshape(original_shape)
|
2257 |
+ |
|
|
2258 |
+ |
|
|
2259 |
+ |
@njit()
def _sequence(array, count):  # pragma: no cover
    r"""
    Internal helper of ``resequence_labels``. Renumbers the array elements
    in ascending first-seen order using numba, which is many folds faster
    than the make-contiguous approach.

    Parameters
    ----------
    array: 1d-array
        Flattened label array that needs resequencing; modified in place.
    count: 1d-array
        Zero-initialised array with one slot per possible label value
        (length greater than the maximum label); used as an old-label to
        new-label lookup table.

    Notes
    -----
    The output is not the same as make_contiguous or relabel_sequential of
    scikit-image: labels are renumbered in order of first appearance, which
    effectively randomises the region numbering instead of sorting it.
    """
    a = 1
    i = 0
    while i < (len(array)):
        data = array[i]
        if data != 0:
            if count[data] == 0:
                # First occurrence of this label: assign the next new label
                count[data] = a
                a += 1
            array[i] = count[data]
        i += 1
|
2292 |
+ |
|
|
2293 |
+ |
|
|
2294 |
+ |
@njit(parallel=True)
def amax(array):  # pragma: no cover
    r"""
    Find the largest element in an array using a fast parallel numba
    reduction of ``np.max``.

    Parameters
    ----------
    array: ND-array
        Array in which the largest element needs to be calculated.

    Returns
    -------
    scalar: float or int
        The largest element value in the input array.
    """

    return np.max(array)
|
2310 |
+ |
|
|
2311 |
+ |
|
|
2312 |
+ |
def resequence_labels(array):
    r"""
    Renumber the labels so that they form a contiguous sequence.

    Parameters
    ----------
    array: ND-array
        Array that requires resequencing.

    Returns
    -------
    array : ND-array
        Resequenced array with the same shape as the input array.

    Notes
    -----
    Operates in place when possible: ``ravel`` returns a view for
    contiguous arrays, so the input array itself is updated.
    """
    shape = array.shape
    flat = array.ravel()
    # One lookup slot per possible label value (0 .. max label inclusive)
    seen = np.zeros(amax(flat) + 1, dtype=np.uint32)
    _sequence(flat, seen)
    return flat.reshape(shape)