# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
#   See COPYING file distributed along with the NiBabel package for the
#   copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Image processing functions for:

* smoothing
* resampling
* converting sd to and from FWHM

Smoothing and resampling routines need scipy
"""

import numpy as np
import numpy.linalg as npl

from .optpkg import optional_package
spnd, _, _ = optional_package('scipy.ndimage')

from .affines import AffineError, to_matvec, from_matvec, append_diag, rescale_affine
from .spaces import vox2out_vox
from .nifti1 import Nifti1Image
from .orientations import axcodes2ornt, io_orientation, ornt_transform
from .imageclasses import spatial_axes_first

SIGMA2FWHM = np.sqrt(8 * np.log(2))
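# For a Gaussian kernel, FWHM = 2 * sqrt(2 * ln(2)) * sigma = sqrt(8 * ln(2)) * sigma
# (roughly 2.355 * sigma), so multiplying a sigma by SIGMA2FWHM gives the FWHM
# and dividing a FWHM by SIGMA2FWHM gives the sigma.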


def fwhm2sigma(fwhm):
    """ Convert a FWHM value to sigma in a Gaussian kernel.

    Parameters
    ----------
    fwhm : array-like
        FWHM value or values

    Returns
    -------
    sigma : array or float
        sigma values corresponding to `fwhm` values

    Examples
    --------
    >>> sigma = fwhm2sigma(6)
    >>> sigmae = fwhm2sigma([6, 7, 8])
    >>> sigma == sigmae[0]
    True
    """
    return np.asarray(fwhm) / SIGMA2FWHM


def sigma2fwhm(sigma):
    """ Convert a sigma in a Gaussian kernel to a FWHM value

    Parameters
    ----------
    sigma : array-like
        sigma value or values

    Returns
    -------
    fwhm : array or float
        fwhm values corresponding to `sigma` values

    Examples
    --------
    >>> fwhm = sigma2fwhm(3)
    >>> fwhms = sigma2fwhm([3, 4, 5])
    >>> fwhm == fwhms[0]
    True
    """
    return np.asarray(sigma) * SIGMA2FWHM


def adapt_affine(affine, n_dim):
    """ Adapt input / output dimensions of spatial `affine` for `n_dim`

    Adapts a spatial (4, 4) affine that is being applied to an image with fewer
    than 3 spatial dimensions, or more than 3 dimensions. If there are more
    than three dimensions, assume an identity transformation for these
    dimensions.

    Parameters
    ----------
    affine : array-like
        affine transform. Usually shape (4, 4). For what follows ``N, M =
        affine.shape``
    n_dim : int
        Number of dimensions of underlying array, and therefore number of input
        dimensions for affine.

    Returns
    -------
    adapted : shape (M, n_dim+1) array
        Affine array adapted to number of input dimensions. Columns of the
        affine corresponding to missing input dimensions have been dropped,
        columns corresponding to extra input dimensions have an extra identity
        column added
    """
    affine = np.asarray(affine)
    rzs, trans = to_matvec(affine)
    # For missing input dimensions, drop columns in rzs
    rzs = rzs[:, :n_dim]
    adapted = from_matvec(rzs, trans)
    n_extra_columns = n_dim - adapted.shape[1] + 1
    if n_extra_columns > 0:
        adapted = append_diag(adapted, np.ones((n_extra_columns,)))
    return adapted
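# Illustrative sketch of `adapt_affine` behavior (the example values are
# assumptions, not from the original module): a (4, 4) affine applied to a 2D
# array loses its third input column, while one applied to a 4D array gains an
# identity row and column for the extra (e.g. time) axis.
#
#     adapt_affine(np.eye(4), 2).shape   # -> (4, 3)
#     adapt_affine(np.eye(4), 4).shape   # -> (5, 5)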


def resample_from_to(from_img,
                     to_vox_map,
                     order=3,
                     mode='constant',
                     cval=0.,
                     out_class=Nifti1Image):
    """ Resample image `from_img` to mapped voxel space `to_vox_map`

    Resample using N-d spline interpolation.

    Parameters
    ----------
    from_img : object
        Object having attributes ``dataobj``, ``affine``, ``header`` and
        ``shape``. If `out_class` is not None, ``img.__class__`` should be able
        to construct an image from data, affine and header.
    to_vox_map : image object or length 2 sequence
        If object, has attributes ``shape`` giving input voxel shape, and
        ``affine`` giving mapping of input voxels to output space. If length 2
        sequence, elements are (shape, affine) with same meaning as above. The
        affine is a (4, 4) array-like.
    order : int, optional
        The order of the spline interpolation, default is 3. The order has to
        be in the range 0-5 (see ``scipy.ndimage.affine_transform``)
    mode : str, optional
        Points outside the boundaries of the input are filled according
        to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
        Default is 'constant' (see ``scipy.ndimage.affine_transform``)
    cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0 (see
        ``scipy.ndimage.affine_transform``)
    out_class : None or SpatialImage class, optional
        Class of output image. If None, use ``from_img.__class__``.

    Returns
    -------
    out_img : object
        Image of instance specified by `out_class`, containing data from
        `from_img` resampled into the voxel space (shape and affine) defined by
        `to_vox_map`
    """
    # This check requires `shape` attribute of image
    if not spatial_axes_first(from_img):
        raise ValueError('Cannot predict position of spatial axes for Image '
                         'type ' + str(type(from_img)))
    try:
        to_shape, to_affine = to_vox_map.shape, to_vox_map.affine
    except AttributeError:
        to_shape, to_affine = to_vox_map
    a_to_affine = adapt_affine(to_affine, len(to_shape))
    if out_class is None:
        out_class = from_img.__class__
    from_n_dim = len(from_img.shape)
    if from_n_dim < 3:
        raise AffineError('from_img must be at least 3D')
    a_from_affine = adapt_affine(from_img.affine, from_n_dim)
    to_vox2from_vox = npl.inv(a_from_affine).dot(a_to_affine)
    rzs, trans = to_matvec(to_vox2from_vox)
    data = spnd.affine_transform(from_img.dataobj,
                                 rzs,
                                 trans,
                                 to_shape,
                                 order=order,
                                 mode=mode,
                                 cval=cval)
    return out_class(data, to_affine, from_img.header)
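# Typical use (a sketch; `img1`, `img2` and `some_4x4_affine` are assumed
# names, not defined in this module): resample one image onto another image's
# voxel grid, or onto an explicit (shape, affine) pair.
#
#     resampled = resample_from_to(img1, img2)
#     resampled = resample_from_to(img1, ((64, 64, 32), some_4x4_affine))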


def resample_to_output(in_img,
                       voxel_sizes=None,
                       order=3,
                       mode='constant',
                       cval=0.,
                       out_class=Nifti1Image):
    """ Resample image `in_img` to output voxel axes (world space)

    Parameters
    ----------
    in_img : object
        Object having attributes ``dataobj``, ``affine``, ``header``. If
        `out_class` is not None, ``img.__class__`` should be able to construct
        an image from data, affine and header.
    voxel_sizes : None or sequence
        Gives the diagonal entries of ``out_img.affine`` (except the trailing 1
        for the homogeneous coordinates) (``out_img.affine ==
        np.diag(voxel_sizes + [1])``). If None, return identity
        `out_img.affine`. If scalar, interpret as vector ``[voxel_sizes] *
        len(in_img.shape)``.
    order : int, optional
        The order of the spline interpolation, default is 3. The order has to
        be in the range 0-5 (see ``scipy.ndimage.affine_transform``).
    mode : str, optional
        Points outside the boundaries of the input are filled according to the
        given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default is
        'constant' (see ``scipy.ndimage.affine_transform``).
    cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0 (see
        ``scipy.ndimage.affine_transform``).
    out_class : None or SpatialImage class, optional
        Class of output image. If None, use ``in_img.__class__``.

    Returns
    -------
    out_img : object
        Image of instance specified by `out_class`, containing data output from
        resampling `in_img` into axes aligned to the output space of
        ``in_img.affine``
    """
    if out_class is None:
        out_class = in_img.__class__
    in_shape = in_img.shape
    n_dim = len(in_shape)
    if voxel_sizes is not None:
        voxel_sizes = np.asarray(voxel_sizes)
        if voxel_sizes.ndim == 0:  # Scalar
            voxel_sizes = np.repeat(voxel_sizes, n_dim)
    # Allow 2D images by promoting to 3D. We might want to see what a slice
    # looks like when resampled into world coordinates
    if n_dim < 3:  # Expand image to 3D, make voxel sizes match
        new_shape = in_shape + (1,) * (3 - n_dim)
        data = np.asanyarray(in_img.dataobj).reshape(new_shape)  # 2D data should be small
        in_img = out_class(data, in_img.affine, in_img.header)
        if voxel_sizes is not None and len(voxel_sizes) == n_dim:
            # Need to pad out voxel sizes to match new image dimensions
            voxel_sizes = tuple(voxel_sizes) + (1,) * (3 - n_dim)
    out_vox_map = vox2out_vox((in_img.shape, in_img.affine), voxel_sizes)
    return resample_from_to(in_img, out_vox_map, order, mode, cval, out_class)
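# Typical use (a sketch; `img` is an assumed name for a loaded spatial image):
# resample onto axes aligned with world coordinates, here with 2mm isotropic
# voxels.
#
#     out_img = resample_to_output(img, voxel_sizes=(2., 2., 2.))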


def smooth_image(img,
                 fwhm,
                 mode='nearest',
                 cval=0.,
                 out_class=Nifti1Image):
    """ Smooth image `img` along voxel axes by FWHM `fwhm` millimeters

    Parameters
    ----------
    img : object
        Object having attributes ``dataobj``, ``affine``, ``header`` and
        ``shape``. If `out_class` is not None, ``img.__class__`` should be able
        to construct an image from data, affine and header.
    fwhm : scalar or length 3 sequence
        FWHM *in mm* over which to smooth. The smoothing applies to the voxel
        axes, not to the output axes, but is in millimeters. The function
        adjusts the FWHM to voxels using the voxel sizes calculated from the
        affine. A scalar implies the same smoothing across the spatial
        dimensions of the image, but 0 smoothing over any further dimensions
        such as time. A vector should be the same length as the number of
        image dimensions.
    mode : str, optional
        Points outside the boundaries of the input are filled according
        to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
        Default is 'nearest'. This is different from the default for
        ``scipy.ndimage.affine_transform``, which is 'constant'. 'nearest'
        might be a better choice when smoothing to the edge of an image where
        there is still strong brain signal, otherwise this signal will get
        blurred towards zero.
    cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0 (see
        ``scipy.ndimage.affine_transform``).
    out_class : None or SpatialImage class, optional
        Class of output image. If None, use ``img.__class__``.

    Returns
    -------
    smoothed_img : object
        Image of instance specified by `out_class`, containing data output from
        smoothing `img` data by given FWHM kernel.
    """
    # This check requires `shape` attribute of image
    if not spatial_axes_first(img):
        raise ValueError('Cannot predict position of spatial axes for Image '
                         'type ' + str(type(img)))
    if out_class is None:
        out_class = img.__class__
    n_dim = len(img.shape)
    # TODO: make sure time axis is last
    # Pad out fwhm from scalar, adding 0 for fourth etc (time etc) dimensions
    fwhm = np.asarray(fwhm)
    if fwhm.size == 1:
        fwhm_scalar = fwhm
        fwhm = np.zeros((n_dim,))
        fwhm[:3] = fwhm_scalar
    # Voxel sizes
    RZS = img.affine[:, :n_dim]
    vox = np.sqrt(np.sum(RZS ** 2, 0))
    # Smoothing in terms of voxels
    vox_fwhm = fwhm / vox
    vox_sd = fwhm2sigma(vox_fwhm)
    # Do the smoothing
    sm_data = spnd.gaussian_filter(img.dataobj,
                                   vox_sd,
                                   mode=mode,
                                   cval=cval)
    return out_class(sm_data, img.affine, img.header)
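# Typical use (a sketch; `img` is an assumed name for a loaded spatial image):
# smooth with an 8mm FWHM Gaussian along the spatial voxel axes.
#
#     smoothed = smooth_image(img, fwhm=8)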


def conform(from_img,
            out_shape=(256, 256, 256),
            voxel_size=(1.0, 1.0, 1.0),
            order=3,
            cval=0.0,
            orientation='RAS',
            out_class=None):
    """ Resample image to ``out_shape`` with voxels of size ``voxel_size``.

    Using the default arguments, this function is meant to replicate most parts
    of FreeSurfer's ``mri_convert --conform`` command. Specifically, this
    function:
        - Resamples data to ``out_shape``
        - Resamples voxel sizes to ``voxel_size``
        - Reorients to RAS (``mri_convert --conform`` reorients to LIA)

    Unlike ``mri_convert --conform``, this command does not:
        - Transform data to range [0, 255]
        - Cast to unsigned eight-bit integer

    Parameters
    ----------
    from_img : object
        Object having attributes ``dataobj``, ``affine``, ``header`` and
        ``shape``. If `out_class` is not None, ``img.__class__`` should be able
        to construct an image from data, affine and header.
    out_shape : sequence, optional
        The shape of the output volume. Default is (256, 256, 256).
    voxel_size : sequence, optional
        The size in millimeters of the voxels in the resampled output. Default
        is 1mm isotropic.
    order : int, optional
        The order of the spline interpolation, default is 3. The order has to
        be in the range 0-5 (see ``scipy.ndimage.affine_transform``)
    cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0 (see
        ``scipy.ndimage.affine_transform``)
    orientation : str, optional
        Orientation of output image. Default is "RAS".
    out_class : None or SpatialImage class, optional
        Class of output image. If None, use ``from_img.__class__``.

    Returns
    -------
    out_img : object
        Image of instance specified by `out_class`, containing data from
        `from_img` resampled to shape ``out_shape`` with ``voxel_size`` voxels,
        reoriented to `orientation`
    """
    # Only support 3D images. This can be made more general in the future, once
    # tests are written.
    required_ndim = 3
    if from_img.ndim != required_ndim:
        raise ValueError("Only 3D images are supported.")
    elif len(out_shape) != required_ndim:
        raise ValueError(f"`out_shape` must have {required_ndim} values")
    elif len(voxel_size) != required_ndim:
        raise ValueError(f"`voxel_size` must have {required_ndim} values")

    start_ornt = io_orientation(from_img.affine)
    end_ornt = axcodes2ornt(orientation)
    transform = ornt_transform(start_ornt, end_ornt)

    # Reorient first to ensure shape matches expectations
    reoriented = from_img.as_reoriented(transform)

    out_aff = rescale_affine(reoriented.affine, reoriented.shape, voxel_size, out_shape)

    # Resample input image.
    out_img = resample_from_to(
        from_img=from_img, to_vox_map=(out_shape, out_aff), order=order, mode="constant",
        cval=cval, out_class=out_class)

    return out_img
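# Typical use (a sketch; `img` is an assumed name for a loaded 3D image):
# conform to the default 256**3 grid with 1mm isotropic voxels in RAS
# orientation.
#
#     conformed = conform(img)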