# -*- coding: utf-8 -*-
"""
General benchmark template for all registration methods.
It also serves for evaluating the input registration pairs
(while no registration is performed, there is only the initial deformation)

Sample run (usage)::

    mkdir ./results
    python benchmarks/bm_registration.py \
        -t data-images/pairs-imgs-lnds_histol.csv -d ./data-images \
        -o ./results --unique

Copyright (C) 2016-2019 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""

import logging
import os
import shutil
import sys
import time
from functools import partial

import numpy as np
import pandas as pd
from skimage.color import rgb2gray

# this is used while calling this file as a script
sys.path += [os.path.abspath('.'), os.path.abspath('..')]  # Add path to root
from birl.utilities.data_io import update_path, create_folder, image_sizes, load_landmarks, load_image, save_image
from birl.utilities.dataset import image_histogram_matching, common_landmarks
from birl.utilities.evaluate import (
    compute_target_regist_error_statistic, compute_affine_transf_diff, compute_tre_robustness
)
from birl.utilities.experiments import (
    nb_workers, exec_commands, string_dict, iterate_mproc_map, create_basic_parser, parse_arg_params, Experiment
)
from birl.utilities.drawing import (export_figure, draw_image_points, draw_images_warped_landmarks, overlap_two_images)
from birl.utilities.registration import estimate_affine_transform

#: In case provided dataset and complete (true) dataset differ
COL_PAIRED_LANDMARKS = 'Ratio matched landmarks'


class ImRegBenchmark(Experiment):
    """ General benchmark class for all registration methods.
    It also serves for evaluating the input registration pairs.

    :param dict params: dictionary with experiment configuration,
        the required options are named in `REQUIRED_PARAMS`,
        note that the basic parameters are inherited

    The benchmark has the following steps:

    1. check all necessary paths and required parameters
    2. load the cover file and set all paths as absolute
    3. run the individual registration experiments in sequence or in parallel
        (nb_workers > 1); if the particular experiment folder exists
        (assumed completed experiment), it is skipped:

            a) create experiment folder and init experiment
            b) generate execution command
            c) run the command (with an option to lock it to a single thread)
            d) evaluate experiment, set the expected outputs and visualisation
            e) clean all extra files, if any

    4. visualise and evaluate the registration results

    .. note:: The actual implementation simulates the "IDEAL" registration as
     it blindly copies the reference landmarks as the results of the registration.
     In contrast to a real registration, it copies the moving images so there
     is alignment (consistent warping) between the resulting landmarks and image.

    Examples
    --------
    >>> # Running in single thread:
    >>> from birl.utilities.data_io import create_folder, update_path
    >>> path_out = create_folder('temp_results')
    >>> path_csv = os.path.join(update_path('data-images'), 'pairs-imgs-lnds_mix.csv')
    >>> params = {'path_table': path_csv,
    ...           'path_out': path_out,
    ...           'nb_workers': 1,
    ...           'unique': False,
    ...           'visual': True}
    >>> benchmark = ImRegBenchmark(params)
    >>> benchmark.run()
    True
    >>> del benchmark
    >>> shutil.rmtree(path_out, ignore_errors=True)

    >>> # Running in multiple parallel threads:
    >>> from birl.utilities.data_io import create_folder, update_path
    >>> path_out = create_folder('temp_results')
    >>> path_csv = os.path.join(update_path('data-images'), 'pairs-imgs-lnds_mix.csv')
    >>> params = {'path_table': path_csv,
    ...           'path_out': path_out,
    ...           'nb_workers': 2,
    ...           'unique': False,
    ...           'visual': True}
    >>> benchmark = ImRegBenchmark(params)
    >>> benchmark.run()
    True
    >>> del benchmark
    >>> shutil.rmtree(path_out, ignore_errors=True)
    """

    #: timeout for executing single image registration, NOTE: does not work for Py2
    EXECUTE_TIMEOUT = 60 * 60  # default = 1 hour
    #: default number of threads used by benchmarks
    NB_WORKERS_USED = nb_workers(0.8)
    #: file name of the CSV with registration results for all image pairs
    NAME_CSV_REGISTRATION_PAIRS = 'registration-results.csv'
    #: default file for exporting results in table format
    NAME_RESULTS_CSV = 'results-summary.csv'
    #: default file for exporting results in formatted text format
    NAME_RESULTS_TXT = 'results-summary.txt'
    #: logging file for registration experiments
    NAME_LOG_REGISTRATION = 'registration.log'
    #: output image name in experiment folder for reg. results - overlap of reference and warped image
    NAME_IMAGE_REF_WARP = 'image_reference-warped.jpg'
    #: output image name in experiment folder for reg. results - image and landmarks are warped
    NAME_IMAGE_MOVE_WARP_POINTS = 'image_warped_landmarks_warped.jpg'
    #: output image name in experiment folder for reg. results - warped landmarks in reference image
    NAME_IMAGE_REF_POINTS_WARP = 'image_ref_landmarks_warped.jpg'
    #: output image name in experiment folder for showing improved alignment by the used registration
    NAME_IMAGE_WARPED_VISUAL = 'registration_visual_landmarks.jpg'
    # column names in the cover and also the registration table
    #: reference (registration target) image
    COL_IMAGE_REF = 'Target image'
    #: moving (registration source) image
    COL_IMAGE_MOVE = 'Source image'
    #: reference image warped to the moving frame
    COL_IMAGE_REF_WARP = 'Warped target image'
    #: moving image warped to the reference frame
    COL_IMAGE_MOVE_WARP = 'Warped source image'
    #: reference (registration target) landmarks
    COL_POINTS_REF = 'Target landmarks'
    #: moving (registration source) landmarks
    COL_POINTS_MOVE = 'Source landmarks'
    #: reference landmarks warped to the moving frame
    COL_POINTS_REF_WARP = 'Warped target landmarks'
    #: moving landmarks warped to the reference frame
    COL_POINTS_MOVE_WARP = 'Warped source landmarks'
    #: registration folder for each particular experiment
    COL_REG_DIR = 'Registration folder'
    #: define robustness as improved image alignment from the initial state
    COL_ROBUSTNESS = 'Robustness'
    #: measured time of image registration in minutes
    COL_TIME = 'Execution time [minutes]'
    #: measured time of image pre-processing in minutes
    COL_TIME_PREPROC = 'Pre-processing time [minutes]'
    #: tuple of image size
    COL_IMAGE_SIZE = 'Image size [pixels]'
    #: image diagonal in pixels
    COL_IMAGE_DIAGONAL = 'Image diagonal [pixels]'
    #: define train / test status
    COL_STATUS = 'status'
    #: extension to the image column name for the temporary pre-processed image
    COL_IMAGE_EXT_TEMP = ' TEMP'
    #: number of landmarks in dataset (min of moving and reference)
    COL_NB_LANDMARKS_INPUT = 'nb. dataset landmarks'
    #: number of warped landmarks
    COL_NB_LANDMARKS_WARP = 'nb. warped landmarks'
    #: required experiment parameters
    REQUIRED_PARAMS = Experiment.REQUIRED_PARAMS + ['path_table']

    # list of columns in the cover csv
    COVER_COLUMNS = (COL_IMAGE_REF, COL_IMAGE_MOVE, COL_POINTS_REF, COL_POINTS_MOVE)
    COVER_COLUMNS_EXT = tuple(list(COVER_COLUMNS) + [COL_IMAGE_SIZE, COL_IMAGE_DIAGONAL])
    COVER_COLUMNS_WRAP = tuple(
        list(COVER_COLUMNS) + [COL_IMAGE_REF_WARP, COL_IMAGE_MOVE_WARP, COL_POINTS_REF_WARP, COL_POINTS_MOVE_WARP]
    )

    def __init__(self, params):
        """ initialise the benchmark

        :param dict params: benchmark parameters
        """
        assert 'unique' in params, 'missing "unique" among %r' % params.keys()
        super(ImRegBenchmark, self).__init__(params, params['unique'])
        logging.info(self.__doc__)
        self._df_overview = None
        self._df_experiments = None
        self.nb_workers = params.get('nb_workers', nb_workers(0.25))
        self._path_csv_regist = os.path.join(self.params['path_exp'], self.NAME_CSV_REGISTRATION_PAIRS)

    def _absolute_path(self, path, destination='data', base_path=None):
        """ update the path to the dataset or output

        :param str path: original path
        :param str destination: type of update,
            `data` for data source and `expt` for output experimental folder
        :param str|None base_path: optional base path
        :return str: updated path
        """
        if destination and destination == 'data' and 'path_dataset' in self.params:
            path = os.path.join(self.params['path_dataset'], path)
        elif destination and destination == 'expt' and 'path_exp' in self.params:
            path = os.path.join(self.params['path_exp'], path)
        path = update_path(path, absolute=True)
        return path

    def _relativize_path(self, path, destination='path_exp'):
        """ extract the relative path according to the given parameter

        :param str path: the original path to file/folder
        :param str destination: use path from parameters
        :return str: relative or the original path
        """
        if path is None or not os.path.exists(path):
            logging.debug('Source path does not exist: %s', path)
            return path
        assert destination in self.params, 'Missing path in params: %s' % destination
        base_path = self.params['path_exp']
        base_dir = os.path.basename(base_path)
        path_split = path.split(os.sep)

        if base_dir not in path_split:
            logging.debug('Missing requested folder "%s" in source path: %s', base_dir, path_split)
            return path
        path_split = path_split[path_split.index(base_dir) + 1:]
        path_rltv = os.sep.join(path_split)

        if os.path.exists(os.path.join(self.params[destination], path_rltv)):
            path = path_rltv
        else:
            logging.debug('Relative path does not exist: %s', path)
        return path

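    # For example, with params['path_exp'] == '/results/BmTemplate', calling
    # _relativize_path('/results/BmTemplate/0/warped.jpg') gives '0/warped.jpg'
    # (provided the file exists), and _absolute_path('0/warped.jpg', destination='expt')
    # restores the full path again.  (Illustrative paths only.)
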
    def _copy_config_to_expt(self, field_path):
        """ copy particular configuration to the experiment folder

        :param str field_path: field from parameters containing a path to a file
        """
        path_source = self.params.get(field_path, '')
        path_config = os.path.join(self.params['path_exp'], os.path.basename(path_source))
        if path_source and os.path.isfile(path_source):
            shutil.copy(path_source, path_config)
            self.params[field_path] = path_config
        else:
            logging.warning('Missing config: %s', path_source)

    def _get_paths(self, item, prefer_pproc=True):
        """ expand the relative paths to absolute; if a TEMP (pre-processed) path exists, prefer it

        :param dict item: row from the cover file with relative paths
        :param bool prefer_pproc: prefer using pre-processed images
        :return tuple(str,str,str,str): paths to the reference image, moving image,
            reference landmarks and moving landmarks
        """

        def __path_img(col):
            is_temp = isinstance(item.get(col + self.COL_IMAGE_EXT_TEMP, None), str)
            if prefer_pproc and is_temp:
                path = self._absolute_path(item[col + self.COL_IMAGE_EXT_TEMP], destination='expt')
            else:
                path = self._absolute_path(item[col], destination='data')
            return path

        paths = [__path_img(col) for col in (self.COL_IMAGE_REF, self.COL_IMAGE_MOVE)]
        paths += [
            self._absolute_path(item[col], destination='data') for col in (self.COL_POINTS_REF, self.COL_POINTS_MOVE)
        ]
        return paths

    def _get_path_reg_dir(self, item):
        return self._absolute_path(str(item[self.COL_REG_DIR]), destination='expt')

    def _load_data(self):
        """ load the cover file with all registration pairs """
        logging.info('-> loading data...')
        # loading the csv cover file
        assert os.path.isfile(self.params['path_table']), \
            'path to csv cover is not defined - %s' % self.params['path_table']
        self._df_overview = pd.read_csv(self.params['path_table'], index_col=None)
        self._df_overview = _df_drop_unnamed(self._df_overview)
        assert all(col in self._df_overview.columns for col in self.COVER_COLUMNS), \
            'Some required columns are missing in the cover file.'

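    # The cover table is a CSV with one row per registration pair; the columns listed in
    # COVER_COLUMNS are required.  An illustrative layout (file names are made up):
    #
    #   ,Target image,Source image,Target landmarks,Source landmarks
    #   0,images/case1_HE.png,images/case1_ER.png,landmarks/case1_HE.csv,landmarks/case1_ER.csv
    #
    # The paths may be absolute or relative to the dataset folder (`-d` in the sample run above).
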
    def _run(self):
        """ perform the complete benchmark experiment """
        logging.info('-> perform set of experiments...')

        # load existing results or create a new table
        if os.path.isfile(self._path_csv_regist):
            logging.info('loading existing csv: "%s"', self._path_csv_regist)
            self._df_experiments = pd.read_csv(self._path_csv_regist, index_col=None)
            self._df_experiments = _df_drop_unnamed(self._df_experiments)
            if 'ID' in self._df_experiments.columns:
                self._df_experiments.set_index('ID', inplace=True)
        else:
            self._df_experiments = pd.DataFrame()

        # run the experiments in parallel or in a single thread
        self.__execute_method(
            self._perform_registration,
            self._df_overview,
            self._path_csv_regist,
            'registration experiments',
            aggr_experiments=True,
        )

    def __execute_method(self, method, input_table, path_csv=None, desc='', aggr_experiments=False, nb_workers=None):
        """ execute a method in sequence or in parallel

        :param func method: used method
        :param DF input_table: table to iterate over
        :param str path_csv: path to the output temporary csv
        :param str desc: name of the running process
        :param bool aggr_experiments: append output to the experiment DF
        :param int|None nb_workers: number of jobs, by default using the class setting
        """
        # temporarily mark that we are not in the main thread
        self._main_thread = False
        # run the experiments in parallel or in a single thread
        nb_workers = self.nb_workers if nb_workers is None else nb_workers
        iter_table = ((idx, dict(row)) for idx, row in input_table.iterrows())
        for res in iterate_mproc_map(method, iter_table, nb_workers=nb_workers, desc=desc):
            if res is not None and aggr_experiments:
                self._df_experiments = self._df_experiments.append(res, ignore_index=True)
                self.__export_df_experiments(path_csv)
        self._main_thread = True

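    # Note: results are appended and the temporary CSV re-exported after every finished
    # experiment, so partial results survive an interrupted run and already completed
    # pairs are skipped on restart (see __check_exist_regist).
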
    def __export_df_experiments(self, path_csv=None):
        """ export the DataFrame with registration results

        :param str | None path_csv: path to output CSV file
        """
        if path_csv is not None:
            if 'ID' in self._df_experiments.columns:
                self._df_experiments.set_index('ID').to_csv(path_csv)
            else:
                self._df_experiments.to_csv(path_csv, index=None)

    def __check_exist_regist(self, idx, path_dir_reg):
        """ check whether the particular experiment already exists and has results

        the experiment is considered done only if its folder exists and it is also part
        of the loaded finished experiments; an existing folder alone may mean
        a failed experiment

        :param int idx: index of the particular experiment
        :param str path_dir_reg: path to the experiment folder
        :return bool:
        """
        b_df_col = ('ID' in self._df_experiments.columns and idx in self._df_experiments['ID'])
        b_df_idx = idx in self._df_experiments.index
        check = os.path.exists(path_dir_reg) and (b_df_col or b_df_idx)
        if check:
            logging.warning('particular registration experiment already exists: "%r"', idx)
        return check

    def __images_preprocessing(self, item):
        """ create pre-processed images, e.g. grayscale conversion and histogram matching

        :param dict item: the input record
        :return dict: updated item with optionally added pre-processed images
        """
        path_dir = self._get_path_reg_dir(item)

        def __path_img(path_img, pproc):
            img_name, img_ext = os.path.splitext(os.path.basename(path_img))
            return os.path.join(path_dir, img_name + '_' + pproc + img_ext)

        def __save_img(col, path_img_new, img):
            col_temp = col + self.COL_IMAGE_EXT_TEMP
            if isinstance(item.get(col_temp, None), str):
                path_img = self._absolute_path(item[col_temp], destination='expt')
                os.remove(path_img)
            save_image(path_img_new, img)
            return self._relativize_path(path_img_new, destination='path_exp'), col

        def __convert_gray(path_img_col):
            path_img, col = path_img_col
            path_img_new = __path_img(path_img, 'gray')
            __save_img(col, path_img_new, rgb2gray(load_image(path_img)))
            return self._relativize_path(path_img_new, destination='path_exp'), col

        for pproc in self.params.get('preprocessing', []):
            path_img_ref, path_img_move, _, _ = self._get_paths(item, prefer_pproc=True)
            if pproc.startswith('match'):
                color_space = pproc.split('-')[-1]
                path_img_new = __path_img(path_img_move, pproc)
                img = image_histogram_matching(
                    load_image(path_img_move),
                    load_image(path_img_ref),
                    use_color=color_space,
                )
                path_img_new, col = __save_img(self.COL_IMAGE_MOVE, path_img_new, img)
                item[col + self.COL_IMAGE_EXT_TEMP] = path_img_new
            elif pproc in ('gray', 'grey'):
                argv_params = [(path_img_ref, self.COL_IMAGE_REF), (path_img_move, self.COL_IMAGE_MOVE)]
                # IDEA: find a way how to convert images in parallel inside mproc pool;
                #  the problem is in calling a class method inside the pool which is not static
                for path_img, col in iterate_mproc_map(__convert_gray, argv_params, nb_workers=1, desc=None):
                    item[col + self.COL_IMAGE_EXT_TEMP] = path_img
            else:
                logging.warning('unrecognized pre-processing: %s', pproc)
        return item

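    # The 'preprocessing' parameter is a list of steps applied to each image pair before
    # registration: 'gray' / 'grey' converts both images to grayscale, and any value
    # starting with 'match' (e.g. 'match-rgb') runs histogram matching of the moving image
    # to the reference, with the text after the dash passed as the colour space.
    # The exact option strings offered on the command line come from the argument parser.
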
    def __remove_pproc_images(self, item):
        """ remove the pre-processed (temporary) images if they are not also the final outputs

        :param dict item: the input record
        :return dict: updated item with optionally removed temp images
        """
        # clean only if some pre-processing was required
        if not self.params.get('preprocessing', []):
            return item
        # iterate over both - target and source images
        for col_in, col_warp in [(self.COL_IMAGE_REF, self.COL_IMAGE_REF_WARP),
                                 (self.COL_IMAGE_MOVE, self.COL_IMAGE_MOVE_WARP)]:
            col_temp = col_in + self.COL_IMAGE_EXT_TEMP
            is_temp = isinstance(item.get(col_temp, None), str)
            # skip if the field is empty
            if not is_temp:
                continue
            # the warped image is not the same as the pre-processed image
            elif item.get(col_warp, None) != item.get(col_temp, None):
                # update the path to the pre-processed image in the experiment folder
                path_img = self._absolute_path(item[col_temp], destination='expt')
                # remove the temporary image
                os.remove(path_img)
            del item[col_temp]
        return item

    def _perform_registration(self, df_row):
        """ run a single registration experiment with all sub-stages

        :param tuple(int,dict) df_row: row from the iterated table
        """
        idx, row = df_row
        logging.debug('-> perform single registration #%d...', idx)
        # create folder for this particular experiment
        row['ID'] = idx
        row[self.COL_REG_DIR] = str(idx)
        path_dir_reg = self._get_path_reg_dir(row)
        # check whether the particular experiment already exists and has results
        if self.__check_exist_regist(idx, path_dir_reg):
            return None
        create_folder(path_dir_reg)

        time_start = time.time()
        # do some requested pre-processing if required
        row = self.__images_preprocessing(row)
        row[self.COL_TIME_PREPROC] = (time.time() - time_start) / 60.
        row = self._prepare_img_registration(row)
        # if the pre-processing failed, return None
        if not row:
            return None

        # measure execution time
        time_start = time.time()
        row = self._execute_img_registration(row)
        # if the experiment failed, return None
        if not row:
            return None
        # compute the registration time in minutes
        row[self.COL_TIME] = (time.time() - time_start) / 60.
        # remove some temporary images
        row = self.__remove_pproc_images(row)

        row = self._parse_regist_results(row)
        # if the post-processing failed, return None
        if not row:
            return None
        row = self._clear_after_registration(row)

        if self.params.get('visual', False):
            logging.debug('-> visualise results of experiment: %r', idx)
            self.visualise_registration(
                (idx, row),
                path_dataset=self.params.get('path_dataset', None),
                path_experiment=self.params.get('path_exp', None),
            )

        return row

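    # Sub-classes wrapping a real registration method typically override the hooks called
    # from this pipeline: _prepare_img_registration, _generate_regist_command,
    # _extract_warped_image_landmarks, _extract_execution_time and _clear_after_registration
    # (see the sketch after this class).
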
    def _evaluate(self):
        """ evaluate the complete benchmark experiment """
        logging.info('-> evaluate experiment...')
        # load _df_experiments and compute stat
        _compute_landmarks_statistic = partial(
            self.compute_registration_statistic,
            df_experiments=self._df_experiments,
            path_dataset=self.params.get('path_dataset', None),
            path_experiment=self.params.get('path_exp', None),
        )
        self.__execute_method(_compute_landmarks_statistic, self._df_experiments, desc='compute TRE', nb_workers=1)

    def _summarise(self):
        """ summarise the benchmark experiment """
        # export stat to csv
        if self._df_experiments.empty:
            logging.warning('no experimental results were collected')
            return
        self.__export_df_experiments(self._path_csv_regist)
        # export simple stat to txt
        export_summary_results(self._df_experiments, self.params['path_exp'], self.params)

    @classmethod
    def _prepare_img_registration(cls, item):
        """ prepare the experiment folder if it is required,
        e.g. copy some extra files

        :param dict item: dictionary with regist. params
        :return dict: the same or updated registration info
        """
        logging.debug('.. no preparation before registration experiment')
        return item

    def _execute_img_registration(self, item):
        """ execute the image registration itself

        :param dict item: record
        :return dict: record
        """
        logging.debug('.. execute image registration as command line')
        path_dir_reg = self._get_path_reg_dir(item)

        commands = self._generate_regist_command(item)
        # in case it is just one command
        if not isinstance(commands, (list, tuple)):
            commands = [commands]

        path_log = os.path.join(path_dir_reg, self.NAME_LOG_REGISTRATION)
        # TODO, add lock to single thread, create pool with possible thread ids
        # (USE taskset [native], numactl [need install])
        # measure execution time
        cmd_result = exec_commands(commands, path_log, timeout=self.EXECUTE_TIMEOUT)
        # if the experiment failed, return None
        if not cmd_result:
            item = None
        return item

    def _generate_regist_command(self, item):
        """ generate the registration command(s)

        :param dict item: dictionary with registration params
        :return str|list(str): the execution commands
        """
        logging.debug('.. simulate registration: copy the target image and landmarks, simulate ideal case')
        path_im_ref, _, _, path_lnds_move = self._get_paths(item)
        path_reg_dir = self._get_path_reg_dir(item)
        name_img = os.path.basename(item[self.COL_IMAGE_MOVE])
        cmd_img = 'cp %s %s' % (path_im_ref, os.path.join(path_reg_dir, name_img))
        name_lnds = os.path.basename(item[self.COL_POINTS_MOVE])
        cmd_lnds = 'cp %s %s' % (path_lnds_move, os.path.join(path_reg_dir, name_lnds))
        commands = [cmd_img, cmd_lnds]
        return commands

    @classmethod
    def _extract_warped_image_landmarks(cls, item):
        """ get the registration results - warped registered images and landmarks

        :param dict item: dictionary with registration params
        :return dict: paths to warped images/landmarks
        """
        # detect image
        path_img = os.path.join(item[cls.COL_REG_DIR], os.path.basename(item[cls.COL_IMAGE_MOVE]))
        # detect landmarks
        path_lnd = os.path.join(item[cls.COL_REG_DIR], os.path.basename(item[cls.COL_POINTS_MOVE]))
        # return formatted results
        return {
            cls.COL_IMAGE_REF_WARP: None,
            cls.COL_IMAGE_MOVE_WARP: path_img,
            cls.COL_POINTS_REF_WARP: path_lnd,
            cls.COL_POINTS_MOVE_WARP: None,
        }

    def _extract_execution_time(self, item):
        """ update the execution time if needed

        :param dict item: dictionary {str: value} with registration params
        :return float|None: time in minutes
        """
        _ = self._get_path_reg_dir(item)
        return None

    def _parse_regist_results(self, item):
        """ evaluate the results of the experiment and identify the registered image
        and landmarks when the process has finished

        :param dict item: dictionary {str: value} with registration params
        :return dict:
        """
        # Update the registration outputs / paths
        res_paths = self._extract_warped_image_landmarks(item)

        for col in (k for k in res_paths if res_paths[k] is not None):
            path = res_paths[col]
            # detect image and landmarks
            path = self._relativize_path(path, 'path_exp')
            if os.path.isfile(self._absolute_path(path, destination='expt')):
                item[col] = path

        # Update the registration time
        exec_time = self._extract_execution_time(item)
        if exec_time:
            # compute the registration time in minutes
            item[self.COL_TIME] = exec_time

        return item

    @classmethod
    def _clear_after_registration(cls, item):
        """ clean unnecessary files after the registration

        :param dict item: dictionary with regist. information
        :return dict: the same or updated regist. info
        """
        logging.debug('.. no cleaning after registration experiment')
        return item

    @staticmethod
    def extend_parse(arg_parser):
        """ extend the basic argument parser with benchmark-specific options """
        return arg_parser

    @classmethod
    def main(cls, params=None):
        """ run the main routine of the selected benchmark

        :param cls: class of the selected benchmark
        :param dict params: set of input parameters
        """
        if not params:
            arg_parser = create_basic_parser(cls.__name__)
            arg_parser = cls.extend_parse(arg_parser)
            params = parse_arg_params(arg_parser)

        logging.info('running...')
        benchmark = cls(params)
        benchmark.run()
        path_expt = benchmark.params['path_exp']
        logging.info('Done.')
        return params, path_expt

    @classmethod
    def _image_diag(cls, item, path_img_ref=None):
        """ get the image diagonal from several sources
            1. diagonal exists in the table
            2. image size exists in the table
            3. reference image exists

        :param dict|DF item: one row from the table
        :param str path_img_ref: optional path to the reference image
        :return float|None: image diagonal
        """
        img_diag = dict(item).get(cls.COL_IMAGE_DIAGONAL, None)
        if not img_diag and path_img_ref and os.path.isfile(path_img_ref):
            _, img_diag = image_sizes(path_img_ref)
        return img_diag

    @classmethod
    def _load_landmarks(cls, item, path_dataset):
        path_img_ref, _, path_lnds_ref, path_lnds_move = \
            [update_path(item[col], pre_path=path_dataset) for col in cls.COVER_COLUMNS]
        points_ref = load_landmarks(path_lnds_ref)
        points_move = load_landmarks(path_lnds_move)
        return points_ref, points_move, path_img_ref

    @classmethod
    def compute_registration_statistic(
        cls,
        idx_row,
        df_experiments,
        path_dataset=None,
        path_experiment=None,
        path_reference=None,
    ):
        """ after a successful registration, load the initial and estimated landmarks
        and afterwards compute various statistics for the initial and final alignment

        :param tuple(int,dict) idx_row: row from the iterated table
        :param DF df_experiments: DataFrame with experiments
        :param str|None path_dataset: path to the provided dataset folder
        :param str|None path_reference: path to the complete landmark collection folder
        :param str|None path_experiment: path to the experiment folder
        """
        idx, row = idx_row
        row = dict(row)  # convert even series to dictionary
        # load common landmarks and image size
        points_ref, points_move, path_img_ref = cls._load_landmarks(row, path_dataset)
        img_diag = cls._image_diag(row, path_img_ref)
        df_experiments.loc[idx, cls.COL_IMAGE_DIAGONAL] = img_diag

        # compute landmarks statistic
        cls.compute_registration_accuracy(
            df_experiments, idx, points_ref, points_move, 'init', img_diag, wo_affine=False
        )

        # define what is the target and init state according to the experiment results
        use_move_warp = isinstance(row.get(cls.COL_POINTS_MOVE_WARP, None), str)
        if use_move_warp:
            points_init, points_target = points_move, points_ref
            col_source, col_target = cls.COL_POINTS_MOVE, cls.COL_POINTS_REF
            col_lnds_warp = cls.COL_POINTS_MOVE_WARP
        else:
            points_init, points_target = points_ref, points_move
            col_lnds_warp = cls.COL_POINTS_REF_WARP
            col_source, col_target = cls.COL_POINTS_REF, cls.COL_POINTS_MOVE

        # optional filtering
        if path_reference:
            ratio, points_target, _ = \
                filter_paired_landmarks(row, path_dataset, path_reference, col_source, col_target)
            df_experiments.loc[idx, COL_PAIRED_LANDMARKS] = np.round(ratio, 2)

        # load transformed landmarks
        if (cls.COL_POINTS_MOVE_WARP not in row) and (cls.COL_POINTS_REF_WARP not in row):
            logging.error('Statistic: no output landmarks')
            return

        # check if there are reference landmarks
        if points_target is None:
            logging.warning('Missing landmarks in "%s"', cls.COL_POINTS_REF if use_move_warp else cls.COL_POINTS_MOVE)
            return
        # load warped landmarks
        path_lnds_warp = update_path(row[col_lnds_warp], pre_path=path_experiment)
        if path_lnds_warp and os.path.isfile(path_lnds_warp):
            points_warp = load_landmarks(path_lnds_warp)
            points_warp = np.nan_to_num(points_warp)
        else:
            logging.warning('Invalid path to the landmarks: "%s" <- "%s"', path_lnds_warp, row[col_lnds_warp])
            return
        df_experiments.loc[idx, cls.COL_NB_LANDMARKS_INPUT] = min(len(points_init), len(points_target))
        df_experiments.loc[idx, cls.COL_NB_LANDMARKS_WARP] = len(points_warp)

        # compute Affine statistic
        affine_diff = compute_affine_transf_diff(points_init, points_target, points_warp)
        for name in affine_diff:
            df_experiments.loc[idx, name] = affine_diff[name]

        # compute landmarks statistic
        cls.compute_registration_accuracy(
            df_experiments, idx, points_target, points_warp, 'elastic', img_diag, wo_affine=True
        )
        # compute landmarks statistic
        cls.compute_registration_accuracy(
            df_experiments, idx, points_target, points_warp, 'target', img_diag, wo_affine=False
        )
        row_ = dict(df_experiments.loc[idx])
        # compute the robustness
        if 'TRE Mean' in row_:
            df_experiments.loc[idx, cls.COL_ROBUSTNESS] = \
                compute_tre_robustness(points_target, points_init, points_warp)

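    # For every pair, the evaluation above records the initial error ('IRE *'), the final error
    # ('TRE *'), the affine-free error ('TRE * (elastic)'), their diagonal-normalised 'r...'
    # variants when the image diagonal is known, the affine-difference measures, the numbers of
    # input and warped landmarks, and the 'Robustness' score.
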
    @classmethod
    def compute_registration_accuracy(
        cls,
        df_experiments,
        idx,
        points1,
        points2,
        state='',
        img_diag=None,
        wo_affine=False,
    ):
        """ compute statistics on two point sets

        IRE - Initial Registration Error
        TRE - Target Registration Error

        :param DF df_experiments: DataFrame with experiments
        :param int idx: index of the particular record
        :param ndarray points1: np.array<nb_points, dim>
        :param ndarray points2: np.array<nb_points, dim>
        :param str state: whether it was before or after registration
        :param float img_diag: target image diagonal
        :param bool wo_affine: without affine transform, assume only local/elastic deformation
        """
        if wo_affine and points1 is not None and points2 is not None:
            # remove the affine transform and assume only local/elastic deformation
            _, _, points1, _ = estimate_affine_transform(points1, points2)

        _, stats = compute_target_regist_error_statistic(points1, points2)
        if img_diag is not None:
            df_experiments.at[idx, cls.COL_IMAGE_DIAGONAL] = img_diag
        # update particular idx
        for n_stat in (n for n in stats if n not in ['overlap points']):
            # if it is not one of the simplified names
            if state and state not in ('init', 'final', 'target'):
                name = 'TRE %s (%s)' % (n_stat, state)
            else:
                # for the initial state use IRE, else TRE
                name = '%s %s' % ('IRE' if state == 'init' else 'TRE', n_stat)
            if img_diag is not None:
                df_experiments.at[idx, 'r%s' % name] = stats[n_stat] / img_diag
            df_experiments.at[idx, name] = stats[n_stat]
        for n_stat in ['overlap points']:
            df_experiments.at[idx, '%s (%s)' % (n_stat, state)] = stats[n_stat]

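    # Naming example: for state='target' the statistic 'Mean' is stored as 'TRE Mean'
    # (and 'rTRE Mean' = 'TRE Mean' / image diagonal), for state='init' as 'IRE Mean' /
    # 'rIRE Mean', and for any other state, e.g. 'elastic', as 'TRE Mean (elastic)'.
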
    @classmethod
    def _load_warped_image(cls, item, path_experiment=None):
        """ load the warped image if it exists

        :param dict item: row with the experiment
        :param str|None path_experiment: path to the experiment folder
        :return ndarray:
        """
        name_img = item.get(cls.COL_IMAGE_MOVE_WARP, None)
        if not isinstance(name_img, str):
            logging.warning('Missing registered image in "%s"', cls.COL_IMAGE_MOVE_WARP)
            image_warp = None
        else:
            path_img_warp = update_path(name_img, pre_path=path_experiment)
            if os.path.isfile(path_img_warp):
                image_warp = load_image(path_img_warp)
            else:
                logging.warning('Defined image is missing: %s', path_img_warp)
                image_warp = None
        return image_warp

    @classmethod
    def _visual_image_move_warp_lnds_move_warp(cls, item, path_dataset=None, path_experiment=None):
        """ visualise the case with moving image and landmarks warped
        to the reference frame, so they are easy to overlap

        :param dict item: row with the experiment
        :param str|None path_dataset: path to the dataset folder
        :param str|None path_experiment: path to the experiment folder
        :return obj|None:
        """
        assert isinstance(item.get(cls.COL_POINTS_MOVE_WARP, None), str), \
            'Missing registered points in "%s"' % cls.COL_POINTS_MOVE_WARP
        path_points_warp = update_path(item[cls.COL_POINTS_MOVE_WARP], pre_path=path_experiment)
        if not os.path.isfile(path_points_warp):
            logging.warning('missing warped landmarks for: %r', dict(item))
            return

        points_ref, points_move, path_img_ref = cls._load_landmarks(item, path_dataset)

        image_warp = cls._load_warped_image(item, path_experiment)
        points_warp = load_landmarks(path_points_warp)
        if not list(points_warp):
            return
        # draw image with landmarks
        image = draw_image_points(image_warp, points_warp)
        _path = update_path(item[cls.COL_REG_DIR], pre_path=path_experiment)
        save_image(os.path.join(_path, cls.NAME_IMAGE_MOVE_WARP_POINTS), image)
        del image

        # visualise how the landmarks moved during registration
        image_ref = load_image(path_img_ref)
        fig = draw_images_warped_landmarks(image_ref, image_warp, points_move, points_ref, points_warp)
        del image_ref, image_warp
        return fig

    @classmethod
    def _visual_image_move_warp_lnds_ref_warp(cls, item, path_dataset=None, path_experiment=None):
        """ visualise the case with reference landmarks warped to the moving frame

        :param dict item: row with the experiment
        :param str|None path_dataset: path to the dataset folder
        :param str|None path_experiment: path to the experiment folder
        :return obj|None:
        """
        assert isinstance(item.get(cls.COL_POINTS_REF_WARP, None), str), \
            'Missing registered points in "%s"' % cls.COL_POINTS_REF_WARP
        path_points_warp = update_path(item[cls.COL_POINTS_REF_WARP], pre_path=path_experiment)
        if not os.path.isfile(path_points_warp):
            logging.warning('missing warped landmarks for: %r', dict(item))
            return

        points_ref, points_move, path_img_ref = cls._load_landmarks(item, path_dataset)

        points_warp = load_landmarks(path_points_warp)
        if not list(points_warp):
            return
        # draw image with landmarks
        image_move = load_image(update_path(item[cls.COL_IMAGE_MOVE], pre_path=path_dataset))
        image = draw_image_points(image_move, points_warp)
        _path = update_path(item[cls.COL_REG_DIR], pre_path=path_experiment)
        save_image(os.path.join(_path, cls.NAME_IMAGE_REF_POINTS_WARP), image)
        del image

        image_ref = load_image(path_img_ref)
        image_warp = cls._load_warped_image(item, path_experiment)
        image = overlap_two_images(image_ref, image_warp)
        _path = update_path(item[cls.COL_REG_DIR], pre_path=path_experiment)
        save_image(os.path.join(_path, cls.NAME_IMAGE_REF_WARP), image)
        del image, image_warp

        # visualise how the landmarks moved during registration
        fig = draw_images_warped_landmarks(image_ref, image_move, points_ref, points_move, points_warp)
        del image_ref, image_move
        return fig

    @classmethod
    def visualise_registration(cls, idx_row, path_dataset=None, path_experiment=None):
        """ visualise the registration results according to which landmarks were
        estimated - in the reference (registration) or moving frame

        :param tuple(int,dict) idx_row: row from the iterated table
        :param str path_dataset: path to the dataset folder
        :param str path_experiment: path to the experiment folder
        """
        _, row = idx_row
        row = dict(row)  # convert even series to dictionary
        fig, path_fig = None, None
        # visualise particular experiment by idx
        if isinstance(row.get(cls.COL_POINTS_MOVE_WARP, None), str):
            fig = cls._visual_image_move_warp_lnds_move_warp(row, path_dataset, path_experiment)
        elif isinstance(row.get(cls.COL_POINTS_REF_WARP, None), str):
            fig = cls._visual_image_move_warp_lnds_ref_warp(row, path_dataset, path_experiment)
        else:
            logging.error('Visualisation: no output image or landmarks')

        if fig is not None:
            path_fig = os.path.join(
                update_path(row[cls.COL_REG_DIR], pre_path=path_experiment), cls.NAME_IMAGE_WARPED_VISUAL
            )
            export_figure(path_fig, fig)

        return path_fig


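# How a real registration method plugs into the template: a sub-class overrides the hooks
# called from `_perform_registration`. The class below is only an illustrative sketch and is
# not used anywhere in the benchmark; the command-line tool ``my-register`` and its output
# file names are hypothetical placeholders for an actual registration executable.
class _ExampleBmRegistration(ImRegBenchmark):
    """ Sketch of wrapping a command-line registration tool. """

    def _generate_regist_command(self, item):
        # compose the shell command registering one image pair into its experiment folder
        path_im_ref, path_im_move, _, path_lnds_move = self._get_paths(item)
        path_dir = self._get_path_reg_dir(item)
        return 'my-register --target "%s" --source "%s" --landmarks "%s" --output "%s"' \
               % (path_im_ref, path_im_move, path_lnds_move, path_dir)

    def _extract_warped_image_landmarks(self, item):
        # point the benchmark to the outputs the hypothetical tool is assumed to produce
        path_dir = self._get_path_reg_dir(item)
        return {
            self.COL_IMAGE_MOVE_WARP: os.path.join(path_dir, 'warped-image.jpg'),
            self.COL_POINTS_MOVE_WARP: os.path.join(path_dir, 'warped-landmarks.csv'),
        }

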
def _df_drop_unnamed(df):
    """Drop columns that come from an unnamed index and were loaded as `Unnamed: 0`."""
    df = df[list(filter(lambda c: not c.startswith('Unnamed:'), df.columns))]
    return df


def filter_paired_landmarks(item, path_dataset, path_reference, col_source, col_target):
    """ filter all relevant landmarks which were used and copy them to the experiment

    The background is that in a certain challenge stage only a subset of all image landmarks
     was provided, and it could also be shuffled. The idea is to identify all the used
     (provided in the dataset) landmarks and filter them from the temporary
     reference collection.

    :param dict|Series item: one row of the experiment DataFrame
    :param str path_dataset: path to the provided landmarks
    :param str path_reference: path to the complete landmark collection
    :param str col_source: column name of landmarks to be transformed
    :param str col_target: column name of landmarks to be compared
    :return tuple(float,ndarray,ndarray): match ratio, filtered ref and move landmarks

    >>> p_data = update_path('data-images')
    >>> p_csv = os.path.join(p_data, 'pairs-imgs-lnds_histol.csv')
    >>> df = pd.read_csv(p_csv)
    >>> ratio, lnds_ref, lnds_move = filter_paired_landmarks(dict(df.iloc[0]), p_data, p_data,
    ...     ImRegBenchmark.COL_POINTS_MOVE, ImRegBenchmark.COL_POINTS_REF)
    >>> ratio
    1.0
    >>> lnds_ref.shape == lnds_move.shape
    True
    """
    path_ref = update_path(item[col_source], pre_path=path_reference)
    assert os.path.isfile(path_ref), 'missing landmarks: %s' % path_ref
    path_load = update_path(item[col_source], pre_path=path_dataset)
    assert os.path.isfile(path_load), 'missing landmarks: %s' % path_load
    pairs = common_landmarks(load_landmarks(path_ref), load_landmarks(path_load), threshold=1)
    if not pairs.size:
        logging.warning('there is no pairing between the dataset landmarks and the user reference')
        return 0., np.empty([0]), np.empty([0])

    pairs = sorted(pairs.tolist(), key=lambda p: p[1])
    ind_ref = np.asarray(pairs)[:, 0]
    nb_common = min([
        len(load_landmarks(update_path(item[col], pre_path=path_reference))) for col in (col_target, col_source)
    ])
    ind_ref = ind_ref[ind_ref < nb_common]

    path_lnd_ref = update_path(item[col_target], pre_path=path_reference)
    lnds_filter_ref = load_landmarks(path_lnd_ref)[ind_ref]
    path_lnd_move = update_path(item[col_source], pre_path=path_reference)
    lnds_filter_move = load_landmarks(path_lnd_move)[ind_ref]

    ratio_matches = len(ind_ref) / float(nb_common)
    assert ratio_matches <= 1, 'suspicious ratio for %i paired and %i common landmarks' \
                               % (len(pairs), nb_common)
    return ratio_matches, lnds_filter_ref, lnds_filter_move


def export_summary_results(
    df_experiments,
    path_out,
    params=None,
    name_txt=ImRegBenchmark.NAME_RESULTS_TXT,
    name_csv=ImRegBenchmark.NAME_RESULTS_CSV,
):
    """ export the summary as CSV and TXT

    :param DF df_experiments: DataFrame with experiments
    :param str path_out: path to the output folder
    :param dict|None params: experiment parameters
    :param str name_csv: name of the CSV results file
    :param str name_txt: name of the TXT results file

    >>> export_summary_results(pd.DataFrame(), '')
    """
    custom_percentiles = np.arange(0., 1., 0.05)
    if df_experiments.empty:
        logging.error('No registration results found.')
        return
    if 'ID' in df_experiments.columns:
        df_experiments.set_index('ID', inplace=True)
    df_summary = df_experiments.describe(percentiles=custom_percentiles).T
    df_summary['median'] = df_experiments.median()
    nb_missing = np.sum(df_experiments['IRE Mean'].isnull())\
        if 'IRE Mean' in df_experiments.columns else len(df_experiments)
    df_summary['missing'] = nb_missing / float(len(df_experiments))
    df_summary.sort_index(inplace=True)
    path_csv = os.path.join(path_out, name_csv)
    logging.debug('exporting CSV summary: %s', path_csv)
    df_summary.to_csv(path_csv)

    path_txt = os.path.join(path_out, name_txt)
    logging.debug('exporting TXT summary: %s', path_txt)
    pd.set_option('display.float_format', '{:10,.3f}'.format)
    pd.set_option('expand_frame_repr', False)
    with open(path_txt, 'w') as fp:
        if params:
            fp.write(string_dict(params, 'CONFIGURATION:'))
        fp.write('\n' * 3 + 'RESULTS:\n')
        fp.write('completed registration experiments: %i' % len(df_experiments))
        fp.write('\n' * 2)
        fp.write(repr(df_summary[['mean', 'std', 'median', 'min', 'max', 'missing', '5%', '25%', '50%', '75%', '95%']]))
