#56 Fix measure performance frequency + Add loss log

Open · Emanuele Ghelfi (EmanueleGhelfi)



@@ -24,19 +24,20 @@
 class ClassifierLoss(Executor):
     r"""Classifier Loss Executor using the classifier model, instantiated with a fn."""
 
-    def __init__(self, fn: tf.keras.losses.Loss) -> None:
+    def __init__(self, fn: tf.keras.losses.Loss, name: str = "ClassifierLoss") -> None:
         r"""
         Initialize :py:class:`ClassifierLoss`.
 
         Args:
             fn (:py:class:`tf.keras.losses.Loss`): Classification Loss function, should
                 take as input labels and prediction.
+            name (str): Name of the loss. It will be used for logging in TensorBoard.
 
         Returns:
             :py:obj:`None`
 
         """
-        super().__init__(fn)
+        super().__init__(fn, name=name)
 
     @Executor.reduce_loss
     def call(
@@ -69,4 +70,5 @@
             lambda: loss,
             lambda: tf.expand_dims(tf.expand_dims(loss, axis=-1), axis=-1),
         )
-        return tf.reduce_mean(loss, axis=[1, 2])
+        loss = tf.reduce_mean(loss, axis=[1, 2])
+        return loss
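Net effect of this hunk: every `ClassifierLoss` now carries a TensorBoard-friendly name that defaults to `"ClassifierLoss"`. A minimal usage sketch; the `from ashpy.losses import ClassifierLoss` path is an assumption, since it is not shown in this diff:

```python
import tensorflow as tf

from ashpy.losses import ClassifierLoss  # import path assumed

# Two classifier losses that previously could not be told apart in the logs;
# the new `name` argument keeps their TensorBoard scalar tags distinct.
main_loss = ClassifierLoss(tf.keras.losses.CategoricalCrossentropy(from_logits=True))
aux_loss = ClassifierLoss(tf.keras.losses.MeanSquaredError(), name="AuxiliaryLoss")

print(main_loss.name)  # ClassifierLoss (the new default)
print(aux_loss.name)   # AuxiliaryLoss
```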

@@ -17,7 +17,7 @@
 from __future__ import annotations
 
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Callable, Dict, Union
+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union
 
 import tensorflow as tf  # pylint: disable=import-error
 from ashpy.metrics.metric import Metric
@@ -90,6 +90,7 @@
     def __init__(
         self,
         metric: tf.keras.metrics.Metric,
+        name: Optional[str] = None,
         model_selection_operator: Callable = None,
         logdir: Union[Path, str] = Path().cwd() / "log",
         processing_predictions=None,
@@ -100,6 +101,7 @@
         Args:
             metric (:py:class:`tf.keras.metrics.Metric`): The Keras Metric to use with
                 the classifier (e.g.: Accuracy()).
+            name (str): The name of the metric; if None, the metric.name property is used.
             model_selection_operator (:py:obj:`typing.Callable`): The operation that will
                 be used when `model_selection` is triggered to compare the metrics,
                 used by the `update_state`.
@@ -116,8 +118,10 @@
                 keyword-arguments. Defaults to {"fn": tf.argmax, "kwargs": {"axis": -1}}.
 
         """
+        if name is None:
+            name = metric.name
         super().__init__(
-            name=metric.name,
+            name=name,
             metric=metric,
             model_selection_operator=model_selection_operator,
             logdir=logdir,
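The `if name is None: name = metric.name` fallback keeps the old behaviour by default while letting two copies of the same Keras metric coexist under distinct names. A hedged sketch (import path assumed):

```python
import tensorflow as tf

from ashpy.metrics import ClassifierMetric  # import path assumed

# Default: the wrapper inherits the Keras metric's own name ("accuracy").
train_accuracy = ClassifierMetric(metric=tf.keras.metrics.Accuracy())

# Override: a second Accuracy instance coexists under a distinct name.
val_accuracy = ClassifierMetric(
    metric=tf.keras.metrics.Accuracy(), name="val_accuracy"
)

# `.name` is assumed to be exposed by the ashpy Metric base class.
print(train_accuracy.name, val_accuracy.name)  # accuracy val_accuracy
```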

@@ -173,6 +173,33 @@
                 raise ValueError(f"{obj_type} should have unique names.")
             buffer.append(obj.name)
 
+    @staticmethod
+    def _check_loss_name_collision(losses: List[ashpy.losses.Executor]):
+        """
+        Check that all losses have unique names.
+
+        Args:
+            losses (List[:py:class:`ashpy.losses.Executor`]): List of losses
+                used by the current trainer.
+        Raises:
+            ValueError: if there are losses with conflicting names.
+
+        """
+        names = []
+
+        for loss in losses:
+            if loss.name in names:
+                raise ValueError("Losses should have unique names.")
+            names.append(loss.name)
+
+            if isinstance(loss, ashpy.losses.SumExecutor):
+                loss: ashpy.losses.SumExecutor
+                sublosses_names = [subloss.name for subloss in loss.sublosses]
+                for subloss_name in sublosses_names:
+                    if subloss_name in names:
+                        raise ValueError("Losses should have unique names.")
+                    names.append(subloss_name)
+
     def _validate_metrics(self):
         """Check if every metric is an :py:class:`ashpy.metrics.Metric`."""
         validate_objects(self._metrics, Metric)
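Note: the inner check originally compared `loss.name` (just appended to `names`) instead of `subloss_name`, which would raise for every `SumExecutor`; the diff above uses `subloss_name`. To make the contract concrete, a sketch of what the check accepts and rejects; the host class is assumed to be the base `Trainer` and the import paths are assumptions:

```python
import tensorflow as tf

from ashpy.losses import ClassifierLoss  # import path assumed
from ashpy.trainers import Trainer       # host class assumed

fn = tf.keras.losses.CategoricalCrossentropy()

# Both losses fall back to the default name "ClassifierLoss": the check raises.
try:
    Trainer._check_loss_name_collision([ClassifierLoss(fn), ClassifierLoss(fn)])
except ValueError as err:
    print(err)  # Losses should have unique names.

# Distinct names pass; `+` builds a SumExecutor ("main+aux") whose
# sublosses are flattened into the same uniqueness check.
summed = ClassifierLoss(fn, name="main") + ClassifierLoss(fn, name="aux")
Trainer._check_loss_name_collision([summed])
```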

@@ -71,6 +71,10 @@
                 # the same as the input shape
                 print(output.shape)
 
+            .. testoutput::
+
+                (1, 10, 10, 64)
+
         * Inside a Model:
 
             .. testcode::

@@ -29,12 +29,13 @@
 class Executor:
     """Carry a function and the way of executing it, given a context."""
 
-    def __init__(self, fn: tf.keras.losses.Loss = None) -> None:
+    def __init__(self, fn: tf.keras.losses.Loss = None, name: str = "loss") -> None:
         """
         Initialize the Executor.
 
         Args:
             fn (:py:class:`tf.keras.losses.Loss`): A Keras Loss to execute.
+            name (str): Name of the loss. It will be used for logging in TensorBoard.
 
         Returns:
             :py:obj:`None`
@@ -48,6 +49,13 @@
         self._distribute_strategy = tf.distribute.get_strategy()
         self._global_batch_size = -1
         self._weight = lambda _: 1.0
+        self._name = name
+        self._loss_value = 0
+
+    @property
+    def name(self) -> str:
+        """Return the name of the loss."""
+        return self._name
 
     @property
     def weight(self) -> Callable[..., float]:
@@ -153,17 +161,29 @@
             :py:obj:`tf.Tensor`: Output Tensor.
 
         """
-        return self._weight(context.global_step) * self.call(context, **kwargs)
+        self._loss_value = self._weight(context.global_step) * self.call(
+            context, **kwargs
+        )
+        return self._loss_value
 
-    def __add__(self, other) -> SumExecutor:
+    def log(self, step: tf.Variable):
+        """
+        Log the loss on TensorBoard.
+
+        Args:
+            step (tf.Variable): current training step.
+        """
+        tf.summary.scalar(f"ashpy/losses/{self._name}", self._loss_value, step=step)
+
+    def __add__(self, other: Union[SumExecutor, Executor]) -> SumExecutor:
         """Concatenate Executors together into a SumExecutor."""
         if isinstance(other, SumExecutor):
             other_executors = other.executors
         else:
             other_executors = [other]
 
         all_executors = [self] + other_executors
-        return SumExecutor(all_executors)
+        return SumExecutor(all_executors, name=f"{self._name}+{other._name}")
 
     def __mul__(self, other: Union[Callable[..., float], float, int, tf.Tensor]):
         """
@@ -185,8 +205,8 @@
             self._weight = lambda step: weight(step) * __other(step)
         return self
 
-    def __rmul__(self, other):
-        """See `__mul__` method."""
+    def __rmul__(self, other: Union[SumExecutor, Executor]):
+        """See ``__mul__`` method."""
         return self * other
 
 
@@ -198,19 +218,20 @@
     then summed together.
     """
 
-    def __init__(self, executors) -> None:
+    def __init__(self, executors: List[Executor], name: str = "LossSum") -> None:
         """
         Initialize the SumExecutor.
 
         Args:
             executors (:py:obj:`list` of [:py:class:`ashpy.executors.Executor`]): Array of
                 :py:obj:`ashpy.executors.Executor` to evaluate and sum together.
+            name (str): Name of the loss. It will be used for logging in TensorBoard.
 
         Returns:
             :py:obj:`None`
 
         """
-        super().__init__()
+        super().__init__(name=name)
         self._executors = executors
         self._global_batch_size = 1
@@ -219,6 +240,11 @@
         """Return the List of Executors."""
         return self._executors
 
+    @property
+    def sublosses(self) -> List[Executor]:
+        """Return the List of Executors."""
+        return self._executors
+
     @Executor.global_batch_size.setter  # pylint: disable=no-member
     def global_batch_size(self, global_batch_size: int) -> None:
         """Set global batch size property."""
@@ -235,8 +261,21 @@
             :py:class:`tf.Tensor`: Output Tensor.
 
         """
-        result = tf.add_n([executor(*args, **kwargs) for executor in self._executors])
-        return result
+        self._loss_value = tf.add_n(
+            [executor(*args, **kwargs) for executor in self._executors]
+        )
+        return self._loss_value
+
+    def log(self, step: tf.Variable):
+        """
+        Log the loss and all its sub-losses on TensorBoard.
+
+        Args:
+            step (tf.Variable): current training step.
+        """
+        super().log(step)
+        for executor in self._executors:
+            executor.log(step)
 
     def __add__(self, other: Union[SumExecutor, Executor]):
         """Concatenate Executors together into a SumExecutor."""


Showing 4 files with coverage changes:

    File                            Lines losing / gaining coverage
    src/ashpy/trainers/trainer.py   -1 / +1
    src/ashpy/losses/gan.py         -1 / +1
    src/ashpy/keras/losses.py       -3 / +3
    src/ashpy/losses/executor.py    -5 / +5

23 Commits (9 contextual commits hidden)
Coverage (pull request vs. base commit):

    Files                       Δ        Coverage
    src/ashpy                   0.49%    87.17%
    Project Totals (56 files)            87.17%