Showing 38 of 93 files from the diff.
Other files ignored by Codecov
tox.ini has changed.

@@ -0,0 +1,5 @@
Loading
1 +
"""Utility functions for Markov processes."""
2 +
3 +
from ._generate_measurements import generate_artificial_measurements
4 +
5 +
__all__ = ["generate_artificial_measurements"]
0 6
similarity index 88%
1 7
rename from src/probnum/statespace/generate_samples.py
2 8
rename to src/probnum/randprocs/markov/utils/_generate_measurements.py

@@ -0,0 +1,250 @@
Loading
1 +
"""Integrated Ornstein-Uhlenbeck processes."""
2 +
import warnings
3 +
4 +
import numpy as np
5 +
6 +
try:
7 +
    # cached_property is only available in Python >=3.8
8 +
    from functools import cached_property
9 +
except ImportError:
10 +
    from cached_property import cached_property
11 +
12 +
from probnum import randvars
13 +
from probnum.randprocs.markov import _markov_process, continuous
14 +
from probnum.randprocs.markov.integrator import _integrator, _preconditioner
15 +
16 +
17 +
class IntegratedOrnsteinUhlenbeckProcess(_markov_process.MarkovProcess):
    r"""Integrated Ornstein-Uhlenbeck process.

    Convenience wrapper around :math:`\nu` times integrated, :math:`d`
    dimensional Ornstein-Uhlenbeck processes.

    Parameters
    ----------
    driftspeed
        Drift-speed of the underlying OrnsteinUhlenbeck process.
    initarg
        Initial time point.
    num_derivatives
        Number of modelled derivatives of the integrated process
        (''order'', ''number of integrations'').
        Optional. Default is :math:`\nu=1`.
    wiener_process_dimension
        Dimension of the underlying Wiener process.
        Optional. Default is :math:`d=1`.
        The dimension of the integrated Wiener process itself is :math:`d(\nu + 1)`.
    initrv
        Law of the integrated Wiener process at the initial time point.
        Optional. Default is a :math:`d(\nu + 1)` dimensional standard-normal
        distribution.
    diffuse
        Whether to instantiate a diffuse prior, i.e. one with large initial
        variances. Optional. Default is `False`.
        If `True` and no initial random variable is passed, the created
        initial random variable has covariance :math:`\kappa I_{d(\nu + 1)}`
        with :math:`\kappa=10^6`.
        Diffuse priors are used when initial distributions are not known;
        they are common for filtering-based probabilistic ODE solvers.
    forward_implementation
        Implementation of the forward-propagation in the underlying transitions.
        Optional. Default is `classic`. `sqrt` implementation is more computationally expensive, but also more stable.
    backward_implementation
        Implementation of the backward-conditioning in the underlying transitions.
        Optional. Default is `classic`. `sqrt` implementation is more computationally expensive, but also more stable.

    Raises
    ------
    Warning
        If `initrv` is not None and `diffuse` is True.

    Examples
    --------
    >>> ioup1 = IntegratedOrnsteinUhlenbeckProcess(driftspeed=1., initarg=0.)
    >>> print(ioup1)
    <IntegratedOrnsteinUhlenbeckProcess with input_dim=1, output_dim=2, dtype=float64>

    >>> ioup2 = IntegratedOrnsteinUhlenbeckProcess(driftspeed=1.,initarg=0., num_derivatives=2)
    >>> print(ioup2)
    <IntegratedOrnsteinUhlenbeckProcess with input_dim=1, output_dim=3, dtype=float64>

    >>> ioup3 = IntegratedOrnsteinUhlenbeckProcess(driftspeed=1.,initarg=0., wiener_process_dimension=10)
    >>> print(ioup3)
    <IntegratedOrnsteinUhlenbeckProcess with input_dim=1, output_dim=20, dtype=float64>

    >>> ioup4 = IntegratedOrnsteinUhlenbeckProcess(driftspeed=1.,initarg=0., num_derivatives=4, wiener_process_dimension=1)
    >>> print(ioup4)
    <IntegratedOrnsteinUhlenbeckProcess with input_dim=1, output_dim=5, dtype=float64>
    """

    def __init__(
        self,
        driftspeed,
        initarg,
        num_derivatives=1,
        wiener_process_dimension=1,
        initrv=None,
        diffuse=False,
        forward_implementation="classic",
        backward_implementation="classic",
    ):
        # Underlying LTI SDE transition; its state dimension is d * (nu + 1).
        transition = IntegratedOrnsteinUhlenbeckTransition(
            num_derivatives=num_derivatives,
            wiener_process_dimension=wiener_process_dimension,
            driftspeed=driftspeed,
            forward_implementation=forward_implementation,
            backward_implementation=backward_implementation,
        )

        if initrv is not None and diffuse:
            # `diffuse` only controls the *default* initial law.
            warnings.warn(
                "Parameter `diffuse` has no effect, because an `initrv` has been provided."
            )
        if initrv is None:
            # Standard-normal by default; a diffuse prior inflates the
            # Cholesky factor to 1e3, i.e. covariance kappa * I with kappa = 1e6.
            scale_cholesky = 1e3 if diffuse else 1.0
            dim = transition.dimension
            cov_cholesky = scale_cholesky * np.eye(dim)
            initrv = randvars.Normal(
                mean=np.zeros(dim),
                cov=cov_cholesky ** 2,
                cov_cholesky=cov_cholesky,
            )

        super().__init__(transition=transition, initrv=initrv, initarg=initarg)
111 +
112 +
113 +
class IntegratedOrnsteinUhlenbeckTransition(
    _integrator.IntegratorTransition, continuous.LTISDE
):
    """Integrated Ornstein-Uhlenbeck process in :math:`d` dimensions."""

    def __init__(
        self,
        num_derivatives: int,
        wiener_process_dimension: int,
        driftspeed: float,
        forward_implementation="classic",
        backward_implementation="classic",
    ):
        # Drift-speed of the (non-integrated) Ornstein-Uhlenbeck component.
        # Must be set before LTISDE.__init__ runs, because the cached system
        # matrices below (`_driftmat` etc.) read it.
        self.driftspeed = driftspeed

        _integrator.IntegratorTransition.__init__(
            self,
            num_derivatives=num_derivatives,
            wiener_process_dimension=wiener_process_dimension,
        )
        continuous.LTISDE.__init__(
            self,
            driftmat=self._driftmat,
            forcevec=self._forcevec,
            dispmat=self._dispmat,
            forward_implementation=forward_implementation,
            backward_implementation=backward_implementation,
        )

    @cached_property
    def _driftmat(self):
        # 1d template: ones on the first superdiagonal (integration chain),
        # -driftspeed in the bottom-right corner (mean reversion of the
        # highest modelled derivative).
        driftmat_1d = np.diag(np.ones(self.num_derivatives), 1)
        driftmat_1d[-1, -1] = -self.driftspeed
        # d independent copies, stacked via a Kronecker product.
        return np.kron(np.eye(self.wiener_process_dimension), driftmat_1d)

    @cached_property
    def _forcevec(self):
        # No deterministic forcing term: the force vector is all zeros.
        force_1d = np.zeros(self.num_derivatives + 1)
        return np.kron(np.ones(self.wiener_process_dimension), force_1d)

    @cached_property
    def _dispmat(self):
        # Only the highest derivative is driven by the Wiener process.
        dispmat_1d = np.zeros(self.num_derivatives + 1)
        dispmat_1d[-1] = 1.0  # Unit Diffusion
        return np.kron(np.eye(self.wiener_process_dimension), dispmat_1d).T

    def forward_rv(
        self,
        rv,
        t,
        dt=None,
        compute_gain=False,
        _diffusion=1.0,
        **kwargs,
    ):
        """Propagate ``rv`` from ``t`` by ``dt`` in preconditioned coordinates.

        The random variable and the system matrices are transformed into the
        preconditioned space, the LTI SDE is discretised and propagated there,
        and the preconditioning is undone on the results before returning.

        Raises
        ------
        ValueError
            If no time-increment ``dt`` is provided.
        """
        if dt is None:
            raise ValueError(
                "Continuous-time transitions require a time-increment ``dt``."
            )

        # Fetch things into preconditioned space
        rv = _preconditioner.apply_precon(self.precon.inverse(dt), rv)

        # Apply preconditioning to system matrices.
        # NOTE(review): this mutates the transition in place and is undone
        # below. If `discretise` or the discrete forward step raises in
        # between, the matrices remain preconditioned (not exception-safe) --
        # confirm callers never reuse the transition after a failed step.
        self.driftmat = self.precon.inverse(dt) @ self.driftmat @ self.precon(dt)
        self.forcevec = self.precon.inverse(dt) @ self.forcevec
        self.dispmat = self.precon.inverse(dt) @ self.dispmat

        # Discretise and propagate
        discretised_model = self.discretise(dt=dt)
        rv, info = discretised_model.forward_rv(
            rv, t, compute_gain=compute_gain, _diffusion=_diffusion
        )

        # Undo preconditioning on the outputs and return. The cross-covariance
        # (and gain, if computed) transform with the preconditioner on both
        # sides.
        rv = _preconditioner.apply_precon(self.precon(dt), rv)
        info["crosscov"] = self.precon(dt) @ info["crosscov"] @ self.precon(dt).T
        if "gain" in info:
            info["gain"] = self.precon(dt) @ info["gain"] @ self.precon.inverse(dt).T

        # Restore the original (unpreconditioned) system matrices.
        self.driftmat = self.precon(dt) @ self.driftmat @ self.precon.inverse(dt)
        self.forcevec = self.precon(dt) @ self.forcevec
        self.dispmat = self.precon(dt) @ self.dispmat

        return rv, info

    def backward_rv(
        self,
        rv_obtained,
        rv,
        rv_forwarded=None,
        gain=None,
        t=None,
        dt=None,
        _diffusion=1.0,
        **kwargs,
    ):
        """Condition ``rv`` on ``rv_obtained`` in preconditioned coordinates.

        Mirrors :meth:`forward_rv`: all inputs (including the optional
        forwarded random variable and gain) are transformed into the
        preconditioned space, the discretised backward step is carried out
        there, and the preconditioning is undone afterwards.

        Raises
        ------
        ValueError
            If no time-increment ``dt`` is provided.
        """
        if dt is None:
            raise ValueError(
                "Continuous-time transitions require a time-increment ``dt``."
            )

        # Fetch things into preconditioned space
        rv_obtained = _preconditioner.apply_precon(self.precon.inverse(dt), rv_obtained)
        rv = _preconditioner.apply_precon(self.precon.inverse(dt), rv)
        rv_forwarded = (
            _preconditioner.apply_precon(self.precon.inverse(dt), rv_forwarded)
            if rv_forwarded is not None
            else None
        )
        gain = (
            self.precon.inverse(dt) @ gain @ self.precon.inverse(dt).T
            if gain is not None
            else None
        )

        # Apply preconditioning to system matrices.
        # NOTE(review): in-place mutation, undone below; see the matching
        # remark in `forward_rv` about exception safety.
        self.driftmat = self.precon.inverse(dt) @ self.driftmat @ self.precon(dt)
        self.forcevec = self.precon.inverse(dt) @ self.forcevec
        self.dispmat = self.precon.inverse(dt) @ self.dispmat

        # Discretise and propagate
        discretised_model = self.discretise(dt=dt)
        rv, info = discretised_model.backward_rv(
            rv_obtained=rv_obtained,
            rv=rv,
            rv_forwarded=rv_forwarded,
            gain=gain,
            t=t,
            _diffusion=_diffusion,
        )

        # Undo preconditioning on the output and restore the system matrices.
        rv = _preconditioner.apply_precon(self.precon(dt), rv)
        self.driftmat = self.precon(dt) @ self.driftmat @ self.precon.inverse(dt)
        self.forcevec = self.precon(dt) @ self.forcevec
        self.dispmat = self.precon(dt) @ self.dispmat
        return rv, info

@@ -0,0 +1,5 @@
Loading
1 +
"""Utility functions for integrators."""
2 +
3 +
from ._convert import convert_coordwise_to_derivwise, convert_derivwise_to_coordwise
4 +
5 +
__all__ = ["convert_derivwise_to_coordwise", "convert_coordwise_to_derivwise"]

@@ -0,0 +1,95 @@
Loading
1 +
"""Coordinate changes in state space models."""
2 +
3 +
import abc
4 +
5 +
try:
6 +
    # cached_property is only available in Python >=3.8
7 +
    from functools import cached_property
8 +
except ImportError:
9 +
    from cached_property import cached_property
10 +
11 +
import numpy as np
12 +
import scipy.special  # for vectorised factorial
13 +
14 +
from probnum import config, linops, randvars
15 +
16 +
17 +
def apply_precon(precon, rv):
    """Apply a (diagonal) preconditioner to a Normal random variable."""
    # Public because some integrator implementations need it, but not exposed
    # to the 'randprocs' namespace (i.e. not imported in any __init__.py).
    #
    # Whether `rv` already carries a computed Cholesky factor cannot be
    # checked, so the factor is always updated here: required for square-root
    # filtering, redundant extra cost for non-square-root algorithms.
    # See Issues #319 and #329 -- once they are resolved, this helper will
    # hopefully be superfluous.

    mean = precon @ rv.mean
    cov_cholesky = precon @ rv.cov_cholesky  # precon is diagonal, so this is valid
    return randvars.Normal(
        mean, cov_cholesky @ cov_cholesky.T, cov_cholesky=cov_cholesky
    )
34 +
35 +
36 +
class Preconditioner(abc.ABC):
    """Coordinate changes used as preconditioners in state space models.

    Applying such a coordinate change can make the filtering and smoothing
    steps numerically more stable for some models.
    """

    @abc.abstractmethod
    def __call__(self, step) -> np.ndarray:
        # Extend the signature in the future if more than `step` is needed.
        raise NotImplementedError

    @cached_property
    def inverse(self) -> "Preconditioner":
        # Subclasses override this with the inverse coordinate change.
        raise NotImplementedError
52 +
53 +
class NordsieckLikeCoordinates(Preconditioner):
    """Nordsieck-like coordinates.

    Similar to Nordsieck coordinates (which store Taylor coefficients instead
    of derivatives), but better suited to ODE filtering and smoothing. Used in
    integrator-transitions, e.g. in :class:`IntegratedWienerTransition`.
    """

    def __init__(self, powers, scales, dimension):
        # Storing powers and scales directly keeps assembling these
        # coordinates cheap: for the inverse they are easier to read off
        # than to invert numerically.
        self.powers = powers
        self.scales = scales
        self.dimension = dimension

    @classmethod
    def from_order(cls, order, dimension):
        """Convenience constructor for the initial set-up."""
        exponents = np.arange(order, -1, -1)
        factorials = scipy.special.factorial(exponents)
        return cls(
            powers=exponents + 0.5,
            scales=factorials,
            dimension=dimension,
        )

    def __call__(self, step):
        diagonal = np.abs(step) ** self.powers / self.scales
        if not config.lazy_linalg:
            return np.kron(np.eye(self.dimension), np.diag(diagonal))
        return linops.Kronecker(
            A=linops.Identity(self.dimension),
            B=linops.Scaling(factors=diagonal),
        )

    @cached_property
    def inverse(self) -> "NordsieckLikeCoordinates":
        # Negated powers and reciprocal scales yield the exact inverse.
        return NordsieckLikeCoordinates(
            powers=-self.powers,
            scales=1.0 / self.scales,
            dimension=self.dimension,
        )

@@ -8,12 +8,11 @@
Loading
8 8
import scipy.linalg
9 9
10 10
from probnum import config, linops, randvars
11 +
from probnum.randprocs.markov import _transition
12 +
from probnum.randprocs.markov.discrete import _condition_state
11 13
from probnum.typing import FloatArgType, IntArgType
12 14
from probnum.utils.linalg import cholesky_update, tril_to_positive_tril
13 15
14 -
from . import transition as trans
15 -
from .discrete_transition_utils import condition_state_on_rv
16 -
17 16
try:
18 17
    # functools.cached_property is only available in Python >=3.8
19 18
    from functools import cached_property  # pylint: disable=ungrouped-imports
@@ -22,7 +21,7 @@
Loading
22 21
    from cached_property import cached_property
23 22
24 23
25 -
class DiscreteGaussian(trans.Transition):
24 +
class DiscreteGaussian(_transition.Transition):
26 25
    r"""Discrete transitions with additive Gaussian noise.
27 26
28 27
    .. math:: x_{i+1} \sim \mathcal{N}(g(t_i, x_i), S(t_i))
@@ -148,7 +147,10 @@
Loading
148 147
            )
149 148
            gain = info_forwarded["gain"]
150 149
        info = {"rv_forwarded": rv_forwarded}
151 -
        return condition_state_on_rv(rv_obtained, rv_forwarded, rv, gain), info
150 +
        return (
151 +
            _condition_state.condition_state_on_rv(rv_obtained, rv_forwarded, rv, gain),
152 +
            info,
153 +
        )
152 154
153 155
    @lru_cache(maxsize=None)
154 156
    def proc_noise_cov_cholesky_fun(self, t):
@@ -165,7 +167,6 @@
Loading
165 167
        evlvar=0.0,
166 168
    ):
167 169
168 -
        spatialdim = prior.spatialdim
169 170
        h0 = prior.proj2coord(coord=0)
170 171
        h1 = prior.proj2coord(coord=1)
171 172
@@ -173,17 +174,17 @@
Loading
173 174
            return h1 @ x - ode.f(t, h0 @ x)
174 175
175 176
        def diff(t):
176 -
            return evlvar * np.eye(spatialdim)
177 +
            return evlvar * np.eye(ode.dimension)
177 178
178 179
        def diff_cholesky(t):
179 -
            return np.sqrt(evlvar) * np.eye(spatialdim)
180 +
            return np.sqrt(evlvar) * np.eye(ode.dimension)
180 181
181 182
        def jacobian(t, x):
182 183
            return h1 - ode.df(t, h0 @ x) @ h0
183 184
184 185
        return cls(
185 186
            input_dim=prior.dimension,
186 -
            output_dim=prior.spatialdim,
187 +
            output_dim=ode.dimension,
187 188
            state_trans_fun=dyna,
188 189
            jacob_state_trans_fun=jacobian,
189 190
            proc_noise_cov_mat_fun=diff,

@@ -8,6 +8,6 @@
Loading
8 8
9 9
    def __init__(
10 10
        self,
11 -
        prior_process: randprocs.MarkovProcess,
11 +
        prior_process: randprocs.markov.MarkovProcess,
12 12
    ):
13 13
        self.prior_process = prior_process

@@ -7,14 +7,13 @@
Loading
7 7
import scipy.linalg
8 8
9 9
from probnum import randvars
10 +
from probnum.randprocs.markov import _transition, discrete
11 +
from probnum.randprocs.markov.continuous import _mfd
10 12
from probnum.typing import FloatArgType, IntArgType
11 13
from probnum.utils.linalg import tril_to_positive_tril
12 14
13 -
from . import discrete_transition, transition
14 -
from .sde_utils import matrix_fraction_decomposition
15 15
16 -
17 -
class SDE(transition.Transition):
16 +
class SDE(_transition.Transition):
18 17
    """Stochastic differential equation.
19 18
20 19
    .. math:: d x(t) = g(t, x(t)) d t + L(t) d w(t),
@@ -326,8 +325,8 @@
Loading
326 325
    def _setup_vectorized_mde_forward_classic(self, initrv, _diffusion=1.0):
327 326
        """Set up forward moment differential equations (MDEs).
328 327
329 -
        Compute an ODE vector field that represents the MDEs and is
330 -
        compatible with scipy.solve_ivp.
328 +
        Compute an ODE vector field that represents the MDEs and is compatible with
329 +
        scipy.solve_ivp.
331 330
        """
332 331
        dim = len(initrv.mean)
333 332
@@ -451,8 +450,8 @@
Loading
451 450
    def _setup_vectorized_mde_backward(self, finalrv_obtained, _diffusion=1.0):
452 451
        """Set up backward moment differential equations (MDEs).
453 452
454 -
        Compute an ODE vector field that represents the MDEs and is
455 -
        compatible with scipy.solve_ivp.
453 +
        Compute an ODE vector field that represents the MDEs and is compatible with
454 +
        scipy.solve_ivp.
456 455
        """
457 456
        dim = len(finalrv_obtained.mean)
458 457
@@ -591,16 +590,20 @@
Loading
591 590
            eye = np.eye(self.dimension)
592 591
            driftmat = np.block([[self.driftmat, eye], [zeros, zeros]])
593 592
            dispmat = np.concatenate((self.dispmat, np.zeros(self.dispmat.shape)))
594 -
            ah_stack, qh_stack, _ = matrix_fraction_decomposition(driftmat, dispmat, dt)
593 +
            ah_stack, qh_stack, _ = _mfd.matrix_fraction_decomposition(
594 +
                driftmat, dispmat, dt
595 +
            )
595 596
            proj = np.eye(self.dimension, 2 * self.dimension)
596 597
            proj_rev = np.flip(proj, axis=1)
597 598
            ah = proj @ ah_stack @ proj.T
598 599
            sh = proj @ ah_stack @ proj_rev.T @ self.forcevec
599 600
            qh = proj @ qh_stack @ proj.T
600 601
        else:
601 -
            ah, qh, _ = matrix_fraction_decomposition(self.driftmat, self.dispmat, dt)
602 +
            ah, qh, _ = _mfd.matrix_fraction_decomposition(
603 +
                self.driftmat, self.dispmat, dt
604 +
            )
602 605
            sh = np.zeros(len(ah))
603 -
        return discrete_transition.DiscreteLTIGaussian(
606 +
        return discrete.DiscreteLTIGaussian(
604 607
            ah,
605 608
            sh,
606 609
            qh,

@@ -2,7 +2,7 @@
Loading
2 2
3 3
import numpy as np
4 4
5 -
from probnum import filtsmooth, problems, randprocs, randvars, statespace
5 +
from probnum import filtsmooth, problems, randprocs, randvars
6 6
from probnum.typing import FloatArgType, IntArgType
7 7
8 8
from .. import diffeq  # diffeq zoo
@@ -20,7 +20,7 @@
Loading
20 20
    rng: np.random.Generator,
21 21
    measurement_variance: FloatArgType = 0.5,
22 22
    process_diffusion: FloatArgType = 1.0,
23 -
    model_ordint: IntArgType = 1,
23 +
    num_prior_derivatives: IntArgType = 1,
24 24
    timespan: Tuple[FloatArgType, FloatArgType] = (0.0, 20.0),
25 25
    step: FloatArgType = 0.2,
26 26
    initrv: Optional[randvars.RandomVariable] = None,
@@ -64,7 +64,7 @@
Loading
64 64
        Marginal measurement variance.
65 65
    process_diffusion
66 66
        Diffusion constant for the dynamics.
67 -
    model_ordint
67 +
    num_prior_derivatives
68 68
        Order of integration for the dynamics model. Defaults to one, which corresponds
69 69
        to a Wiener velocity model.
70 70
    timespan
@@ -94,11 +94,11 @@
Loading
94 94
95 95
    """
96 96
    state_dim = 2
97 -
    model_dim = state_dim * (model_ordint + 1)
97 +
    model_dim = state_dim * (num_prior_derivatives + 1)
98 98
    measurement_dim = 2
99 -
    dynamics_model = statespace.IBM(
100 -
        ordint=model_ordint,
101 -
        spatialdim=state_dim,
99 +
    dynamics_model = randprocs.markov.integrator.IntegratedWienerTransition(
100 +
        num_derivatives=num_prior_derivatives,
101 +
        wiener_process_dimension=state_dim,
102 102
        forward_implementation=forward_implementation,
103 103
        backward_implementation=backward_implementation,
104 104
    )
@@ -109,7 +109,7 @@
Loading
109 109
    measurement_matrix = np.eye(measurement_dim, model_dim)
110 110
    measurement_cov = measurement_variance * np.eye(measurement_dim)
111 111
    measurement_cov_cholesky = np.sqrt(measurement_variance) * np.eye(measurement_dim)
112 -
    measurement_model = statespace.DiscreteLTIGaussian(
112 +
    measurement_model = randprocs.markov.discrete.DiscreteLTIGaussian(
113 113
        state_trans_mat=measurement_matrix,
114 114
        shift_vec=np.zeros(measurement_dim),
115 115
        proc_noise_cov_mat=measurement_cov,
@@ -128,11 +128,11 @@
Loading
128 128
    # Set up regression problem
129 129
    time_grid = np.arange(*timespan, step=step)
130 130
131 -
    prior_process = randprocs.MarkovProcess(
131 +
    prior_process = randprocs.markov.MarkovProcess(
132 132
        transition=discrete_dynamics_model, initrv=initrv, initarg=time_grid[0]
133 133
    )
134 134
135 -
    states, obs = statespace.generate_artificial_measurements(
135 +
    states, obs = randprocs.markov.utils.generate_artificial_measurements(
136 136
        rng=rng,
137 137
        prior_process=prior_process,
138 138
        measmod=measurement_model,
@@ -211,16 +211,16 @@
Loading
211 211
        Cambridge University Press, 2019
212 212
    """
213 213
214 -
    dynamics_model = statespace.IOUP(
215 -
        ordint=0,
216 -
        spatialdim=1,
214 +
    dynamics_model = randprocs.markov.integrator.IntegratedOrnsteinUhlenbeckTransition(
215 +
        num_derivatives=0,
216 +
        wiener_process_dimension=1,
217 217
        driftspeed=driftspeed,
218 218
        forward_implementation=forward_implementation,
219 219
        backward_implementation=backward_implementation,
220 220
    )
221 221
    dynamics_model.dispmat *= process_diffusion
222 222
223 -
    measurement_model = statespace.DiscreteLTIGaussian(
223 +
    measurement_model = randprocs.markov.discrete.DiscreteLTIGaussian(
224 224
        state_trans_mat=np.eye(1),
225 225
        shift_vec=np.zeros(1),
226 226
        proc_noise_cov_mat=measurement_variance * np.eye(1),
@@ -235,10 +235,10 @@
Loading
235 235
    if time_grid is None:
236 236
        time_grid = np.arange(0.0, 20.0, step=0.2)
237 237
238 -
    prior_process = randprocs.MarkovProcess(
238 +
    prior_process = randprocs.markov.MarkovProcess(
239 239
        transition=dynamics_model, initrv=initrv, initarg=time_grid[0]
240 240
    )
241 -
    states, obs = statespace.generate_artificial_measurements(
241 +
    states, obs = randprocs.markov.utils.generate_artificial_measurements(
242 242
        rng=rng, prior_process=prior_process, measmod=measurement_model, times=time_grid
243 243
    )
244 244
@@ -356,7 +356,7 @@
Loading
356 356
        + np.diag(np.array([step ** 2 / 2]), -1)
357 357
    )
358 358
359 -
    dynamics_model = statespace.DiscreteGaussian(
359 +
    dynamics_model = randprocs.markov.discrete.DiscreteGaussian(
360 360
        input_dim=2,
361 361
        output_dim=2,
362 362
        state_trans_fun=f,
@@ -364,7 +364,7 @@
Loading
364 364
        jacob_state_trans_fun=df,
365 365
    )
366 366
367 -
    measurement_model = statespace.DiscreteGaussian(
367 +
    measurement_model = randprocs.markov.discrete.DiscreteGaussian(
368 368
        input_dim=2,
369 369
        output_dim=1,
370 370
        state_trans_fun=h,
@@ -380,11 +380,11 @@
Loading
380 380
381 381
    if initarg is None:
382 382
        initarg = time_grid[0]
383 -
    prior_process = randprocs.MarkovProcess(
383 +
    prior_process = randprocs.markov.MarkovProcess(
384 384
        transition=dynamics_model, initrv=initrv, initarg=initarg
385 385
    )
386 386
387 -
    states, obs = statespace.generate_artificial_measurements(
387 +
    states, obs = randprocs.markov.utils.generate_artificial_measurements(
388 388
        rng=rng,
389 389
        prior_process=prior_process,
390 390
        measmod=measurement_model,
@@ -462,8 +462,10 @@
Loading
462 462
    if initrv is None:
463 463
        initrv = randvars.Normal(np.zeros(1), 3.0 * np.eye(1))
464 464
465 -
    dynamics_model = statespace.SDE(dimension=1, driftfun=f, dispmatfun=l, jacobfun=df)
466 -
    measurement_model = statespace.DiscreteLTIGaussian(
465 +
    dynamics_model = randprocs.markov.continuous.SDE(
466 +
        dimension=1, driftfun=f, dispmatfun=l, jacobfun=df
467 +
    )
468 +
    measurement_model = randprocs.markov.discrete.DiscreteLTIGaussian(
467 469
        state_trans_mat=np.eye(1),
468 470
        shift_vec=np.zeros(1),
469 471
        proc_noise_cov_mat=measurement_variance * np.eye(1),
@@ -478,14 +480,14 @@
Loading
478 480
        non_linear_model=dynamics_model
479 481
    )
480 482
481 -
    prior_process = randprocs.MarkovProcess(
483 +
    prior_process = randprocs.markov.MarkovProcess(
482 484
        transition=dynamics_model, initrv=initrv, initarg=time_grid[0]
483 485
    )
484 -
    prior_process_with_linearized_dynamics = randprocs.MarkovProcess(
486 +
    prior_process_with_linearized_dynamics = randprocs.markov.MarkovProcess(
485 487
        transition=linearized_dynamics_model, initrv=initrv, initarg=time_grid[0]
486 488
    )
487 489
488 -
    states, obs = statespace.generate_artificial_measurements(
490 +
    states, obs = randprocs.markov.utils.generate_artificial_measurements(
489 491
        rng=rng,
490 492
        prior_process=prior_process_with_linearized_dynamics,
491 493
        measmod=measurement_model,
@@ -565,9 +567,9 @@
Loading
565 567
566 568
    t0, tmax = timespan
567 569
    logistic_ivp = diffeq.logistic(t0=t0, tmax=tmax, y0=y0, params=params)
568 -
    dynamics_model = statespace.IBM(
569 -
        ordint=order,
570 -
        spatialdim=1,
570 +
    dynamics_model = randprocs.markov.integrator.IntegratedWienerTransition(
571 +
        num_derivatives=order,
572 +
        wiener_process_dimension=1,
571 573
        forward_implementation=forward_implementation,
572 574
        backward_implementation=backward_implementation,
573 575
    )
@@ -595,7 +597,7 @@
Loading
595 597
        solution=solution,
596 598
    )
597 599
598 -
    prior_process = randprocs.MarkovProcess(
600 +
    prior_process = randprocs.markov.MarkovProcess(
599 601
        transition=dynamics_model, initrv=initrv, initarg=time_grid[0]
600 602
    )
601 603
    info = dict(

@@ -8,7 +8,9 @@
Loading
8 8
9 9
    def __init__(self, ivp, order):
10 10
        self.ivp = ivp
11 -
        self.order = order  # e.g.: RK45 has order=5, IBM(q) has order=q
11 +
        self.order = (
12 +
            order  # e.g.: RK45 has order=5, IntegratedWienerTransition(nu) has order=nu
13 +
        )
12 14
        self.num_steps = 0
13 15
14 16
    def solve(self, steprule):

@@ -0,0 +1,34 @@
Loading
1 +
"""Transitions for integrated systems (e.g. integrated Wiener processes)."""
2 +
3 +
from . import convert
4 +
from ._integrator import IntegratorTransition
5 +
from ._ioup import (
6 +
    IntegratedOrnsteinUhlenbeckProcess,
7 +
    IntegratedOrnsteinUhlenbeckTransition,
8 +
)
9 +
from ._iwp import IntegratedWienerProcess, IntegratedWienerTransition
10 +
from ._matern import MaternProcess, MaternTransition
11 +
from ._preconditioner import NordsieckLikeCoordinates, Preconditioner
12 +
13 +
__all__ = [
14 +
    "IntegratorTransition",
15 +
    "IntegratedWienerProcess",
16 +
    "IntegratedWienerTransition",
17 +
    "IntegratedOrnsteinUhlenbeckProcess",
18 +
    "IntegratedOrnsteinUhlenbeckTransition",
19 +
    "MaternProcess",
20 +
    "MaternTransition",
21 +
    "Preconditioner",
22 +
    "NordsieckLikeCoordinates",
23 +
]
24 +
25 +
# Set correct module paths. Corrects links and module paths in documentation.
26 +
IntegratorTransition.__module__ = "probnum.randprocs.markov.integrator"
27 +
IntegratedWienerProcess.__module__ = "probnum.randprocs.markov.integrator"
28 +
IntegratedWienerTransition.__module__ = "probnum.randprocs.markov.integrator"
29 +
IntegratedOrnsteinUhlenbeckProcess.__module__ = "probnum.randprocs.markov.integrator"
30 +
IntegratedOrnsteinUhlenbeckTransition.__module__ = "probnum.randprocs.markov.integrator"
31 +
MaternProcess.__module__ = "probnum.randprocs.markov.integrator"
32 +
MaternTransition.__module__ = "probnum.randprocs.markov.integrator"
33 +
Preconditioner.__module__ = "probnum.randprocs.markov.integrator"
34 +
NordsieckLikeCoordinates.__module__ = "probnum.randprocs.markov.integrator"

@@ -0,0 +1,254 @@
Loading
1 +
"""Matern processes."""
2 +
import warnings
3 +
4 +
import numpy as np
5 +
import scipy.special
6 +
7 +
try:
8 +
    # cached_property is only available in Python >=3.8
9 +
    from functools import cached_property
10 +
except ImportError:
11 +
    from cached_property import cached_property
12 +
13 +
from probnum import randvars
14 +
from probnum.randprocs.markov import _markov_process, continuous
15 +
from probnum.randprocs.markov.integrator import _integrator, _preconditioner
16 +
17 +
18 +
class MaternProcess(_markov_process.MarkovProcess):
    r"""Matern process.

    Convenience access to (:math:`d` dimensional) Matern(:math:`\nu`) processes.

    Parameters
    ----------
    lengthscale
        Lengthscale of the Matern process.
    initarg
        Initial time point.
    num_derivatives
        Number of modelled derivatives of the process (``order``, ``number of integrations``).
        Optional. Default is :math:`\nu=1`.
    wiener_process_dimension
        Dimension of the underlying Wiener process.
        Optional. Default is :math:`d=1`.
        The dimension of the Matern process itself is :math:`d(\nu + 1)`.
    initrv
        Law of the Matern process at the initial time point.
        Optional. Default is a :math:`d(\nu + 1)` dimensional standard-normal distribution.
    diffuse
        Whether to instantiate a diffuse prior. A diffuse prior has large initial variances.
        Optional. Default is `False`.
        If `True`, and if an initial random variable is not passed, an initial random variable is created,
        where the initial covariance is of the form :math:`\kappa I_{d(\nu + 1)}`
        with :math:`\kappa=10^6`.
        Diffuse priors are used when initial distributions are not known.
        They are common for filtering-based probabilistic ODE solvers.
    forward_implementation
        Implementation of the forward-propagation in the underlying transitions.
        Optional. Default is `classic`. `sqrt` implementation is more computationally expensive, but also more stable.
    backward_implementation
        Implementation of the backward-conditioning in the underlying transitions.
        Optional. Default is `classic`. `sqrt` implementation is more computationally expensive, but also more stable.

    Raises
    ------
    Warning
        If `initrv` is not None and `diffuse` is True.

    Examples
    --------
    >>> matern1 = MaternProcess(lengthscale=1., initarg=0.)
    >>> print(matern1)
    <MaternProcess with input_dim=1, output_dim=2, dtype=float64>

    >>> matern2 = MaternProcess(lengthscale=1.,initarg=0., num_derivatives=2)
    >>> print(matern2)
    <MaternProcess with input_dim=1, output_dim=3, dtype=float64>

    >>> matern3 = MaternProcess(lengthscale=1.,initarg=0., wiener_process_dimension=10)
    >>> print(matern3)
    <MaternProcess with input_dim=1, output_dim=20, dtype=float64>

    >>> matern4 = MaternProcess(lengthscale=1.,initarg=0., num_derivatives=4, wiener_process_dimension=1)
    >>> print(matern4)
    <MaternProcess with input_dim=1, output_dim=5, dtype=float64>
    """

    def __init__(
        self,
        lengthscale,
        initarg,
        num_derivatives=1,
        wiener_process_dimension=1,
        initrv=None,
        diffuse=False,
        forward_implementation="classic",
        backward_implementation="classic",
    ):
        matern_transition = MaternTransition(
            num_derivatives=num_derivatives,
            wiener_process_dimension=wiener_process_dimension,
            lengthscale=lengthscale,
            forward_implementation=forward_implementation,
            backward_implementation=backward_implementation,
        )
        if initrv is not None and diffuse:
            warnings.warn(
                "Parameter `diffuse` has no effect, because an `initrv` has been provided."
            )
        if initrv is None:
            # Default prior: zero mean with (scaled) identity covariance.
            # The diffuse scale 1e3 enters through the Cholesky factor, so the
            # resulting covariance is kappa * I with kappa = (1e3)**2 = 1e6.
            if diffuse:
                scale_cholesky = 1e3
            else:
                scale_cholesky = 1.0
            zeros = np.zeros(matern_transition.dimension)
            cov_cholesky = scale_cholesky * np.eye(matern_transition.dimension)
            initrv = randvars.Normal(
                mean=zeros, cov=cov_cholesky ** 2, cov_cholesky=cov_cholesky
            )

        super().__init__(transition=matern_transition, initrv=initrv, initarg=initarg)
112 +
113 +
114 +
class MaternTransition(_integrator.IntegratorTransition, continuous.LTISDE):
    """Matern process in :math:`d` dimensions."""

    def __init__(
        self,
        num_derivatives: int,
        wiener_process_dimension: int,
        lengthscale: float,
        forward_implementation="classic",
        backward_implementation="classic",
    ):

        self.lengthscale = lengthscale

        # Initialise both super classes explicitly: the integrator interface
        # provides num_derivatives, wiener_process_dimension and the
        # preconditioner; the LTI-SDE provides the continuous-time dynamics.
        _integrator.IntegratorTransition.__init__(
            self,
            num_derivatives=num_derivatives,
            wiener_process_dimension=wiener_process_dimension,
        )
        continuous.LTISDE.__init__(
            self,
            driftmat=self._driftmat,
            forcevec=self._forcevec,
            dispmat=self._dispmat,
            forward_implementation=forward_implementation,
            backward_implementation=backward_implementation,
        )

    @cached_property
    def _driftmat(self):
        """Companion-form drift matrix of the multivariate Matern process.

        Ones on the first superdiagonal (each state is the derivative of the
        previous one); the last row carries the binomially expanded
        coefficients of :math:`(d/dt + \\lambda)^{\\nu + 1/2 + 1/2}`.
        """
        driftmat = np.diag(np.ones(self.num_derivatives), 1)
        nu = self.num_derivatives + 0.5
        D, lam = self.num_derivatives + 1, np.sqrt(2 * nu) / self.lengthscale
        driftmat[-1, :] = np.array(
            [-scipy.special.binom(D, i) * lam ** (D - i) for i in range(D)]
        )
        # One independent copy of the 1d model per Wiener-process dimension.
        return np.kron(np.eye(self.wiener_process_dimension), driftmat)

    @cached_property
    def _forcevec(self):
        # No deterministic forcing: the force vector is all-zero.
        force_1d = np.zeros(self.num_derivatives + 1)
        return np.kron(np.ones(self.wiener_process_dimension), force_1d)

    @cached_property
    def _dispmat(self):
        # Noise enters only through the highest modelled derivative.
        dispmat_1d = np.zeros(self.num_derivatives + 1)
        dispmat_1d[-1] = 1.0  # Unit diffusion
        return np.kron(np.eye(self.wiener_process_dimension), dispmat_1d).T

    def forward_rv(
        self,
        rv,
        t,
        dt=None,
        compute_gain=False,
        _diffusion=1.0,
        **kwargs,
    ):
        """Forward-propagate a random variable over a time-increment ``dt``.

        The propagation happens in the preconditioned coordinate system:
        the state, the system matrices, and the returned quantities are all
        transformed there and back again.

        NOTE(review): this method temporarily overwrites ``self.driftmat``,
        ``self.forcevec`` and ``self.dispmat`` and restores them by applying
        the inverse transformation at the end — the statement order below is
        therefore essential, and the method is not reentrant.
        """
        if dt is None:
            raise ValueError(
                "Continuous-time transitions require a time-increment ``dt``."
            )

        # Fetch things into preconditioned space
        rv = _preconditioner.apply_precon(self.precon.inverse(dt), rv)

        # Apply preconditioning to system matrices
        self.driftmat = self.precon.inverse(dt) @ self.driftmat @ self.precon(dt)
        self.forcevec = self.precon.inverse(dt) @ self.forcevec
        self.dispmat = self.precon.inverse(dt) @ self.dispmat

        # Discretise and propagate
        discretised_model = self.discretise(dt=dt)
        rv, info = discretised_model.forward_rv(
            rv, t, compute_gain=compute_gain, _diffusion=_diffusion
        )

        # Undo preconditioning and return
        rv = _preconditioner.apply_precon(self.precon(dt), rv)
        info["crosscov"] = self.precon(dt) @ info["crosscov"] @ self.precon(dt).T
        if "gain" in info:
            info["gain"] = self.precon(dt) @ info["gain"] @ self.precon.inverse(dt).T

        # Restore the original (unpreconditioned) system matrices.
        self.driftmat = self.precon(dt) @ self.driftmat @ self.precon.inverse(dt)
        self.forcevec = self.precon(dt) @ self.forcevec
        self.dispmat = self.precon(dt) @ self.dispmat

        return rv, info

    def backward_rv(
        self,
        rv_obtained,
        rv,
        rv_forwarded=None,
        gain=None,
        t=None,
        dt=None,
        _diffusion=1.0,
        **kwargs,
    ):
        """Backward-condition a random variable over a time-increment ``dt``.

        Mirrors :meth:`forward_rv`: all inputs are moved into the
        preconditioned coordinate system, the discretised model does the
        conditioning, and the result is transformed back.

        NOTE(review): like ``forward_rv``, this temporarily mutates
        ``self.driftmat``/``forcevec``/``dispmat`` and restores them at the
        end — do not reorder these statements.
        """
        if dt is None:
            raise ValueError(
                "Continuous-time transitions require a time-increment ``dt``."
            )

        # Fetch things into preconditioned space
        rv_obtained = _preconditioner.apply_precon(self.precon.inverse(dt), rv_obtained)
        rv = _preconditioner.apply_precon(self.precon.inverse(dt), rv)
        rv_forwarded = (
            _preconditioner.apply_precon(self.precon.inverse(dt), rv_forwarded)
            if rv_forwarded is not None
            else None
        )
        gain = (
            self.precon.inverse(dt) @ gain @ self.precon.inverse(dt).T
            if gain is not None
            else None
        )

        # Apply preconditioning to system matrices
        self.driftmat = self.precon.inverse(dt) @ self.driftmat @ self.precon(dt)
        self.forcevec = self.precon.inverse(dt) @ self.forcevec
        self.dispmat = self.precon.inverse(dt) @ self.dispmat

        # Discretise and propagate
        discretised_model = self.discretise(dt=dt)
        rv, info = discretised_model.backward_rv(
            rv_obtained=rv_obtained,
            rv=rv,
            rv_forwarded=rv_forwarded,
            gain=gain,
            t=t,
            _diffusion=_diffusion,
        )

        # Undo preconditioning and return
        rv = _preconditioner.apply_precon(self.precon(dt), rv)
        self.driftmat = self.precon(dt) @ self.driftmat @ self.precon.inverse(dt)
        self.forcevec = self.precon(dt) @ self.forcevec
        self.dispmat = self.precon(dt) @ self.dispmat
        return rv, info

@@ -0,0 +1,20 @@
Loading
1 +
"""Markov processes and probabilistic state-space model routines.
2 +
3 +
This package implements continuous-discrete and discrete-discrete state space models,
4 +
which are the basis for Bayesian filtering and smoothing, but also for probabilistic ODE
5 +
solvers.
6 +
"""
7 +
8 +
from . import continuous, discrete, integrator, utils
9 +
from ._markov_process import MarkovProcess
10 +
from ._transition import Transition
11 +
12 +
# Public classes and functions. Order is reflected in documentation.
13 +
__all__ = [
14 +
    "MarkovProcess",
15 +
    "Transition",
16 +
]
17 +
18 +
# Set correct module paths. Corrects links and module paths in documentation.
19 +
MarkovProcess.__module__ = "probnum.randprocs.markov"
20 +
Transition.__module__ = "probnum.randprocs.markov"
0 21
similarity index 93%
1 22
rename from src/probnum/randprocs/_markov_process.py
2 23
rename to src/probnum/randprocs/markov/_markov_process.py

@@ -26,7 +26,6 @@
Loading
26 26
    quad,
27 27
    randprocs,
28 28
    randvars,
29 -
    statespace,
30 29
    utils,
31 30
)
32 31
from ._probabilistic_numerical_method import ProbabilisticNumericalMethod

@@ -9,7 +9,7 @@
Loading
9 9
10 10
import numpy as np
11 11
12 -
from probnum import randvars, statespace
12 +
from probnum import randprocs, randvars
13 13
from probnum.filtsmooth.gaussian.approx import _unscentedtransform
14 14
from probnum.typing import FloatArgType
15 15
@@ -38,7 +38,7 @@
Loading
38 38
        return self.ut.sigma_points(at_this_rv)
39 39
40 40
41 -
class ContinuousUKFComponent(UKFComponent, statespace.SDE):
41 +
class ContinuousUKFComponent(UKFComponent, randprocs.markov.continuous.SDE):
42 42
    """Continuous-time unscented Kalman filter transition.
43 43
44 44
    Parameters
@@ -72,7 +72,7 @@
Loading
72 72
            priorpar=priorpar,
73 73
            special_scale=special_scale,
74 74
        )
75 -
        statespace.SDE.__init__(
75 +
        randprocs.markov.continuous.SDE.__init__(
76 76
            self,
77 77
            non_linear_model.dimension,
78 78
            non_linear_model.driftfun,
@@ -146,7 +146,7 @@
Loading
146 146
        raise NotImplementedError("Not available (yet).")
147 147
148 148
149 -
class DiscreteUKFComponent(UKFComponent, statespace.DiscreteGaussian):
149 +
class DiscreteUKFComponent(UKFComponent, randprocs.markov.discrete.DiscreteGaussian):
150 150
    """Discrete unscented Kalman filter transition."""
151 151
152 152
    def __init__(
@@ -164,7 +164,7 @@
Loading
164 164
            special_scale=special_scale,
165 165
        )
166 166
167 -
        statespace.DiscreteGaussian.__init__(
167 +
        randprocs.markov.discrete.DiscreteGaussian.__init__(
168 168
            self,
169 169
            non_linear_model.input_dim,
170 170
            non_linear_model.output_dim,
@@ -262,7 +262,7 @@
Loading
262 262
        prior,
263 263
        evlvar=0.0,
264 264
    ):
265 -
        discrete_model = statespace.DiscreteGaussian.from_ode(
265 +
        discrete_model = randprocs.discrete.DiscreteGaussian.from_ode(
266 266
            ode=ode, prior=prior, evlvar=evlvar
267 267
        )
268 268
        return cls(discrete_model)

@@ -32,10 +32,10 @@
Loading
32 32
    Examples
33 33
    --------
34 34
    >>> import numpy as np
35 -
    >>> from probnum import statespace
35 +
    >>> from probnum import randprocs
36 36
    >>> obs = [11.4123, -15.5123]
37 37
    >>> loc = [0.1, 0.2]
38 -
    >>> model = statespace.DiscreteLTIGaussian(np.ones((1, 1)), np.ones(1), np.ones((1,1)))
38 +
    >>> model = randprocs.markov.discrete.DiscreteLTIGaussian(np.ones((1, 1)), np.ones(1), np.ones((1,1)))
39 39
    >>> measurement_models = [model, model]
40 40
    >>> rp = TimeSeriesRegressionProblem(observations=obs, locations=loc, measurement_models=measurement_models)
41 41
    >>> rp

@@ -5,7 +5,7 @@
Loading
5 5
import numpy as np
6 6
import scipy.linalg
7 7
8 -
from probnum import filtsmooth, problems, randprocs, randvars, statespace, utils
8 +
from probnum import filtsmooth, problems, randprocs, randvars, utils
9 9
from probnum.diffeq import _odesolver
10 10
from probnum.diffeq.odefiltsmooth import _kalman_odesolution, initialization_routines
11 11
@@ -49,16 +49,19 @@
Loading
49 49
    def __init__(
50 50
        self,
51 51
        ivp: problems.InitialValueProblem,
52 -
        prior_process: randprocs.MarkovProcess,
53 -
        measurement_model: statespace.DiscreteGaussian,
52 +
        prior_process: randprocs.markov.MarkovProcess,
53 +
        measurement_model: randprocs.markov.discrete.DiscreteGaussian,
54 54
        with_smoothing: bool,
55 55
        initialization_routine: initialization_routines.InitializationRoutine,
56 -
        diffusion_model: Optional[statespace.Diffusion] = None,
56 +
        diffusion_model: Optional[randprocs.markov.continuous.Diffusion] = None,
57 57
        _reference_coordinates: Optional[int] = 0,
58 58
    ):
59 -
        if not isinstance(prior_process.transition, statespace.Integrator):
59 +
        if not isinstance(
60 +
            prior_process.transition,
61 +
            randprocs.markov.integrator.IntegratorTransition,
62 +
        ):
60 63
            raise ValueError(
61 -
                "Please initialise a Gaussian filter with an Integrator (see `probnum.statespace`)"
64 +
                "Please initialise a Gaussian filter with an Integrator (see `probnum.randprocs.markov.integrator`)"
62 65
            )
63 66
64 67
        self.prior_process = prior_process
@@ -67,11 +70,11 @@
Loading
67 70
        self.sigma_squared_mle = 1.0
68 71
        self.with_smoothing = with_smoothing
69 72
        self.initialization_routine = initialization_routine
70 -
        super().__init__(ivp=ivp, order=prior_process.transition.ordint)
73 +
        super().__init__(ivp=ivp, order=prior_process.transition.num_derivatives)
71 74
72 75
        # Set up the diffusion_model style: constant or piecewise constant.
73 76
        self.diffusion_model = (
74 -
            statespace.PiecewiseConstantDiffusion(t0=self.ivp.t0)
77 +
            randprocs.markov.continuous.PiecewiseConstantDiffusion(t0=self.ivp.t0)
75 78
            if diffusion_model is None
76 79
            else diffusion_model
77 80
        )
@@ -81,7 +84,7 @@
Loading
81 84
        # with a global diffusion estimate. The choices here depend on the
82 85
        # employed diffusion model.
83 86
        is_dynamic = isinstance(
84 -
            self.diffusion_model, statespace.PiecewiseConstantDiffusion
87 +
            self.diffusion_model, randprocs.markov.continuous.PiecewiseConstantDiffusion
85 88
        )
86 89
        self._calibrate_at_each_step = is_dynamic
87 90
        self._calibrate_all_states_post_hoc = not self._calibrate_at_each_step

@@ -0,0 +1,318 @@
Loading
1 +
"""Integrated Brownian motion."""
2 +
3 +
try:
    # cached_property is only available in Python >=3.8
    from functools import cached_property
except ImportError:
    from cached_property import cached_property

import warnings

import numpy as np
import scipy.linalg
import scipy.special

from probnum import config, linops, randvars
from probnum.randprocs.markov import _markov_process, continuous, discrete
from probnum.randprocs.markov.integrator import _integrator, _preconditioner
17 +
18 +
19 +
class IntegratedWienerProcess(_markov_process.MarkovProcess):
    r"""Integrated Wiener process.

    Convenience access to :math:`\nu` times integrated (:math:`d` dimensional) Wiener processes.

    Parameters
    ----------
    initarg
        Initial time point.
    num_derivatives
        Number of modelled derivatives of the integrated process (``order``, ``number of integrations``).
        Optional. Default is :math:`\nu=1`.
    wiener_process_dimension
        Dimension of the underlying Wiener process.
        Optional. Default is :math:`d=1`.
        The dimension of the integrated Wiener process itself is :math:`d(\nu + 1)`.
    initrv
        Law of the integrated Wiener process at the initial time point.
        Optional. Default is a :math:`d(\nu + 1)` dimensional standard-normal distribution.
    diffuse
        Whether to instantiate a diffuse prior. A diffuse prior has large initial variances.
        Optional. Default is `False`.
        If `True`, and if an initial random variable is not passed, an initial random variable is created,
        where the initial covariance is of the form :math:`\kappa I_{d(\nu + 1)}`
        with :math:`\kappa=10^6`.
        Diffuse priors are used when initial distributions are not known.
        They are common for filtering-based probabilistic ODE solvers.
    forward_implementation
        Implementation of the forward-propagation in the underlying transitions.
        Optional. Default is `classic`. `sqrt` implementation is more computationally expensive, but also more stable.
    backward_implementation
        Implementation of the backward-conditioning in the underlying transitions.
        Optional. Default is `classic`. `sqrt` implementation is more computationally expensive, but also more stable.

    Raises
    ------
    Warning
        If `initrv` is not None and `diffuse` is True.

    Examples
    --------
    >>> iwp1 = IntegratedWienerProcess(initarg=0.)
    >>> print(iwp1)
    <IntegratedWienerProcess with input_dim=1, output_dim=2, dtype=float64>

    >>> iwp2 = IntegratedWienerProcess(initarg=0., num_derivatives=2)
    >>> print(iwp2)
    <IntegratedWienerProcess with input_dim=1, output_dim=3, dtype=float64>

    >>> iwp3 = IntegratedWienerProcess(initarg=0., wiener_process_dimension=10)
    >>> print(iwp3)
    <IntegratedWienerProcess with input_dim=1, output_dim=20, dtype=float64>

    >>> iwp4 = IntegratedWienerProcess(initarg=0., num_derivatives=4, wiener_process_dimension=1)
    >>> print(iwp4)
    <IntegratedWienerProcess with input_dim=1, output_dim=5, dtype=float64>
    """

    def __init__(
        self,
        initarg,
        num_derivatives=1,
        wiener_process_dimension=1,
        initrv=None,
        diffuse=False,
        forward_implementation="classic",
        backward_implementation="classic",
    ):
        # The transition defines the process' dynamics (and its dimension).
        transition = IntegratedWienerTransition(
            num_derivatives=num_derivatives,
            wiener_process_dimension=wiener_process_dimension,
            forward_implementation=forward_implementation,
            backward_implementation=backward_implementation,
        )

        # `diffuse` only matters when we construct the default prior ourselves.
        if diffuse and initrv is not None:
            warnings.warn(
                "Parameter `diffuse` has no effect, because an `initrv` has been provided."
            )

        if initrv is None:
            # Zero-mean Gaussian prior; the diffuse scale enters through the
            # Cholesky factor, so the covariance is (chol_scale ** 2) * I.
            dim = transition.dimension
            chol_scale = 1e3 if diffuse else 1.0
            init_cov_cholesky = chol_scale * np.eye(dim)
            initrv = randvars.Normal(
                mean=np.zeros(dim),
                cov=init_cov_cholesky ** 2,
                cov_cholesky=init_cov_cholesky,
            )

        super().__init__(transition=transition, initrv=initrv, initarg=initarg)
109 +
110 +
111 +
class IntegratedWienerTransition(_integrator.IntegratorTransition, continuous.LTISDE):
    """Integrated Brownian motion in :math:`d` dimensions."""

    def __init__(
        self,
        num_derivatives,
        wiener_process_dimension,
        forward_implementation="classic",
        backward_implementation="classic",
    ):
        # initialise BOTH superclasses' inits.
        # I don't like it either, but it does the job.
        _integrator.IntegratorTransition.__init__(
            self,
            num_derivatives=num_derivatives,
            wiener_process_dimension=wiener_process_dimension,
        )
        continuous.LTISDE.__init__(
            self,
            driftmat=self._driftmat,
            forcevec=self._forcevec,
            dispmat=self._dispmat,
            forward_implementation=forward_implementation,
            backward_implementation=backward_implementation,
        )

    @cached_property
    def _driftmat(self):
        # Ones on the first superdiagonal: each state is the derivative of the
        # previous one. Replicated per Wiener-process dimension via Kronecker.
        driftmat_1d = np.diag(np.ones(self.num_derivatives), 1)
        if config.lazy_linalg:
            return linops.Kronecker(
                A=linops.Identity(self.wiener_process_dimension),
                B=linops.Matrix(A=driftmat_1d),
            )
        return np.kron(np.eye(self.wiener_process_dimension), driftmat_1d)

    @cached_property
    def _forcevec(self):
        # The integrated Wiener process has no deterministic forcing.
        return np.zeros((self.wiener_process_dimension * (self.num_derivatives + 1)))

    @cached_property
    def _dispmat(self):
        # Noise enters only through the highest modelled derivative.
        dispmat_1d = np.zeros(self.num_derivatives + 1)
        dispmat_1d[-1] = 1.0  # Unit diffusion

        if config.lazy_linalg:
            return linops.Kronecker(
                A=linops.Identity(self.wiener_process_dimension),
                B=linops.Matrix(A=dispmat_1d.reshape(-1, 1)),
            )
        return np.kron(np.eye(self.wiener_process_dimension), dispmat_1d).T

    @cached_property
    def equivalent_discretisation_preconditioned(self):
        """Discretised IN THE PRECONDITIONED SPACE.

        The preconditioned state transition is the flipped Pascal matrix.
        The preconditioned process noise covariance is the flipped Hilbert matrix.
        The shift is always zero.

        Reference: https://arxiv.org/abs/2012.10106
        """

        state_transition_1d = np.flip(
            scipy.linalg.pascal(self.num_derivatives + 1, kind="lower", exact=False)
        )
        if config.lazy_linalg:
            state_transition = linops.Kronecker(
                A=linops.Identity(self.wiener_process_dimension),
                B=linops.aslinop(state_transition_1d),
            )
        else:
            state_transition = np.kron(
                np.eye(self.wiener_process_dimension), state_transition_1d
            )
        process_noise_1d = np.flip(scipy.linalg.hilbert(self.num_derivatives + 1))
        if config.lazy_linalg:
            process_noise = linops.Kronecker(
                A=linops.Identity(self.wiener_process_dimension),
                B=linops.aslinop(process_noise_1d),
            )
        else:
            process_noise = np.kron(
                np.eye(self.wiener_process_dimension), process_noise_1d
            )
        empty_shift = np.zeros(
            self.wiener_process_dimension * (self.num_derivatives + 1)
        )

        process_noise_cholesky_1d = np.linalg.cholesky(process_noise_1d)
        if config.lazy_linalg:
            process_noise_cholesky = linops.Kronecker(
                A=linops.Identity(self.wiener_process_dimension),
                B=linops.aslinop(process_noise_cholesky_1d),
            )
        else:
            process_noise_cholesky = np.kron(
                np.eye(self.wiener_process_dimension), process_noise_cholesky_1d
            )

        return discrete.DiscreteLTIGaussian(
            state_trans_mat=state_transition,
            shift_vec=empty_shift,
            proc_noise_cov_mat=process_noise,
            proc_noise_cov_cholesky=process_noise_cholesky,
            forward_implementation=self.forward_implementation,
            backward_implementation=self.backward_implementation,
        )

    def forward_rv(
        self,
        rv,
        t,
        dt=None,
        compute_gain=False,
        _diffusion=1.0,
        **kwargs,
    ):
        """Forward-propagate a random variable over a time-increment ``dt``.

        The random variable is moved into the preconditioned coordinate
        system, propagated with the precomputed preconditioned discretisation,
        and transformed back (including the crosscov/gain entries of ``info``).
        """
        if dt is None:
            raise ValueError(
                "Continuous-time transitions require a time-increment ``dt``."
            )

        rv = _preconditioner.apply_precon(self.precon.inverse(dt), rv)
        rv, info = self.equivalent_discretisation_preconditioned.forward_rv(
            rv, t, compute_gain=compute_gain, _diffusion=_diffusion
        )

        # Undo the preconditioning on the auxiliary quantities as well.
        info["crosscov"] = self.precon(dt) @ info["crosscov"] @ self.precon(dt).T
        if "gain" in info:
            info["gain"] = self.precon(dt) @ info["gain"] @ self.precon.inverse(dt).T

        return _preconditioner.apply_precon(self.precon(dt), rv), info

    def backward_rv(
        self,
        rv_obtained,
        rv,
        rv_forwarded=None,
        gain=None,
        t=None,
        dt=None,
        _diffusion=1.0,
        **kwargs,
    ):
        """Backward-condition a random variable over a time-increment ``dt``.

        Mirrors :meth:`forward_rv`: all inputs (including the optional
        ``rv_forwarded`` and ``gain``) are transformed into the preconditioned
        space, conditioned there, and the result transformed back.
        """
        if dt is None:
            raise ValueError(
                "Continuous-time transitions require a time-increment ``dt``."
            )

        rv_obtained = _preconditioner.apply_precon(self.precon.inverse(dt), rv_obtained)
        rv = _preconditioner.apply_precon(self.precon.inverse(dt), rv)
        rv_forwarded = (
            _preconditioner.apply_precon(self.precon.inverse(dt), rv_forwarded)
            if rv_forwarded is not None
            else None
        )
        gain = (
            self.precon.inverse(dt) @ gain @ self.precon.inverse(dt).T
            if gain is not None
            else None
        )

        rv, info = self.equivalent_discretisation_preconditioned.backward_rv(
            rv_obtained=rv_obtained,
            rv=rv,
            rv_forwarded=rv_forwarded,
            gain=gain,
            t=t,
            _diffusion=_diffusion,
        )

        return _preconditioner.apply_precon(self.precon(dt), rv), info

    def discretise(self, dt):
        """Equivalent discretisation of the process.

        Overwrites matrix-fraction decomposition in the super-class. Only present for
        user's convenience and to maintain a clean interface. Not used for forward_rv,
        etc..
        """
        state_trans_mat = (
            self.precon(dt)
            @ self.equivalent_discretisation_preconditioned.state_trans_mat
            @ self.precon.inverse(dt)
        )
        proc_noise_cov_mat = (
            self.precon(dt)
            @ self.equivalent_discretisation_preconditioned.proc_noise_cov_mat
            @ self.precon(dt).T
        )
        zero_shift = np.zeros(state_trans_mat.shape[0])

        # The Cholesky factor of the process noise covariance matrix of the IBM
        # always exists, even for non-square root implementations.
        proc_noise_cov_cholesky = (
            self.precon(dt)
            @ self.equivalent_discretisation_preconditioned.proc_noise_cov_cholesky
        )

        return discrete.DiscreteLTIGaussian(
            state_trans_mat=state_trans_mat,
            shift_vec=zero_shift,
            proc_noise_cov_mat=proc_noise_cov_mat,
            proc_noise_cov_cholesky=proc_noise_cov_cholesky,
            forward_implementation=self.forward_implementation,
            # BUGFIX: previously passed `self.forward_implementation` here,
            # silently discarding the configured backward implementation
            # (inconsistent with `equivalent_discretisation_preconditioned`).
            backward_implementation=self.backward_implementation,
        )

@@ -7,17 +7,15 @@
Loading
7 7
"""
8 8
9 9
from ._gaussian_process import GaussianProcess
10 -
from ._markov_process import MarkovProcess
11 10
from ._random_process import RandomProcess
12 11
13 12
# Public classes and functions. Order is reflected in documentation.
14 13
__all__ = [
15 14
    "RandomProcess",
16 -
    "MarkovProcess",
17 15
    "GaussianProcess",
18 16
]
17 +
from . import markov
19 18
20 19
# Set correct module paths. Corrects links and module paths in documentation.
21 20
RandomProcess.__module__ = "probnum.randprocs"
22 -
MarkovProcess.__module__ = "probnum.randprocs"
23 21
GaussianProcess.__module__ = "probnum.randprocs"

@@ -0,0 +1,24 @@
Loading
1 +
"""Continous-time transitions and stochastic differential equations."""
2 +
3 +
from ._diffusions import ConstantDiffusion, Diffusion, PiecewiseConstantDiffusion
4 +
from ._mfd import matrix_fraction_decomposition
5 +
from ._sde import LTISDE, SDE, LinearSDE
6 +
7 +
# Public classes and functions. Order is reflected in documentation.
8 +
__all__ = [
9 +
    "SDE",
10 +
    "LinearSDE",
11 +
    "LTISDE",
12 +
    "Diffusion",
13 +
    "ConstantDiffusion",
14 +
    "PiecewiseConstantDiffusion",
15 +
    "matrix_fraction_decomposition",
16 +
]
17 +
18 +
# Set correct module paths. Corrects links and module paths in documentation.
19 +
SDE.__module__ = "probnum.randprocs.markov.continuous"
20 +
LinearSDE.__module__ = "probnum.randprocs.markov.continuous"
21 +
LTISDE.__module__ = "probnum.randprocs.markov.continuous"
22 +
Diffusion.__module__ = "probnum.randprocs.markov.continuous"
23 +
ConstantDiffusion.__module__ = "probnum.randprocs.markov.continuous"
24 +
PiecewiseConstantDiffusion.__module__ = "probnum.randprocs.markov.continuous"
0 25
similarity index 100%
1 26
rename from src/probnum/statespace/diffusions.py
2 27
rename to src/probnum/randprocs/markov/continuous/_diffusions.py
3 28
similarity index 100%
4 29
rename from src/probnum/statespace/sde_utils.py
5 30
rename to src/probnum/randprocs/markov/continuous/_mfd.py
6 31
similarity index 97%
7 32
rename from src/probnum/statespace/sde.py
8 33
rename to src/probnum/randprocs/markov/continuous/_sde.py

@@ -9,7 +9,7 @@
Loading
9 9
import numpy as np
10 10
from scipy import stats
11 11
12 -
from probnum import randvars, statespace, utils
12 +
from probnum import randprocs, randvars, utils
13 13
from probnum.filtsmooth import _timeseriesposterior
14 14
from probnum.filtsmooth.gaussian import approx
15 15
from probnum.typing import (
@@ -20,10 +20,10 @@
Loading
20 20
)
21 21
22 22
GaussMarkovPriorTransitionArgType = Union[
23 -
    statespace.DiscreteLinearGaussian,
23 +
    randprocs.markov.discrete.DiscreteLinearGaussian,
24 24
    approx.DiscreteEKFComponent,
25 25
    approx.DiscreteUKFComponent,
26 -
    statespace.LinearSDE,
26 +
    randprocs.markov.continuous.LinearSDE,
27 27
    approx.ContinuousEKFComponent,
28 28
    approx.ContinuousUKFComponent,
29 29
]

@@ -1,10 +1,10 @@
Loading
1 1
import numpy as np
2 2
3 -
from probnum import statespace
3 +
from probnum import randprocs
4 4
from probnum.filtsmooth.optim import _stoppingcriterion
5 5
6 6
7 -
class IteratedDiscreteComponent(statespace.Transition):
7 +
class IteratedDiscreteComponent(randprocs.markov.Transition):
8 8
    """Iterated updates.
9 9
10 10
    Examples
@@ -12,22 +12,22 @@
Loading
12 12
    >>> from probnum.filtsmooth.gaussian.approx import DiscreteEKFComponent
13 13
    >>> from probnum.filtsmooth.optim import StoppingCriterion
14 14
    >>> from probnum.problems.zoo.diffeq import logistic
15 -
    >>> from probnum.statespace import IBM
15 +
    >>> from probnum.randprocs.markov.integrator import IntegratedWienerProcess
16 16
    >>> from probnum.randvars import Constant
17 17
    >>> import numpy as np
18 18
    >>>
19 19
20 20
    Set up an iterated component.
21 21
22 -
    >>> prior = IBM(ordint=2, spatialdim=1)
23 -
    >>> ekf = DiscreteEKFComponent.from_ode(logistic(t0=0., tmax=1., y0=np.array([0.1])), prior, 0.)
22 +
    >>> iwp = IntegratedWienerProcess(initarg=0., num_derivatives=2, wiener_process_dimension=1)
23 +
    >>> ekf = DiscreteEKFComponent.from_ode(logistic(t0=0., tmax=1., y0=np.array([0.1])), iwp.transition, 0.)
24 24
    >>> comp = IteratedDiscreteComponent(ekf, StoppingCriterion())
25 25
26 26
    Generate some random variables and pseudo observations.
27 27
28 28
    >>> some_array = np.array([0.1, 1., 2.])
29 29
    >>> some_rv = Constant(some_array)
30 -
    >>> rv, _ = prior.forward_realization(some_array , t=0., dt=0.1)
30 +
    >>> rv, _ = iwp.transition.forward_realization(some_array , t=0., dt=0.1)
31 31
    >>> rv_observed, _ =  comp.forward_rv(rv, t=0.2)
32 32
    >>> rv_observed *= 0.01  # mitigate zero data
33 33

@@ -4,7 +4,7 @@
Loading
4 4
5 5
import numpy as np
6 6
7 -
from probnum import problems, randprocs, randvars, statespace
7 +
from probnum import problems, randprocs, randvars
8 8
from probnum.filtsmooth import _bayesfiltsmooth
9 9
from probnum.filtsmooth.particle import (
10 10
    _importance_distributions,
@@ -14,10 +14,12 @@
Loading
14 14
15 15
# Terribly long variable names, but internal only, so no worries.
16 16
ParticleFilterMeasurementModelArgType = Union[
17 -
    statespace.DiscreteGaussian, Iterable[statespace.DiscreteGaussian]
17 +
    randprocs.markov.discrete.DiscreteGaussian,
18 +
    Iterable[randprocs.markov.discrete.DiscreteGaussian],
18 19
]
19 20
ParticleFilterLinearisedMeasurementModelArgType = Union[
20 -
    statespace.DiscreteGaussian, Iterable[statespace.DiscreteGaussian]
21 +
    randprocs.markov.discrete.DiscreteGaussian,
22 +
    Iterable[randprocs.markov.discrete.DiscreteGaussian],
21 23
]
22 24
23 25
@@ -61,7 +63,7 @@
Loading
61 63
62 64
    def __init__(
63 65
        self,
64 -
        prior_process: randprocs.MarkovProcess,
66 +
        prior_process: randprocs.markov.MarkovProcess,
65 67
        importance_distribution: _importance_distributions.ImportanceDistribution,
66 68
        num_particles: IntArgType,
67 69
        rng: np.random.Generator,

@@ -6,7 +6,7 @@
Loading
6 6
7 7
import numpy as np
8 8
9 -
from probnum import randvars, statespace
9 +
from probnum import randprocs, randvars
10 10
11 11
12 12
class EKFComponent(abc.ABC):
@@ -107,14 +107,16 @@
Loading
107 107
        )
108 108
109 109
    @abc.abstractmethod
110 -
    def linearize(self, at_this_rv: randvars.RandomVariable) -> statespace.Transition:
110 +
    def linearize(
111 +
        self, at_this_rv: randvars.RandomVariable
112 +
    ) -> randprocs.markov.Transition:
111 113
        """Linearize the transition and make it tractable."""
112 114
        raise NotImplementedError
113 115
114 116
115 117
# Order of inheritance matters, because forward and backward
116 118
# are defined in EKFComponent, and must not be inherited from SDE.
117 -
class ContinuousEKFComponent(EKFComponent, statespace.SDE):
119 +
class ContinuousEKFComponent(EKFComponent, randprocs.markov.continuous.SDE):
118 120
    """Continuous-time extended Kalman filter transition.
119 121
120 122
    Parameters
@@ -139,7 +141,7 @@
Loading
139 141
        forward_implementation="classic",
140 142
    ) -> None:
141 143
142 -
        statespace.SDE.__init__(
144 +
        randprocs.markov.continuous.SDE.__init__(
143 145
            self,
144 146
            driftfun=non_linear_model.driftfun,
145 147
            dispmatfun=non_linear_model.dispmatfun,
@@ -168,7 +170,7 @@
Loading
168 170
        def driftmatfun(t):
169 171
            return dg(t, x0)
170 172
171 -
        return statespace.LinearSDE(
173 +
        return randprocs.markov.continuous.LinearSDE(
172 174
            dimension=self.non_linear_model.dimension,
173 175
            driftmatfun=driftmatfun,
174 176
            forcevecfun=forcevecfun,
@@ -180,7 +182,7 @@
Loading
180 182
        )
181 183
182 184
183 -
class DiscreteEKFComponent(EKFComponent, statespace.DiscreteGaussian):
185 +
class DiscreteEKFComponent(EKFComponent, randprocs.markov.discrete.DiscreteGaussian):
184 186
    """Discrete extended Kalman filter transition."""
185 187
186 188
    def __init__(
@@ -190,7 +192,7 @@
Loading
190 192
        backward_implementation="classic",
191 193
    ) -> None:
192 194
193 -
        statespace.DiscreteGaussian.__init__(
195 +
        randprocs.markov.discrete.DiscreteGaussian.__init__(
194 196
            self,
195 197
            non_linear_model.input_dim,
196 198
            non_linear_model.output_dim,
@@ -218,7 +220,7 @@
Loading
218 220
        def dynamicsmatfun(t):
219 221
            return dg(t, x0)
220 222
221 -
        return statespace.DiscreteLinearGaussian(
223 +
        return randprocs.markov.discrete.DiscreteLinearGaussian(
222 224
            input_dim=self.non_linear_model.input_dim,
223 225
            output_dim=self.non_linear_model.output_dim,
224 226
            state_trans_mat_fun=dynamicsmatfun,
@@ -241,7 +243,6 @@
Loading
241 243
    ):
242 244
        # code is here, and not in DiscreteGaussian, because we want the option of ek0-Jacobians
243 245
244 -
        spatialdim = prior.spatialdim
245 246
        h0 = prior.proj2coord(coord=0)
246 247
        h1 = prior.proj2coord(coord=1)
247 248
@@ -249,10 +250,10 @@
Loading
249 250
            return h1 @ x - ode.f(t, h0 @ x)
250 251
251 252
        def diff(t):
252 -
            return evlvar * np.eye(spatialdim)
253 +
            return evlvar * np.eye(ode.dimension)
253 254
254 255
        def diff_cholesky(t):
255 -
            return np.sqrt(evlvar) * np.eye(spatialdim)
256 +
            return np.sqrt(evlvar) * np.eye(ode.dimension)
256 257
257 258
        def jaco_ek1(t, x):
258 259
            return h1 - ode.df(t, h0 @ x) @ h0
@@ -266,7 +267,7 @@
Loading
266 267
            jaco = jaco_ek1
267 268
        else:
268 269
            raise TypeError("ek0_or_ek1 must be 0 or 1, resp.")
269 -
        discrete_model = statespace.DiscreteGaussian(
270 +
        discrete_model = randprocs.markov.discrete.DiscreteGaussian(
270 271
            input_dim=prior.dimension,
271 272
            output_dim=ode.dimension,
272 273
            state_trans_fun=dyna,

@@ -5,11 +5,11 @@
Loading
5 5
import numpy as np
6 6
import scipy.stats
7 7
8 -
from probnum import randvars, statespace, utils
8 +
from probnum import randvars, utils
9 +
from probnum.randprocs import _random_process
10 +
from probnum.randprocs.markov import _transition
9 11
from probnum.typing import ShapeArgType
10 12
11 -
from . import _random_process
12 -
13 13
_InputType = Union[np.floating, np.ndarray]
14 14
_OutputType = Union[np.floating, np.ndarray]
15 15
@@ -42,7 +42,7 @@
Loading
42 42
        self,
43 43
        initarg: np.ndarray,
44 44
        initrv: randvars.RandomVariable,
45 -
        transition: statespace.Transition,
45 +
        transition: _transition.Transition,
46 46
    ):
47 47
        self.initarg = initarg
48 48
        self.initrv = initrv
@@ -50,7 +50,7 @@
Loading
50 50
51 51
        super().__init__(
52 52
            input_dim=1 if np.asarray(initarg).ndim == 0 else initarg.shape[0],
53 -
            output_dim=1 if np.asarray(initrv).ndim == 0 else initrv.shape[0],
53 +
            output_dim=1 if initrv.ndim == 0 else initrv.shape[0],
54 54
            dtype=np.dtype(np.float_),
55 55
        )
56 56
57 57
imilarity index 100%
58 58
ename from src/probnum/statespace/transition.py
59 59
ename to src/probnum/randprocs/markov/_transition.py

@@ -10,7 +10,7 @@
Loading
10 10
11 11
import numpy as np
12 12
13 -
from probnum import problems, randprocs, randvars, statespace
13 +
from probnum import problems, randprocs
14 14
from probnum.diffeq import odefiltsmooth, stepsize
15 15
16 16
__all__ = ["probsolve_ivp"]
@@ -93,11 +93,11 @@
Loading
93 93
        If they are specified, this only affects the first step. Optional.
94 94
        Default is None, in which case the first step is chosen as prescribed by :meth:`propose_firststep`.
95 95
    algo_order
96 -
        Order of the algorithm. This amounts to choosing the order of integration (``ordint``) of an integrated Brownian motion prior.
97 -
        For too high orders, process noise covariance matrices become singular. For IBM, this maximum seems to be :`q=11` (using standard ``float64``).
96 +
        Order of the algorithm. This amounts to choosing the number of derivatives of an integrated Wiener process prior.
97 +
        For too high orders, process noise covariance matrices become singular.
98 +
        For integrated Wiener processes, this maximum seems to be ``num_derivatives=11`` (using standard ``float64``).
98 99
        It is possible that higher orders may work for you.
99 -
        The type of prior relates to prior assumptions about the
100 -
        derivative of the solution.
100 +
        The type of prior relates to prior assumptions about the derivative of the solution.
101 101
        The higher the order of the algorithm, the faster the convergence, but also, the higher-dimensional (and thus the costlier) the state space.
102 102
    method : str, optional
103 103
        Which method is to be used. Default is ``EK0`` which is the
@@ -249,26 +249,20 @@
Loading
249 249
        raise ValueError("Diffusion model is not supported.")
250 250
251 251
    choose_diffusion_model = {
252 -
        "constant": statespace.ConstantDiffusion(),
253 -
        "dynamic": statespace.PiecewiseConstantDiffusion(t0=ivp.t0),
252 +
        "constant": randprocs.markov.continuous.ConstantDiffusion(),
253 +
        "dynamic": randprocs.markov.continuous.PiecewiseConstantDiffusion(t0=ivp.t0),
254 254
    }
255 255
    diffusion = choose_diffusion_model[diffusion_model]
256 256
257 257
    # Create solver
258 -
    prior = statespace.IBM(
259 -
        ordint=algo_order,
260 -
        spatialdim=ivp.dimension,
258 +
    prior_process = randprocs.markov.integrator.IntegratedWienerProcess(
259 +
        initarg=ivp.t0,
260 +
        num_derivatives=algo_order,
261 +
        wiener_process_dimension=ivp.dimension,
262 +
        diffuse=True,
261 263
        forward_implementation="sqrt",
262 264
        backward_implementation="sqrt",
263 265
    )
264 -
    initrv = randvars.Normal(
265 -
        mean=np.zeros(prior.dimension),
266 -
        cov=1e6 * np.eye(prior.dimension),
267 -
        cov_cholesky=1e3 * np.eye(prior.dimension),
268 -
    )
269 -
    prior_process = randprocs.MarkovProcess(
270 -
        transition=prior, initrv=initrv, initarg=ivp.t0
271 -
    )
272 266
273 267
    if method.upper() not in ["EK0", "EK1"]:
274 268
        raise ValueError("Method is not supported.")

@@ -0,0 +1,108 @@
Loading
1 +
"""Integrated processes such as the integrated Wiener process or the Matern process."""
2 +
3 +
import numpy as np
4 +
5 +
from probnum.randprocs.markov.integrator import _preconditioner
6 +
7 +
__all__ = ["IntegratorTransition"]
8 +
9 +
10 +
class IntegratorTransition:
11 +
    r"""Transitions for integrator processes.
12 +
13 +
    An integrator is a special kind of random process
14 +
    that models a stack of a state and its first :math:`\nu` time-derivatives.
15 +
    For instances, this includes integrated Wiener processes or Matern processes.
16 +
17 +
    In ProbNum, integrators are always driven by :math:`d` dimensional Wiener processes.
18 +
    We compute the transitions usually in a preconditioned state (Nordsieck-like coordinates).
19 +
    """
20 +
21 +
    def __init__(self, num_derivatives, wiener_process_dimension):
22 +
        self.num_derivatives = num_derivatives
23 +
        self.wiener_process_dimension = wiener_process_dimension
24 +
        self.precon = _preconditioner.NordsieckLikeCoordinates.from_order(
25 +
            num_derivatives, wiener_process_dimension
26 +
        )
27 +
28 +
    def proj2coord(self, coord: int) -> np.ndarray:
29 +
        """Projection matrix to :math:`i` th coordinates.
30 +
31 +
        Computes the matrix
32 +
33 +
        .. math:: H_i = \\left[ I_d \\otimes e_i \\right] P^{-1},
34 +
35 +
        where :math:`e_i` is the :math:`i` th unit vector,
36 +
        that projects to the :math:`i` th coordinate of a vector.
37 +
        If the ODE is multidimensional, it projects to **each** of the
38 +
        :math:`i` th coordinates of each ODE dimension.
39 +
40 +
        Parameters
41 +
        ----------
42 +
        coord : int
43 +
            Coordinate index :math:`i` which to project to.
44 +
            Expected to be in range :math:`0 \\leq i \\leq q + 1`.
45 +
46 +
        Returns
47 +
        -------
48 +
        np.ndarray, shape=(d, d*(q+1))
49 +
            Projection matrix :math:`H_i`.
50 +
        """
51 +
        projvec1d = np.eye(self.num_derivatives + 1)[:, coord]
52 +
        projmat1d = projvec1d.reshape((1, self.num_derivatives + 1))
53 +
        projmat = np.kron(np.eye(self.wiener_process_dimension), projmat1d)
54 +
        return projmat
55 +
56 +
    @property
57 +
    def _derivwise2coordwise_projmat(self) -> np.ndarray:
58 +
        r"""Projection matrix to change the ordering of the state representation in an :class:`Integrator` from coordinate-wise to derivative-wise representation.
59 +
60 +
        A coordinate-wise ordering is
61 +
62 +
        .. math:: (y_1, \dot y_1, \ddot y_1, y_2, \dot y_2, ..., y_d^{(\nu)})
63 +
64 +
        and a derivative-wise ordering is
65 +
66 +
        .. math:: (y_1, y_2, ..., y_d, \dot y_1, \dot y_2, ..., \dot y_d, \ddot y_1, ..., y_d^{(\nu)}).
67 +
68 +
        Default representation in an :class:`Integrator` is coordinate-wise ordering, but sometimes, derivative-wise ordering is more convenient.
69 +
70 +
        See Also
71 +
        --------
72 +
        :attr:`Integrator._convert_coordwise_to_derivwise`
73 +
        :attr:`Integrator._convert_derivwise_to_coordwise`
74 +
75 +
        """
76 +
        dim = (self.num_derivatives + 1) * self.wiener_process_dimension
77 +
        projmat = np.zeros((dim, dim))
78 +
        E = np.eye(dim)
79 +
        for q in range(self.num_derivatives + 1):
80 +
81 +
            projmat[q :: (self.num_derivatives + 1)] = E[
82 +
                q
83 +
                * self.wiener_process_dimension : (q + 1)
84 +
                * self.wiener_process_dimension
85 +
            ]
86 +
        return projmat
87 +
88 +
    @property
89 +
    def _coordwise2derivwise_projmat(self) -> np.ndarray:
90 +
        r"""Projection matrix to change the ordering of the state representation in an :class:`Integrator` from derivative-wise to coordinate-wise representation.
91 +
92 +
        A coordinate-wise ordering is
93 +
94 +
        .. math:: (y_1, \dot y_1, \ddot y_1, y_2, \dot y_2, ..., y_d^{(\nu)})
95 +
96 +
        and a derivative-wise ordering is
97 +
98 +
        .. math:: (y_1, y_2, ..., y_d, \dot y_1, \dot y_2, ..., \dot y_d, \ddot y_1, ..., y_d^{(\nu)}).
99 +
100 +
        Default representation in an :class:`Integrator` is coordinate-wise ordering, but sometimes, derivative-wise ordering is more convenient.
101 +
102 +
        See Also
103 +
        --------
104 +
        :attr:`Integrator._convert_coordwise_to_derivwise`
105 +
        :attr:`Integrator._convert_derivwise_to_coordwise`
106 +
107 +
        """
108 +
        return self._derivwise2coordwise_projmat.T

@@ -6,7 +6,7 @@
Loading
6 6
import numpy as np
7 7
import scipy.integrate as sci
8 8
9 -
from probnum import filtsmooth, problems, randprocs, randvars, statespace
9 +
from probnum import filtsmooth, problems, randprocs, randvars
10 10
from probnum.diffeq.odefiltsmooth.initialization_routines import _initialization_routine
11 11
from probnum.typing import FloatArgType
12 12
@@ -28,9 +28,8 @@
Loading
28 28
29 29
    >>> import numpy as np
30 30
    >>> from probnum.randvars import Normal
31 -
    >>> from probnum.statespace import IBM
32 31
    >>> from probnum.problems.zoo.diffeq import vanderpol
33 -
    >>> from probnum.randprocs import MarkovProcess
32 +
    >>> from probnum.randprocs.markov.integrator import IntegratedWienerProcess
34 33
35 34
    Compute the initial values of the van-der-Pol problem as follows.
36 35
    First, we set up the ODE problem and the prior process.
@@ -38,9 +37,7 @@
Loading
38 37
    >>> ivp = vanderpol()
39 38
    >>> print(ivp.y0)
40 39
    [2. 0.]
41 -
    >>> prior = IBM(ordint=3, spatialdim=2)
42 -
    >>> initrv = Normal(mean=np.zeros(prior.dimension), cov=np.eye(prior.dimension))
43 -
    >>> prior_process = MarkovProcess(transition=prior, initrv=initrv, initarg=ivp.t0)
40 +
    >>> prior_process = IntegratedWienerProcess(initarg=ivp.t0, num_derivatives=3, wiener_process_dimension=2)
44 41
45 42
    Next, we call the initialization routine.
46 43
@@ -62,7 +59,9 @@
Loading
62 59
        super().__init__(is_exact=False, requires_jax=False)
63 60
64 61
    def __call__(
65 -
        self, ivp: problems.InitialValueProblem, prior_process: randprocs.MarkovProcess
62 +
        self,
63 +
        ivp: problems.InitialValueProblem,
64 +
        prior_process: randprocs.markov.MarkovProcess,
66 65
    ) -> randvars.RandomVariable:
67 66
        """Compute the initial distribution.
68 67
@@ -95,7 +94,7 @@
Loading
95 94
        f, y0, t0, df = ivp.f, ivp.y0, ivp.t0, ivp.df
96 95
        y0 = np.asarray(y0)
97 96
        ode_dim = y0.shape[0] if y0.ndim > 0 else 1
98 -
        order = prior_process.transition.ordint
97 +
        order = prior_process.transition.num_derivatives
99 98
100 99
        # order + 1 would suffice in theory, 2*order + 1 is for good measure
101 100
        # (the "+1" is a safety factor for order=1)
@@ -115,7 +114,7 @@
Loading
115 114
        proj_to_y = prior_process.transition.proj2coord(coord=0)
116 115
        zeros_shift = np.zeros(ode_dim)
117 116
        zeros_cov = np.zeros((ode_dim, ode_dim))
118 -
        measmod_scipy = statespace.DiscreteLTIGaussian(
117 +
        measmod_scipy = randprocs.markov.discrete.DiscreteLTIGaussian(
119 118
            proj_to_y,
120 119
            zeros_shift,
121 120
            zeros_cov,
@@ -137,7 +136,7 @@
Loading
137 136
        zeros_cov = np.zeros(
138 137
            (len(projmat_initial_conditions), len(projmat_initial_conditions))
139 138
        )
140 -
        measmod_initcond = statespace.DiscreteLTIGaussian(
139 +
        measmod_initcond = randprocs.markov.discrete.DiscreteLTIGaussian(
141 140
            projmat_initial_conditions,
142 141
            zeros_shift,
143 142
            zeros_cov,

@@ -26,7 +26,9 @@
Loading
26 26
27 27
    @abc.abstractmethod
28 28
    def __call__(
29 -
        self, ivp: problems.InitialValueProblem, prior_process: randprocs.MarkovProcess
29 +
        self,
30 +
        ivp: problems.InitialValueProblem,
31 +
        prior_process: randprocs.markov.MarkovProcess,
30 32
    ) -> randvars.RandomVariable:
31 33
        raise NotImplementedError
32 34

@@ -5,13 +5,13 @@
Loading
5 5
6 6
import numpy as np
7 7
8 -
from probnum import problems, statespace
8 +
from probnum import problems, randprocs
9 9
from probnum.filtsmooth import _bayesfiltsmooth, _timeseriesposterior, optim
10 10
from probnum.filtsmooth.gaussian import _kalmanposterior, approx
11 11
12 12
# Measurement models for a Kalman filter can be all sorts of things:
13 13
KalmanSingleMeasurementModelType = Union[
14 -
    statespace.DiscreteLinearGaussian,
14 +
    randprocs.markov.discrete.DiscreteLinearGaussian,
15 15
    approx.DiscreteEKFComponent,
16 16
    approx.DiscreteUKFComponent,
17 17
]
@@ -27,9 +27,9 @@
Loading
27 27
    ----------
28 28
    prior_process
29 29
        Prior Gauss-Markov process. Usually a :class:`MarkovProcess` with a :class:`Normal` initial random variable,
30 -
        and an LTISDE transition or an Integrator transition, but LinearSDE, ContinuousEKFComponent,
31 -
        or ContinuousUKFComponent are also valid. Describes a random process in :math:`K` dimensions.
32 -
        If the transition is an integrator, `K=spatialdim*(ordint+1)` for some spatialdim and ordint.
30 +
        and an :class:`LTISDE` transition or an :class:`IntegratorTransition`, but :class:`LinearSDE`, :class:`ContinuousEKFComponent`,
31 +
        or :class:`ContinuousUKFComponent` are also valid. Describes a random process in :math:`K` dimensions.
32 +
        If the transition is an integrator, `K=d*(nu+1)` for some d and nu.
33 33
    """
34 34
35 35
    def iterated_filtsmooth(

@@ -4,7 +4,7 @@
Loading
4 4
5 5
import numpy as np
6 6
7 -
from probnum import problems, randprocs, randvars, statespace
7 +
from probnum import problems, randprocs, randvars
8 8
from probnum.diffeq.odefiltsmooth.initialization_routines import _initialization_routine
9 9
10 10
@@ -37,8 +37,7 @@
Loading
37 37
    >>> import numpy as np
38 38
    >>> from probnum.randvars import Normal
39 39
    >>> from probnum.problems.zoo.diffeq import threebody_jax, vanderpol_jax
40 -
    >>> from probnum.statespace import IBM
41 -
    >>> from probnum.randprocs import MarkovProcess
40 +
    >>> from probnum.randprocs.markov.integrator import IntegratedWienerProcess
42 41
43 42
    Compute the initial values of the restricted three-body problem as follows
44 43
@@ -48,9 +47,7 @@
Loading
48 47
49 48
    Construct the prior process.
50 49
51 -
    >>> prior = IBM(ordint=3, spatialdim=4)
52 -
    >>> initrv = Normal(mean=np.zeros(prior.dimension), cov=np.eye(prior.dimension))
53 -
    >>> prior_process = MarkovProcess(transition=prior, initrv=initrv, initarg=ivp.t0)
50 +
    >>> prior_process = IntegratedWienerProcess(initarg=ivp.t0, wiener_process_dimension=4, num_derivatives=3)
54 51
55 52
    Initialize with Taylor-mode autodiff.
56 53
@@ -73,9 +70,7 @@
Loading
73 70
    >>> ivp = vanderpol_jax()
74 71
    >>> print(ivp.y0)
75 72
    [2. 0.]
76 -
    >>> prior = IBM(ordint=3, spatialdim=2)
77 -
    >>> initrv = Normal(mean=np.zeros(prior.dimension), cov=np.eye(prior.dimension))
78 -
    >>> prior_process = MarkovProcess(transition=prior, initrv=initrv, initarg=ivp.t0)
73 +
    >>> prior_process = IntegratedWienerProcess(initarg=ivp.t0, wiener_process_dimension=2, num_derivatives=3)
79 74
80 75
    >>> taylor_init = TaylorModeInitialization()
81 76
    >>> improved_initrv = taylor_init(ivp=ivp, prior_process=prior_process)
@@ -94,7 +89,9 @@
Loading
94 89
        super().__init__(is_exact=True, requires_jax=True)
95 90
96 91
    def __call__(
97 -
        self, ivp: problems.InitialValueProblem, prior_process: randprocs.MarkovProcess
92 +
        self,
93 +
        ivp: problems.InitialValueProblem,
94 +
        prior_process: randprocs.markov.MarkovProcess,
98 95
    ) -> randvars.RandomVariable:
99 96
100 97
        try:
@@ -109,7 +106,7 @@
Loading
109 106
                "dependencies jax and jaxlib. Try installing them via `pip install jax jaxlib`."
110 107
            ) from err
111 108
112 -
        order = prior_process.transition.ordint
109 +
        num_derivatives = prior_process.transition.num_derivatives
113 110
114 111
        dt = jnp.array([1.0])
115 112
@@ -135,11 +132,15 @@
Loading
135 132
            stacked_ode_eval = jnp.concatenate((dx_ravelled, dt))
136 133
            return stacked_ode_eval
137 134
138 -
        def derivs_to_normal_randvar(derivs, ordint):
135 +
        def derivs_to_normal_randvar(derivs, num_derivatives_in_prior):
139 136
            """Finalize the output in terms of creating a suitably sized random
140 137
            variable."""
141 -
            all_derivs = statespace.Integrator._convert_derivwise_to_coordwise(
142 -
                np.asarray(derivs), ordint=ordint, spatialdim=ivp.y0.shape[0]
138 +
            all_derivs = (
139 +
                randprocs.markov.integrator.convert.convert_derivwise_to_coordwise(
140 +
                    np.asarray(derivs),
141 +
                    num_derivatives=num_derivatives_in_prior,
142 +
                    wiener_process_dimension=ivp.y0.shape[0],
143 +
                )
143 144
            )
144 145
145 146
            # Wrap all inputs through np.asarray, because 'Normal's
@@ -153,12 +154,14 @@
Loading
153 154
        extended_state = jnp.concatenate((jnp.ravel(ivp.y0), jnp.array([ivp.t0])))
154 155
        derivs = []
155 156
156 -
        # Corner case 1: order == 0
157 +
        # Corner case 1: num_derivatives == 0
157 158
        derivs.extend(ivp.y0)
158 -
        if order == 0:
159 -
            return derivs_to_normal_randvar(derivs=derivs, ordint=0)
159 +
        if num_derivatives == 0:
160 +
            return derivs_to_normal_randvar(
161 +
                derivs=derivs, num_derivatives_in_prior=num_derivatives
162 +
            )
160 163
161 -
        # Corner case 2: order == 1
164 +
        # Corner case 2: num_derivatives == 1
162 165
        initial_series = (jnp.ones_like(extended_state),)
163 166
        (initial_taylor_coefficient, [*remaining_taylor_coefficents]) = jet(
164 167
            fun=evaluate_ode_for_extended_state,
@@ -166,11 +169,13 @@
Loading
166 169
            series=(initial_series,),
167 170
        )
168 171
        derivs.extend(initial_taylor_coefficient[:-1])
169 -
        if order == 1:
170 -
            return derivs_to_normal_randvar(derivs=derivs, ordint=1)
172 +
        if num_derivatives == 1:
173 +
            return derivs_to_normal_randvar(
174 +
                derivs=derivs, num_derivatives_in_prior=num_derivatives
175 +
            )
171 176
172 177
        # Order > 1
173 -
        for _ in range(1, order):
178 +
        for _ in range(1, num_derivatives):
174 179
            taylor_coefficients = (
175 180
                initial_taylor_coefficient,
176 181
                *remaining_taylor_coefficents,
@@ -181,4 +186,6 @@
Loading
181 186
                series=(taylor_coefficients,),
182 187
            )
183 188
            derivs.extend(remaining_taylor_coefficents[-2][:-1])
184 -
        return derivs_to_normal_randvar(derivs=derivs, ordint=order)
189 +
        return derivs_to_normal_randvar(
190 +
            derivs=derivs, num_derivatives_in_prior=num_derivatives
191 +
        )

@@ -2,13 +2,13 @@
Loading
2 2
3 3
import numpy as np
4 4
5 -
from .transition import Transition
5 +
from probnum.randprocs.markov import _markov_process, _transition
6 6
7 7
8 8
def generate_artificial_measurements(
9 9
    rng: np.random.Generator,
10 -
    prior_process,
11 -
    measmod: Transition,
10 +
    prior_process: _markov_process.MarkovProcess,
11 +
    measmod: _transition.Transition,
12 12
    times: np.ndarray,
13 13
):
14 14
    """Samples true states and observations at pre-determined timesteps "times" for a
15 15
imilarity index 100%
16 16
ename from tests/test_diffeq/test_odefiltsmooth/test_initialize/__init__.py
17 17
ename to tests/test_diffeq/test_odefiltsmooth/test_initialization_routines/__init__.py
18 18
imilarity index 62%
19 19
ename from tests/test_diffeq/test_odefiltsmooth/test_initialize/_interface_initialize_test.py
20 20
ename to tests/test_diffeq/test_odefiltsmooth/test_initialization_routines/_interface_initialize_test.py

@@ -2,7 +2,7 @@
Loading
2 2
3 3
import numpy as np
4 4
5 -
from probnum import problems, randprocs, randvars, statespace
5 +
from probnum import problems, randprocs, randvars
6 6
from probnum.filtsmooth import gaussian
7 7
8 8
__all__ = ["filter_kalman", "smooth_rts"]
@@ -161,16 +161,18 @@
Loading
161 161
def _setup_prior_process(F, L, m0, C0, t0, prior_model):
162 162
    zero_shift_prior = np.zeros(F.shape[0])
163 163
    if prior_model == "discrete":
164 -
        prior = statespace.DiscreteLTIGaussian(
164 +
        prior = randprocs.markov.discrete.DiscreteLTIGaussian(
165 165
            state_trans_mat=F, shift_vec=zero_shift_prior, proc_noise_cov_mat=L
166 166
        )
167 167
    elif prior_model == "continuous":
168 -
        prior = statespace.LTISDE(driftmat=F, forcevec=zero_shift_prior, dispmat=L)
168 +
        prior = randprocs.markov.continuous.LTISDE(
169 +
            driftmat=F, forcevec=zero_shift_prior, dispmat=L
170 +
        )
169 171
    else:
170 172
        raise ValueError
171 173
    initrv = randvars.Normal(m0, C0)
172 174
    initarg = t0
173 -
    prior_process = randprocs.MarkovProcess(
175 +
    prior_process = randprocs.markov.MarkovProcess(
174 176
        transition=prior, initrv=initrv, initarg=initarg
175 177
    )
176 178
    return prior_process
@@ -178,7 +180,7 @@
Loading
178 180
179 181
def _setup_regression_problem(H, R, observations, locations):
180 182
    zero_shift_mm = np.zeros(H.shape[0])
181 -
    measmod = statespace.DiscreteLTIGaussian(
183 +
    measmod = randprocs.markov.discrete.DiscreteLTIGaussian(
182 184
        state_trans_mat=H, shift_vec=zero_shift_mm, proc_noise_cov_mat=R
183 185
    )
184 186
    measurement_models = [measmod] * len(locations)

@@ -0,0 +1,50 @@
Loading
1 +
"""Conversion functions for representations of integrators."""
2 +
3 +
import numpy as np
4 +
5 +
from probnum.randprocs.markov.integrator import _integrator
6 +
from probnum.typing import IntArgType
7 +
8 +
9 +
def convert_derivwise_to_coordwise(
10 +
    state: np.ndarray, num_derivatives: IntArgType, wiener_process_dimension: IntArgType
11 +
) -> np.ndarray:
12 +
    """Convert coordinate-wise representation to derivative-wise representation.
13 +
14 +
    Lightweight call to the respective property in :class:`Integrator`.
15 +
16 +
    Parameters
17 +
    ----------
18 +
    state:
19 +
        State to be converted. Assumed to be in coordinate-wise representation.
20 +
    num_derivatives:
21 +
        Order of the integrator-state. Usually, this is the order of the highest derivative in the state.
22 +
    wiener_process_dimension:
23 +
        Spatial dimension of the integrator. Usually, this is the number of states associated with each derivative.
24 +
    """
25 +
    projmat = _integrator.IntegratorTransition(
26 +
        num_derivatives, wiener_process_dimension
27 +
    )._derivwise2coordwise_projmat
28 +
    return projmat @ state
29 +
30 +
31 +
def convert_coordwise_to_derivwise(
32 +
    state: np.ndarray, num_derivatives: IntArgType, wiener_process_dimension: IntArgType
33 +
) -> np.ndarray:
34 +
    """Convert coordinate-wise representation to derivative-wise representation.
35 +
36 +
    Lightweight call to the respective property in :class:`Integrator`.
37 +
38 +
    Parameters
39 +
    ----------
40 +
    state:
41 +
        State to be converted. Assumed to be in derivative-wise representation.
42 +
    num_derivatives:
43 +
        Order of the integrator-state. Usually, this is the order of the highest derivative in the state.
44 +
    wiener_process_dimension:
45 +
        Spatial dimension of the integrator. Usually, this is the number of states associated with each derivative.
46 +
    """
47 +
    projmat = _integrator.IntegratorTransition(
48 +
        num_derivatives, wiener_process_dimension
49 +
    )._coordwise2derivwise_projmat
50 +
    return projmat @ state

@@ -0,0 +1,22 @@
Loading
1 +
"""Discrete-time transitions."""
2 +
3 +
from ._condition_state import condition_state_on_measurement, condition_state_on_rv
4 +
from ._discrete_gaussian import (
5 +
    DiscreteGaussian,
6 +
    DiscreteLinearGaussian,
7 +
    DiscreteLTIGaussian,
8 +
)
9 +
10 +
# Public classes and functions. Order is reflected in documentation.
11 +
__all__ = [
12 +
    "DiscreteGaussian",
13 +
    "DiscreteLinearGaussian",
14 +
    "DiscreteLTIGaussian",
15 +
    "condition_state_on_rv",
16 +
    "condition_state_on_measurement",
17 +
]
18 +
19 +
# Set correct module paths. Corrects links and module paths in documentation.
20 +
DiscreteGaussian.__module__ = "probnum.randprocs.markov.discrete"
21 +
DiscreteLinearGaussian.__module__ = "probnum.randprocs.markov.discrete"
22 +
DiscreteLTIGaussian.__module__ = "probnum.randprocs.markov.discrete"
0 23
imilarity index 100%
1 24
ename from src/probnum/statespace/discrete_transition_utils.py
2 25
ename to src/probnum/randprocs/markov/discrete/_condition_state.py
3 26
imilarity index 97%
4 27
ename from src/probnum/statespace/discrete_transition.py
5 28
ename to src/probnum/randprocs/markov/discrete/_discrete_gaussian.py
Files Coverage
src/probnum 86.87%
Project Totals (134 files) 86.87%
1
coverage:
2
  precision: 2
3
  status:
4
    project:
5
      default:
6
        target: auto
7
        threshold: 1%
8
    patch:
9
      default:
10
        target: 90%
11
        threshold: 1%
12

13
comment:
14
  layout: "reach, diff, files"
15
  behavior: default
16
  require_changes: true
Sunburst
The inner-most circle is the entire project, moving away from the center are folders then, finally, a single file. The size and color of each slice is representing the number of statements and the coverage, respectively.
Icicle
The top section represents the entire project. Proceeding with folders and finally individual files. The size and color of each slice is representing the number of statements and the coverage, respectively.
Grid
Each block represents a single file in the project. The size and color of each block is represented by the number of statements and the coverage, respectively.
Loading