@@ -0,0 +1,29 @@
 1 + """ODE Information operators.""" 2 + 3 + from ._approx_information_operator import ( 4 +  ApproximateInformationOperator, 5 +  LocallyLinearizedInformationOperator, 6 + ) 7 + from ._information_operator import InformationOperator, ODEInformationOperator 8 + from ._ode_residual import ODEResidual 9 + 10 + __all__ = [ 11 +  "InformationOperator", 12 +  "ODEInformationOperator", 13 +  "ODEResidual", 14 +  "ApproximateInformationOperator", 15 +  "LocallyLinearizedInformationOperator", 16 + ] 17 + 18 + 19 + # Set correct module paths (for superclasses). 20 + # Corrects links and module paths in documentation. 21 + InformationOperator.__module__ = "probnum.diffeq.odefiltsmooth.information_operators" 22 + ODEInformationOperator.__module__ = "probnum.diffeq.odefiltsmooth.information_operators" 23 + ODEResidual.__module__ = "probnum.diffeq.odefiltsmooth.information_operators" 24 + ApproximateInformationOperator.__module__ = ( 25 +  "probnum.diffeq.odefiltsmooth.information_operators" 26 + ) 27 + LocallyLinearizedInformationOperator.__module__ = ( 28 +  "probnum.diffeq.odefiltsmooth.information_operators" 29 + )

@@ -0,0 +1,152 @@
 1 + """Utility functions for differential equation problems.""" 2 + 3 + from typing import Optional, Sequence, Union 4 + 5 + import numpy as np 6 + 7 + from probnum import problems, randprocs 8 + from probnum.diffeq.odefiltsmooth import approx_strategies, information_operators 9 + from probnum.typing import FloatArgType 10 + 11 + __all__ = ["ivp_to_regression_problem"] 12 + 13 + # The ODE information operator is not optional, because in order to create it 14 + # one needs to know the order of the algorithm that is desired (i.e. num_prior_derivatives). 15 + # Since this is a weird input for the function, it seems safer to just require 16 + # the full operator. 17 + def ivp_to_regression_problem( 18 +  ivp: problems.InitialValueProblem, 19 +  locations: Union[Sequence, np.ndarray], 20 +  ode_information_operator: information_operators.InformationOperator, 21 +  approx_strategy: Optional[approx_strategies.ApproximationStrategy] = None, 22 +  ode_measurement_variance: Optional[FloatArgType] = 0.0, 23 +  exclude_initial_condition=False, 24 + ): 25 +  """Transform an initial value problem into a regression problem. 26 + 27 +  Parameters 28 +  ---------- 29 +  ivp 30 +  Initial value problem to be transformed. 31 +  locations 32 +  Locations of the time-grid-points. 33 +  ode_information_operator 34 +  ODE information operator to use. 35 +  approx_strategy 36 +  Approximation strategy to use. Optional. Default is EK1(). 37 +  ode_measurement_variance 38 +  Artificial ODE measurement noise. Optional. Default is 0.0. 39 +  exclude_initial_condition 40 +  Whether to exclude the initial condition from the regression problem. 41 +  Optional. Default is False, in which case the returned measurement model list 42 +  consist of [initcond_mm, ode_mm, ..., ode_mm]. 43 + 44 +  Returns 45 +  ------- 46 +  problems.TimeSeriesRegressionProblem 47 +  Time-series regression problem. 
48 +  """ 49 + 50 +  # Construct data and solution 51 +  N = len(locations) 52 +  data = np.zeros((N, ivp.dimension)) 53 +  if ivp.solution is not None: 54 +  solution = np.stack([ivp.solution(t) for t in locations]) 55 +  else: 56 +  solution = None 57 + 58 +  ode_information_operator.incorporate_ode(ivp) 59 + 60 +  # Construct measurement models 61 +  measmod_initial_condition, measmod_ode = _construct_measurement_models( 62 +  ivp, ode_information_operator, approx_strategy, ode_measurement_variance 63 +  ) 64 + 65 +  if exclude_initial_condition: 66 +  measmod_list = [measmod_ode] * N 67 +  else: 68 +  measmod_list = [measmod_initial_condition] + [measmod_ode] * (N - 1) 69 + 70 +  # Return regression problem 71 +  return problems.TimeSeriesRegressionProblem( 72 +  locations=locations, 73 +  observations=data, 74 +  measurement_models=measmod_list, 75 +  solution=solution, 76 +  ) 77 + 78 + 79 + def _construct_measurement_models( 80 +  ivp, ode_information_operator, approx_strategy, ode_measurement_variance 81 + ): 82 +  """Construct measurement models for the IVP.""" 83 + 84 +  transition_matrix = np.eye( 85 +  ode_information_operator.output_dim, ode_information_operator.input_dim 86 +  ) 87 +  shift_vector = -ivp.y0 88 + 89 +  if ode_measurement_variance > 0.0: 90 +  measmod_y0, measmod_ode = _construct_measurement_models_gaussian_likelihood( 91 +  ode_information_operator, 92 +  shift_vector, 93 +  transition_matrix, 94 +  approx_strategy, 95 +  ode_measurement_variance, 96 +  ) 97 +  else: 98 +  measmod_y0, measmod_ode = _construct_measurement_models_dirac_likelihood( 99 +  ode_information_operator, 100 +  shift_vector, 101 +  transition_matrix, 102 +  approx_strategy, 103 +  ) 104 +  return measmod_y0, measmod_ode 105 + 106 + 107 + def _construct_measurement_models_gaussian_likelihood( 108 +  ode_information_operator, 109 +  shift_vector, 110 +  transition_matrix, 111 +  approx_strategy, 112 +  ode_measurement_variance, 113 + ): 114 +  """Construct 
measurement models for the IVP with Gaussian likelihoods.""" 115 + 116 +  def diff(t): 117 +  return ode_measurement_variance * np.eye(ode_information_operator.output_dim) 118 + 119 +  def diff_cholesky(t): 120 +  return np.sqrt(ode_measurement_variance) * np.eye( 121 +  ode_information_operator.output_dim 122 +  ) 123 + 124 +  measmod_initial_condition = randprocs.markov.discrete.DiscreteLTIGaussian( 125 +  state_trans_mat=transition_matrix, 126 +  shift_vec=shift_vector, 127 +  proc_noise_cov_mat=diff(None), 128 +  proc_noise_cov_cholesky=diff_cholesky(None), 129 +  ) 130 +  if approx_strategy is not None: 131 +  ode_information_operator = approx_strategy(ode_information_operator) 132 +  measmod_ode = ode_information_operator.as_transition( 133 +  measurement_cov_fun=diff, measurement_cov_cholesky_fun=diff_cholesky 134 +  ) 135 + 136 +  return measmod_initial_condition, measmod_ode 137 + 138 + 139 + def _construct_measurement_models_dirac_likelihood( 140 +  ode_information_operator, shift_vector, transition_matrix, approx_strategy 141 + ): 142 +  """Construct measurement models for the IVP with Dirac likelihoods.""" 143 +  measmod_initial_condition = ( 144 +  randprocs.markov.discrete.DiscreteLTIGaussian.from_linop( 145 +  state_trans_mat=transition_matrix, shift_vec=shift_vector 146 +  ) 147 +  ) 148 +  if approx_strategy is not None: 149 +  ode_information_operator = approx_strategy(ode_information_operator) 150 +  measmod_ode = ode_information_operator.as_transition() 151 + 152 +  return measmod_initial_condition, measmod_ode

@@ -60,7 +60,7 @@
 60 60  self.perturb_step = perturb_step 61 61  self.solver = solver 62 62  self.scales = None 63 -  super().__init__(ivp=solver.ivp, order=solver.order) 63 +  super().__init__(steprule=solver.steprule, order=solver.order) 64 64 65 65  @classmethod 66 66  def construct_with_lognormal_perturbation(
@@ -86,10 +86,14 @@
 86 86  rng=rng, solver=solver, noise_scale=noise_scale, perturb_function=pertfun 87 87  ) 88 88 89 -  def initialise(self): 89 +  def initialise(self, ivp): 90 90  """Initialise and reset the solver.""" 91 91  self.scales = [] 92 -  return self.solver.initialise() 92 +  return self.solver.initialise(ivp) 93 + 94 +  @property 95 +  def ivp(self): 96 +  return self.solver.ivp 93 97 94 98  def step( 95 99  self, start: FloatArgType, stop: FloatArgType, current: randvars, **kwargs

@@ -0,0 +1,19 @@
 1 + """Approximate information operators.""" 2 + 3 + import abc 4 + 5 + from probnum.diffeq.odefiltsmooth import information_operators 6 + 7 + 8 + class ApproximationStrategy(abc.ABC): 9 +  """Interface for approximation strategies. 10 + 11 +  Turn an information operator into an approximate information operator that converts 12 +  into a :class:`GaussianIVPFilter` compatible :class:`Transition`. 13 +  """ 14 + 15 +  def __call__( 16 +  self, information_operator: information_operators.InformationOperator 17 +  ) -> information_operators.ApproximateInformationOperator: 18 +  """Derive a tractable approximation of an information operator.""" 19 +  raise NotImplementedError

@@ -2,11 +2,10 @@
 2 2 3 3 import numpy as np 4 4 5 - from probnum import filtsmooth, problems, randprocs, randvars 5 + from probnum import diffeq, filtsmooth, problems, randprocs, randvars 6 + from probnum.problems.zoo import diffeq as diffeq_zoo 6 7 from probnum.typing import FloatArgType, IntArgType 7 8 8 - from .. import diffeq # diffeq zoo 9 - 10 9 __all__ = [ 11 10  "benes_daum", 12 11  "car_tracking",
@@ -512,6 +511,7 @@
 512 511  initrv: Optional[randvars.RandomVariable] = None, 513 512  evlvar: Optional[Union[np.ndarray, FloatArgType]] = None, 514 513  ek0_or_ek1: IntArgType = 1, 514 +  exclude_initial_condition: bool = True, 515 515  order: IntArgType = 3, 516 516  forward_implementation: str = "classic", 517 517  backward_implementation: str = "classic",
@@ -536,6 +536,9 @@
 536 536  See :py:class:`probnum.diffeq.GaussianIVPFilter` 537 537  ek0_or_ek1 538 538  See :py:class:`probnum.diffeq.GaussianIVPFilter` 539 +  exclude_initial_condition 540 +  Whether the resulting regression problem should exclude (i.e. not contain) the initial condition of the ODE. 541 +  Optional. Default is True, which means that the initial condition is omitted. 539 542  order 540 543  Order of integration for the Integrated Brownian Motion prior of the solver. 541 544  forward_implementation
@@ -566,40 +569,43 @@
 566 569  evlvar = np.zeros((1, 1)) 567 570 568 571  t0, tmax = timespan 569 -  logistic_ivp = diffeq.logistic(t0=t0, tmax=tmax, y0=y0, params=params) 570 -  dynamics_model = randprocs.markov.integrator.IntegratedWienerTransition( 571 -  num_derivatives=order, 572 -  wiener_process_dimension=1, 573 -  forward_implementation=forward_implementation, 574 -  backward_implementation=backward_implementation, 572 + 573 +  # Generate ODE regression problem 574 +  logistic_ivp = diffeq_zoo.logistic(t0=t0, tmax=tmax, y0=y0, params=params) 575 +  time_grid = np.arange(*timespan, step=step) 576 +  ode_residual = diffeq.odefiltsmooth.information_operators.ODEResidual( 577 +  num_prior_derivatives=order, ode_dimension=logistic_ivp.dimension 575 578  ) 576 -  measurement_model = filtsmooth.gaussian.approx.DiscreteEKFComponent.from_ode( 577 -  logistic_ivp, 578 -  prior=dynamics_model, 579 -  evlvar=evlvar, 580 -  ek0_or_ek1=ek0_or_ek1, 581 -  forward_implementation=forward_implementation, 582 -  backward_implementation=backward_implementation, 579 +  if ek0_or_ek1 == 0: 580 +  ek = diffeq.odefiltsmooth.approx_strategies.EK0() 581 +  else: 582 +  ek = diffeq.odefiltsmooth.approx_strategies.EK1() 583 +  regression_problem = diffeq.odefiltsmooth.utils.ivp_to_regression_problem( 584 +  ivp=logistic_ivp, 585 +  locations=time_grid, 586 +  ode_information_operator=ode_residual, 587 +  approx_strategy=ek, 588 +  ode_measurement_variance=evlvar, 589 +  exclude_initial_condition=exclude_initial_condition, 583 590  ) 584 591 592 +  # Generate prior process 585 593  if initrv is None: 586 594  initmean = np.array([0.1, 0, 0.0, 0.0]) 587 595  initcov = np.diag([0.0, 1.0, 1.0, 1.0]) 588 596  initrv = randvars.Normal(initmean, initcov) 589 - 590 -  # Generate zero-data 591 -  time_grid = np.arange(*timespan, step=step) 592 -  solution = logistic_ivp.solution(time_grid) 593 -  regression_problem = problems.TimeSeriesRegressionProblem( 594 -  observations=np.zeros(shape=(time_grid.size, 1)), 595 
-  locations=time_grid, 596 -  measurement_models=measurement_model, 597 -  solution=solution, 597 +  dynamics_model = randprocs.markov.integrator.IntegratedWienerTransition( 598 +  num_derivatives=order, 599 +  wiener_process_dimension=1, 600 +  forward_implementation=forward_implementation, 601 +  backward_implementation=backward_implementation, 598 602  ) 599 603 600 604  prior_process = randprocs.markov.MarkovProcess( 601 605  transition=dynamics_model, initrv=initrv, initarg=time_grid[0] 602 606  ) 607 + 608 +  # Return problems and info 603 609  info = dict( 604 610  ivp=logistic_ivp, 605 611  prior_process=prior_process,

@@ -1,6 +1,13 @@
 1 1 """ODE Filtering.""" 2 2 3 + from . import utils 3 4 from ._ivpfiltsmooth import GaussianIVPFilter 4 5 from ._kalman_odesolution import KalmanODESolution 5 6 6 7 __all__ = ["GaussianIVPFilter", "KalmanODESolution"] 8 + 9 + 10 + # Set correct module paths (for superclasses). 11 + # Corrects links and module paths in documentation. 12 + GaussianIVPFilter.__module__ = "probnum.diffeq.odefiltsmooth" 13 + KalmanODESolution.__module__ = "probnum.diffeq.odefiltsmooth"

@@ -1,8 +1,8 @@
 1 1 """General Gaussian filters based on approximating intractable quantities with numerical 2 2 quadrature. 3 3 4 - Examples include the unscented Kalman filter / RTS smoother which is 5 - based on a third degree fully symmetric rule. 4 + Examples include the unscented Kalman filter / RTS smoother which is based on a third 5 + degree fully symmetric rule. 6 6 """ 7 7 8 8 from typing import Dict, Optional, Tuple
@@ -254,15 +254,3 @@
 254 254  @property 255 255  def dimension(self) -> int: 256 256  return self.ut.dimension 257 - 258 -  @classmethod 259 -  def from_ode( 260 -  cls, 261 -  ode, 262 -  prior, 263 -  evlvar=0.0, 264 -  ): 265 -  discrete_model = randprocs.discrete.DiscreteGaussian.from_ode( 266 -  ode=ode, prior=prior, evlvar=evlvar 267 -  ) 268 -  return cls(discrete_model)

@@ -160,33 +160,26 @@
 160 160  return np.linalg.cholesky(covmat) 161 161 162 162  @classmethod 163 -  def from_ode( 163 +  def from_callable( 164 164  cls, 165 -  ode, 166 -  prior, 167 -  evlvar=0.0, 165 +  input_dim: IntArgType, 166 +  output_dim: IntArgType, 167 +  state_trans_fun: Callable[[FloatArgType, np.ndarray], np.ndarray], 168 +  jacob_state_trans_fun: Callable[[FloatArgType, np.ndarray], np.ndarray], 168 169  ): 169 - 170 -  h0 = prior.proj2coord(coord=0) 171 -  h1 = prior.proj2coord(coord=1) 172 - 173 -  def dyna(t, x): 174 -  return h1 @ x - ode.f(t, h0 @ x) 170 +  """Turn a callable into a deterministic transition.""" 175 171 176 172  def diff(t): 177 -  return evlvar * np.eye(ode.dimension) 173 +  return np.zeros((output_dim, output_dim)) 178 174 179 175  def diff_cholesky(t): 180 -  return np.sqrt(evlvar) * np.eye(ode.dimension) 181 - 182 -  def jacobian(t, x): 183 -  return h1 - ode.df(t, h0 @ x) @ h0 176 +  return np.zeros((output_dim, output_dim)) 184 177 185 178  return cls( 186 -  input_dim=prior.dimension, 187 -  output_dim=ode.dimension, 188 -  state_trans_fun=dyna, 189 -  jacob_state_trans_fun=jacobian, 179 +  input_dim=input_dim, 180 +  output_dim=output_dim, 181 +  state_trans_fun=state_trans_fun, 182 +  jacob_state_trans_fun=jacob_state_trans_fun, 190 183  proc_noise_cov_mat_fun=diff, 191 184  proc_noise_cov_cholesky_fun=diff_cholesky, 192 185  )
@@ -560,6 +553,29 @@
 560 553  return self._proc_noise_cov_cholesky 561 554  return np.linalg.cholesky(self.proc_noise_cov_mat) 562 555 556 +  @classmethod 557 +  def from_linop( 558 +  cls, 559 +  state_trans_mat: np.ndarray, 560 +  shift_vec: np.ndarray, 561 +  forward_implementation="classic", 562 +  backward_implementation="classic", 563 +  ): 564 +  """Turn a linear operator (or numpy array) into a deterministic transition.""" 565 +  # Currently, this is only a numpy array. 566 +  # In the future, once linops are more widely adopted here, this will become a linop. 567 +  zero_matrix = np.zeros((state_trans_mat.shape[0], state_trans_mat.shape[0])) 568 +  if state_trans_mat.ndim != 2: 569 +  raise ValueError 570 +  return cls( 571 +  state_trans_mat=state_trans_mat, 572 +  shift_vec=shift_vec, 573 +  proc_noise_cov_mat=zero_matrix, 574 +  proc_noise_cov_cholesky=zero_matrix, 575 +  forward_implementation=forward_implementation, 576 +  backward_implementation=backward_implementation, 577 +  ) 578 + 563 579 564 580 def _check_dimensions(state_trans_mat, shift_vec, proc_noise_cov_mat): 565 581  """LTI SDE model needs matrices which are compatible with each other in size."""

@@ -166,13 +166,26 @@
 166 166 167 167  ivp = problems.InitialValueProblem(t0=t0, tmax=tmax, y0=np.asarray(y0), f=f) 168 168 169 +  # Create steprule 170 +  if adaptive is True: 171 +  if atol is None or rtol is None: 172 +  raise ValueError( 173 +  "Please provide absolute and relative tolerance for adaptive steps." 174 +  ) 175 +  firststep = step if step is not None else stepsize.propose_firststep(ivp) 176 +  steprule = stepsize.AdaptiveSteps(firststep=firststep, atol=atol, rtol=rtol) 177 +  else: 178 +  steprule = stepsize.ConstantSteps(step) 179 + 169 180  if method not in METHODS.keys(): 170 181  msg1 = f"Parameter method='{method}' is not supported. " 171 182  msg2 = f"Possible values are {list(METHODS.keys())}." 172 183  errormsg = msg1 + msg2 173 184  raise ValueError(errormsg) 174 -  scipy_solver = METHODS[method](ivp.f, ivp.t0, ivp.y0, ivp.tmax) 175 -  wrapped_scipy_solver = perturbed.scipy_wrapper.WrappedScipyRungeKutta(scipy_solver) 185 +  scipy_solver = METHODS[method] 186 +  wrapped_scipy_solver = perturbed.scipy_wrapper.WrappedScipyRungeKutta( 187 +  scipy_solver, steprule=steprule 188 +  ) 176 189 177 190  if perturb not in PERTURBS.keys(): 178 191  msg1 = f"Parameter perturb='{perturb}' is not supported. "
@@ -183,15 +196,4 @@
 183 196  rng=rng, solver=wrapped_scipy_solver, noise_scale=noise_scale 184 197  ) 185 198 186 -  # Create steprule 187 -  if adaptive is True: 188 -  if atol is None or rtol is None: 189 -  raise ValueError( 190 -  "Please provide absolute and relative tolerance for adaptive steps." 191 -  ) 192 -  firststep = step if step is not None else stepsize.propose_firststep(ivp) 193 -  steprule = stepsize.AdaptiveSteps(firststep=firststep, atol=atol, rtol=rtol) 194 -  else: 195 -  steprule = stepsize.ConstantSteps(step) 196 - 197 -  return perturbed_solver.solve(steprule=steprule) 199 +  return perturbed_solver.solve(ivp=ivp)

@@ -28,6 +28,7 @@
 28 28 29 29  Examples 30 30  -------- 31 +  >>> import numpy as np 31 32  >>> from probnum.diffeq import probsolve_ivp 32 33  >>> from probnum import randvars 33 34  >>>

@@ -0,0 +1,13 @@
 1 + """Approximate information operators.""" 2 + 3 + from ._approx_strategy import ApproximationStrategy 4 + from ._ek import EK0, EK1 5 + 6 + __all__ = ["ApproximationStrategy", "EK0", "EK1"] 7 + 8 + 9 + # Set correct module paths (for superclasses). 10 + # Corrects links and module paths in documentation. 11 + ApproximationStrategy.__module__ = "probnum.diffeq.odefiltsmooth.approx_strategies" 12 + EK0.__module__ = "probnum.diffeq.odefiltsmooth.approx_strategies" 13 + EK1.__module__ = "probnum.diffeq.odefiltsmooth.approx_strategies"

@@ -0,0 +1,5 @@
 1 + """Utility functions for filtering-based differential equation problems and solvers.""" 2 + 3 + from ._problem_utils import ivp_to_regression_problem 4 + 5 + __all__ = ["ivp_to_regression_problem"]

@@ -0,0 +1,75 @@
 1 + """ODE residual information operators.""" 2 + 3 + from typing import Callable, Tuple 4 + 5 + import numpy as np 6 + 7 + from probnum import problems, randprocs 8 + from probnum.diffeq.odefiltsmooth.information_operators import _information_operator 9 + from probnum.typing import FloatArgType, IntArgType 10 + 11 + __all__ = ["ODEResidual"] 12 + 13 + 14 + class ODEResidual(_information_operator.ODEInformationOperator): 15 +  """Information operator that measures the residual of an explicit ODE.""" 16 + 17 +  def __init__(self, num_prior_derivatives: IntArgType, ode_dimension: IntArgType): 18 +  integrator_dimension = ode_dimension * (num_prior_derivatives + 1) 19 +  super().__init__(input_dim=integrator_dimension, output_dim=ode_dimension) 20 +  # Store remaining attributes 21 +  self.num_prior_derivatives = num_prior_derivatives 22 +  self.ode_dimension = ode_dimension 23 + 24 +  # Prepare caching the projection matrices 25 +  self.projection_matrices = None 26 + 27 +  # These will be assigned once the ODE has been seen 28 +  self._residual = None 29 +  self._residual_jacobian = None 30 + 31 +  def incorporate_ode(self, ode: problems.InitialValueProblem): 32 +  """Incorporate the ODE and cache the required projection matrices.""" 33 +  super().incorporate_ode(ode=ode) 34 + 35 +  # Cache the projection matrices and match the implementation to the ODE 36 +  dummy_integrator = randprocs.markov.integrator.IntegratorTransition( 37 +  num_derivatives=self.num_prior_derivatives, 38 +  wiener_process_dimension=self.ode_dimension, 39 +  ) 40 +  ode_order = 1 # currently everything we can do 41 +  self.projection_matrices = [ 42 +  dummy_integrator.proj2coord(coord=deriv) for deriv in range(ode_order + 1) 43 +  ] 44 +  res, res_jac = self._match_residual_and_jacobian_to_ode_order( 45 +  ode_order=ode_order 46 +  ) 47 +  self._residual, self._residual_jacobian = res, res_jac 48 + 49 +  def _match_residual_and_jacobian_to_ode_order( 50 +  self, ode_order: IntArgType 51 +  
) -> Tuple[Callable, Callable]: 52 +  """Choose the correct residual (and Jacobian) implementation based on the order 53 +  of the ODE.""" 54 +  choose_implementation = { 55 +  1: (self._residual_first_order_ode, self._residual_first_order_ode_jacobian) 56 +  } 57 +  return choose_implementation[ode_order] 58 + 59 +  def __call__(self, t: FloatArgType, x: np.ndarray) -> np.ndarray: 60 +  return self._residual(t, x) 61 + 62 +  def jacobian(self, t: FloatArgType, x: np.ndarray) -> np.ndarray: 63 +  return self._residual_jacobian(t, x) 64 + 65 +  # Implementation of different residuals 66 + 67 +  def _residual_first_order_ode(self, t: FloatArgType, x: np.ndarray) -> np.ndarray: 68 +  h0, h1 = self.projection_matrices 69 +  return h1 @ x - self.ode.f(t, h0 @ x) 70 + 71 +  def _residual_first_order_ode_jacobian( 72 +  self, t: FloatArgType, x: np.ndarray 73 +  ) -> np.ndarray: 74 +  h0, h1 = self.projection_matrices 75 +  return h1 - self.ode.df(t, h0 @ x) @ h0

@@ -0,0 +1,116 @@
 1 + """Interface for information operators.""" 2 + 3 + import abc 4 + from typing import Callable, Optional 5 + 6 + import numpy as np 7 + 8 + from probnum import problems, randprocs 9 + from probnum.typing import FloatArgType, IntArgType 10 + 11 + __all__ = ["InformationOperator", "ODEInformationOperator"] 12 + 13 + 14 + class InformationOperator(abc.ABC): 15 +  r"""Information operators used in probabilistic ODE solvers. 16 + 17 +  ODE solver-related information operators gather information about whether a state or function solves an ODE. 18 +  More specifically, an information operator maps a sample from the prior distribution 19 +  **that is also an ODE solution** to the zero function. 20 + 21 +  Consider the following example. For an ODE 22 + 23 +  .. math:: \dot y(t) - f(t, y(t)) = 0, 24 + 25 +  and a :math:\nu times integrated Wiener process prior, 26 +  the information operator maps 27 + 28 +  .. math:: \mathcal{Z}: [t, (Y_0, Y_1, ..., Y_\nu)] \mapsto Y_1(t) - f(t, Y_0(t)). 29 + 30 +  (Recall that :math:Y_j models the j th derivative of Y_0 for given prior.) 31 +  If :math:Y_0 solves the ODE, :math:\mathcal{Z}(Y)(t) is zero for all :math:t. 32 + 33 +  Information operators are used to condition prior distributions on solving a numerical problem. 34 +  This happens by conditioning the prior distribution :math:Y on :math:\mathcal{Z}(Y)(t_n)=0 35 +  on time-points :math:t_1, ..., t_n, ..., t_N (:math:N is usually large). 36 +  Therefore, they are one important component in a probabilistic ODE solver. 
37 +  """ 38 + 39 +  def __init__(self, input_dim: IntArgType, output_dim: IntArgType): 40 +  self.input_dim = input_dim 41 +  self.output_dim = output_dim 42 + 43 +  @abc.abstractmethod 44 +  def __call__(self, t: FloatArgType, x: np.ndarray) -> np.ndarray: 45 +  raise NotImplementedError 46 + 47 +  def jacobian(self, t: FloatArgType, x: np.ndarray) -> np.ndarray: 48 +  raise NotImplementedError 49 + 50 +  def as_transition( 51 +  self, 52 +  measurement_cov_fun: Optional[Callable[[FloatArgType], np.ndarray]] = None, 53 +  measurement_cov_cholesky_fun: Optional[ 54 +  Callable[[FloatArgType], np.ndarray] 55 +  ] = None, 56 +  ): 57 + 58 +  if measurement_cov_fun is None: 59 +  if measurement_cov_cholesky_fun is not None: 60 +  raise ValueError( 61 +  "If a Cholesky function is provided, a covariance function must be provided as well." 62 +  ) 63 +  return randprocs.markov.discrete.DiscreteGaussian.from_callable( 64 +  state_trans_fun=self.__call__, 65 +  jacob_state_trans_fun=self.jacobian, 66 +  input_dim=self.input_dim, 67 +  output_dim=self.output_dim, 68 +  ) 69 + 70 +  return randprocs.markov.discrete.DiscreteGaussian( 71 +  state_trans_fun=self.__call__, 72 +  jacob_state_trans_fun=self.jacobian, 73 +  proc_noise_cov_mat_fun=measurement_cov_fun, 74 +  proc_noise_cov_cholesky_fun=measurement_cov_cholesky_fun, 75 +  input_dim=self.input_dim, 76 +  output_dim=self.output_dim, 77 +  ) 78 + 79 + 80 + class ODEInformationOperator(InformationOperator): 81 +  """Information operators that depend on an ODE function. 82 + 83 +  Other than :class:InformationOperators, :class:ODEInformationOperators depend explicitly on an 84 +  :class:InitialValueProblem. Not all information operators that are used in ODE solvers do. 
85 +  """ 86 + 87 +  def __init__(self, input_dim: IntArgType, output_dim: IntArgType): 88 +  super().__init__(input_dim=input_dim, output_dim=output_dim) 89 + 90 +  # Initialized once the ODE can be seen 91 +  self.ode = None 92 + 93 +  def incorporate_ode(self, ode: problems.InitialValueProblem): 94 +  """Incorporate the ODE into the operator.""" 95 +  if self.ode_has_been_incorporated: 96 +  raise ValueError("An ODE has been incorporated already.") 97 +  else: 98 +  self.ode = ode 99 + 100 +  @property 101 +  def ode_has_been_incorporated(self) -> bool: 102 +  return self.ode is not None 103 + 104 +  def as_transition( 105 +  self, 106 +  measurement_cov_fun: Optional[Callable[[FloatArgType], np.ndarray]] = None, 107 +  measurement_cov_cholesky_fun: Optional[ 108 +  Callable[[FloatArgType], np.ndarray] 109 +  ] = None, 110 +  ): 111 +  if not self.ode_has_been_incorporated: 112 +  raise ValueError("An ODE has not been incorporated yet.") 113 +  return super().as_transition( 114 +  measurement_cov_fun=measurement_cov_fun, 115 +  measurement_cov_cholesky_fun=measurement_cov_cholesky_fun, 116 +  )

@@ -0,0 +1,86 @@
 1 + """Extended Kalman filter-based approximation strategies. 2 + 3 + Make an intractable information operator tractable with local linearization. 4 + """ 5 + from typing import Optional 6 + 7 + import numpy as np 8 + 9 + from probnum import problems 10 + from probnum.diffeq.odefiltsmooth import information_operators 11 + from probnum.diffeq.odefiltsmooth.approx_strategies import _approx_strategy 12 + 13 + 14 + class EK1(_approx_strategy.ApproximationStrategy): 15 +  """Make inference with an (ODE-)information operator tractable using a first-order 16 +  linearization of the ODE vector-field.""" 17 + 18 +  def __init__( 19 +  self, 20 +  forward_implementation: Optional[str] = "sqrt", 21 +  backward_implementation: Optional[str] = "sqrt", 22 +  ): 23 +  self.forward_implementation = forward_implementation 24 +  self.backward_implementation = backward_implementation 25 + 26 +  def __call__( 27 +  self, information_operator: information_operators.InformationOperator 28 +  ) -> information_operators.LocallyLinearizedInformationOperator: 29 + 30 +  return information_operators.LocallyLinearizedInformationOperator( 31 +  information_operator=information_operator, 32 +  forward_implementation=self.forward_implementation, 33 +  backward_implementation=self.backward_implementation, 34 +  ) 35 + 36 + 37 + class EK0(_approx_strategy.ApproximationStrategy): 38 +  """Make inference with the information operator tractable using a zeroth-order 39 +  linearization of the ODE vector-field. 40 + 41 +  This only applies to standard (explicit) ODEs. Implicit ODEs must use the EK1. 
42 +  """ 43 + 44 +  def __init__( 45 +  self, 46 +  forward_implementation: Optional[str] = "sqrt", 47 +  backward_implementation: Optional[str] = "sqrt", 48 +  ): 49 +  self.forward_implementation = forward_implementation 50 +  self.backward_implementation = backward_implementation 51 + 52 +  def __call__( 53 +  self, information_operator: information_operators.ODEResidual 54 +  ) -> information_operators.LocallyLinearizedInformationOperator: 55 + 56 +  if not information_operator.ode_has_been_incorporated: 57 +  raise ValueError( 58 +  "ODE has not been incorporated into the ODE information operator." 59 +  ) 60 + 61 +  # The following EK0 implementation generalizes to higher-order ODEs in the sense 62 +  # that for higher order ODEs, the attribute df is a list of Jacobians, 63 +  # and in this case we can loop over "all" Jacobians and set them to the 64 +  # custom (zero) linearization. 65 +  ode = information_operator.ode 66 +  custom_linearization = lambda t, x: np.zeros((len(x), len(x))) 67 +  new_ivp = problems.InitialValueProblem( 68 +  f=ode.f, 69 +  df=custom_linearization, 70 +  y0=ode.y0, 71 +  t0=ode.t0, 72 +  tmax=ode.tmax, 73 +  solution=ode.solution, 74 +  ) 75 + 76 +  # From here onwards, mimic the EK1 implementation. 77 +  ek0_information_operator = information_operators.ODEResidual( 78 +  num_prior_derivatives=information_operator.num_prior_derivatives, 79 +  ode_dimension=information_operator.ode_dimension, 80 +  ) 81 +  ek0_information_operator.incorporate_ode(ode=new_ivp) 82 +  return information_operators.LocallyLinearizedInformationOperator( 83 +  information_operator=ek0_information_operator, 84 +  forward_implementation=self.forward_implementation, 85 +  backward_implementation=self.backward_implementation, 86 +  )

@@ -9,10 +9,11 @@
 9 9 10 10  Examples 11 11  -------- 12 -  >>> from probnum.filtsmooth.gaussian.approx import DiscreteEKFComponent 13 12  >>> from probnum.filtsmooth.optim import StoppingCriterion 13 +  >>> from probnum.filtsmooth.gaussian.approx import DiscreteEKFComponent 14 14  >>> from probnum.problems.zoo.diffeq import logistic 15 15  >>> from probnum.randprocs.markov.integrator import IntegratedWienerProcess 16 +  >>> from probnum.randprocs.markov.discrete import DiscreteGaussian 16 17  >>> from probnum.randvars import Constant 17 18  >>> import numpy as np 18 19  >>>
@@ -20,7 +21,11 @@
 20 21  Set up an iterated component. 21 22 22 23  >>> iwp = IntegratedWienerProcess(initarg=0., num_derivatives=2, wiener_process_dimension=1) 23 -  >>> ekf = DiscreteEKFComponent.from_ode(logistic(t0=0., tmax=1., y0=np.array([0.1])), iwp.transition, 0.) 24 +  >>> H0, H1 = iwp.transition.proj2coord(coord=0), iwp.transition.proj2coord(coord=1) 25 +  >>> call = lambda t, x: H1 @ x - H0 @ x * (1 - H0 @ x) 26 +  >>> jacob = lambda t, x: H1 - (1 - 2*(H0 @ x)) @ H0 27 +  >>> nonlinear_model = DiscreteGaussian.from_callable(3, 1, call, jacob) 28 +  >>> ekf = DiscreteEKFComponent(nonlinear_model) 24 29  >>> comp = IteratedDiscreteComponent(ekf, StoppingCriterion()) 25 30 26 31  Generate some random variables and pseudo observations.
@@ -37,16 +42,16 @@
 37 42  3 38 43  >>> out, info = comp.forward_realization(some_array,some_rv,) 39 44  >>> print(out.mean) 40 -  [0.73] 45 +  [0.91] 41 46 42 47  But its backward values are different, because of the iteration. 43 48 44 49  >>> out_ekf, _ = ekf.backward_rv(rv_observed, rv) 45 50  >>> print(out_ekf.mean) 46 -  [ 0.18392711 0.504723 -8.429155 ] 51 +  [ 0.17081493 0.15351366 -13.73607367] 47 52  >>> out_iterated, _ = comp.backward_rv(rv_observed, rv) 48 53  >>> print(out_iterated.mean) 49 -  [ 0.18201288 0.45367681 -9.1948478 ] 54 +  [ 0.17076427 0.15194483 -13.76505168] 50 55  """ 51 56 52 57  def __init__(

@@ -1,13 +1,13 @@
 1 1 """Wrapper class of scipy.integrate. for RK23 and RK45. 2 2 3 - Dense-output can not be used for DOP853, if you use other RK-methods, 4 - make sure, that the current implementation works for them. 3 + Dense-output can not be used for DOP853, if you use other RK-methods, make sure, that 4 + the current implementation works for them. 5 5 """ 6 6 import numpy as np 7 7 from scipy.integrate._ivp import rk 8 8 from scipy.integrate._ivp.common import OdeSolution 9 9 10 - from probnum import problems, randvars 10 + from probnum import randvars 11 11 from probnum.diffeq import _odesolver 12 12 from probnum.diffeq.perturbed.scipy_wrapper import _wrapped_scipy_odesolution 13 13 from probnum.typing import FloatArgType
@@ -16,26 +16,22 @@
 16 16 class WrappedScipyRungeKutta(_odesolver.ODESolver): 17 17  """Wrapper for Runge-Kutta methods from SciPy.""" 18 18 19 -  def __init__(self, solver: rk.RungeKutta): 20 -  self.solver = solver 19 +  def __init__(self, solver_type: rk.RungeKutta, steprule): 20 +  self.solver_type = solver_type 21 21  self.interpolants = None 22 22 23 -  # ProbNum ODESolver needs an ivp 24 -  ivp = problems.InitialValueProblem( 25 -  t0=self.solver.t, 26 -  tmax=self.solver.t_bound, 27 -  y0=self.solver.y, 28 -  f=self.solver._fun, 29 -  ) 23 +  # Filled in later 24 +  self.solver = None 25 +  self.ivp = None 30 26 31 27  # Dopri853 as implemented in SciPy computes the dense output differently. 32 -  if isinstance(solver, rk.DOP853): 28 +  if issubclass(solver_type, rk.DOP853): 33 29  raise TypeError( 34 30  "Dense output interpolation of DOP853 is currently not supported. Choose a different RK-method." 35 31  ) 36 -  super().__init__(ivp=ivp, order=solver.order) 32 +  super().__init__(steprule=steprule, order=solver_type.order) 37 33 38 -  def initialise(self): 34 +  def initialise(self, ivp): 39 35  """Return t0 and y0 (for the solver, which might be different to ivp.y0) and 40 36  initialize the solver. Reset the solver when solving the ODE multiple times, 41 37  i.e. explicitly setting y_old, t, y and f to the respective initial values,
@@ -48,6 +44,8 @@
 48 44  self.ivp.initrv: randvars.RandomVariable 49 45  initial random variable 50 46  """ 47 +  self.solver = self.solver_type(ivp.f, ivp.t0, ivp.y0, ivp.tmax) 48 +  self.ivp = ivp 51 49 52 50  self.interpolants = [] 53 51  self.solver.y_old = None

@@ -5,9 +5,14 @@
 5 5 import numpy as np 6 6 import scipy.linalg 7 7 8 - from probnum import filtsmooth, problems, randprocs, randvars, utils 9 - from probnum.diffeq import _odesolver 10 - from probnum.diffeq.odefiltsmooth import _kalman_odesolution, initialization_routines 8 + from probnum import filtsmooth, randprocs, randvars, utils 9 + from probnum.diffeq import _odesolver, stepsize 10 + from probnum.diffeq.odefiltsmooth import ( 11 +  _kalman_odesolution, 12 +  approx_strategies, 13 +  information_operators, 14 +  initialization_routines, 15 + ) 11 16 12 17 13 18 class GaussianIVPFilter(_odesolver.ODESolver):
@@ -48,9 +53,10 @@
 48 53 49 54  def __init__( 50 55  self, 51 -  ivp: problems.InitialValueProblem, 56 +  steprule: stepsize.StepRule, 52 57  prior_process: randprocs.markov.MarkovProcess, 53 -  measurement_model: randprocs.markov.discrete.DiscreteGaussian, 58 +  information_operator: information_operators.ODEInformationOperator, 59 +  approx_strategy: approx_strategies.ApproximationStrategy, 54 60  with_smoothing: bool, 55 61  initialization_routine: initialization_routines.InitializationRoutine, 56 62  diffusion_model: Optional[randprocs.markov.continuous.Diffusion] = None,
@@ -65,12 +71,19 @@
 65 71  ) 66 72 67 73  self.prior_process = prior_process 68 -  self.measurement_model = measurement_model 74 + 75 +  self.information_operator = information_operator 76 +  self.approx_strategy = approx_strategy 77 + 78 +  # Filled in in initialise(), once the ODE has been seen. 79 +  self.measurement_model = None 69 80 70 81  self.sigma_squared_mle = 1.0 71 82  self.with_smoothing = with_smoothing 72 83  self.initialization_routine = initialization_routine 73 -  super().__init__(ivp=ivp, order=prior_process.transition.num_derivatives) 84 +  super().__init__( 85 +  steprule=steprule, order=prior_process.transition.num_derivatives 86 +  ) 74 87 75 88  # Set up the diffusion_model style: constant or piecewise constant. 76 89  self.diffusion_model = (
@@ -93,13 +106,22 @@
 93 106  # or from any other state. 94 107  self._reference_coordinates = _reference_coordinates 95 108 96 -  def initialise(self): 109 +  def initialise(self, ivp): 110 +  self.information_operator.incorporate_ode(ode=ivp) 97 111  initrv = self.initialization_routine( 98 -  ivp=self.ivp, prior_process=self.prior_process 112 +  ivp=self.ivp, 113 +  prior_process=self.prior_process, 99 114  ) 100 115 116 +  self.measurement_model = self.approx_strategy( 117 +  self.information_operator 118 +  ).as_transition() 101 119  return self.ivp.t0, initrv 102 120 121 +  @property 122 +  def ivp(self): 123 +  return self.information_operator.ode 124 + 103 125  def step(self, t, t_new, current_rv): 104 126  r"""Gaussian IVP filter step as nonlinear Kalman filtering with zero data. 105 127
@@ -320,45 +342,3 @@
 320 342  ) 321 343 322 344  return _kalman_odesolution.KalmanODESolution(kalman_posterior) 323 - 324 -  @staticmethod 325 -  def string_to_measurement_model( 326 -  measurement_model_string, ivp, prior_process, measurement_noise_covariance=0.0 327 -  ): 328 -  """Construct a measurement model :math:\\mathcal{N}(g(m), R) for an ODE. 329 - 330 -  Return a :class:DiscreteGaussian (:class:DiscreteEKFComponent) that provides 331 -  a tractable approximation of the transition densities based on the local defect of the ODE 332 - 333 -  .. math:: g(m) = H_1 m(t) - f(t, H_0 m(t)) 334 - 335 -  and user-specified measurement noise covariance :math:R. Almost always, the measurement noise covariance is zero. 336 -  """ 337 -  measurement_model_string = measurement_model_string.upper() 338 - 339 -  # While "UK" is not available in probsolve_ivp (because it is not recommended) 340 -  # It is an option in this function here, because there is no obvious reason to restrict 341 -  # the options in this lower level function. 342 -  choose_meas_model = { 343 -  "EK0": filtsmooth.gaussian.approx.DiscreteEKFComponent.from_ode( 344 -  ivp, 345 -  prior=prior_process.transition, 346 -  ek0_or_ek1=0, 347 -  evlvar=measurement_noise_covariance, 348 -  forward_implementation="sqrt", 349 -  backward_implementation="sqrt", 350 -  ), 351 -  "EK1": filtsmooth.gaussian.approx.DiscreteEKFComponent.from_ode( 352 -  ivp, 353 -  prior=prior_process.transition, 354 -  ek0_or_ek1=1, 355 -  evlvar=measurement_noise_covariance, 356 -  forward_implementation="sqrt", 357 -  backward_implementation="sqrt", 358 -  ), 359 -  } 360 - 361 -  if measurement_model_string not in choose_meas_model.keys(): 362 -  raise ValueError("Type of measurement model not supported.") 363 - 364 -  return choose_meas_model[measurement_model_string]

@@ -239,9 +239,9 @@
 239 239  "Please provide absolute and relative tolerance for adaptive steps." 240 240  ) 241 241  firststep = step if step is not None else stepsize.propose_firststep(ivp) 242 -  stprl = stepsize.AdaptiveSteps(firststep=firststep, atol=atol, rtol=rtol) 242 +  steprule = stepsize.AdaptiveSteps(firststep=firststep, atol=atol, rtol=rtol) 243 243  else: 244 -  stprl = stepsize.ConstantSteps(step) 244 +  steprule = stepsize.ConstantSteps(step) 245 245 246 246  # Construct diffusion model. 247 247  diffusion_model = diffusion_model.lower()
@@ -264,20 +264,30 @@
 264 264  backward_implementation="sqrt", 265 265  ) 266 266 267 -  if method.upper() not in ["EK0", "EK1"]: 268 -  raise ValueError("Method is not supported.") 269 -  measmod = odefiltsmooth.GaussianIVPFilter.string_to_measurement_model( 270 -  method, ivp, prior_process 267 +  info_op = odefiltsmooth.information_operators.ODEResidual( 268 +  num_prior_derivatives=prior_process.transition.num_derivatives, 269 +  ode_dimension=prior_process.transition.wiener_process_dimension, 271 270  ) 272 271 272 +  choose_method = { 273 +  "EK0": odefiltsmooth.approx_strategies.EK0(), 274 +  "EK1": odefiltsmooth.approx_strategies.EK1(), 275 +  } 276 +  method = method.upper() 277 +  if method not in choose_method.keys(): 278 +  raise ValueError("Method is not supported.") 279 +  approx_strategy = choose_method[method] 280 + 273 281  rk_init = odefiltsmooth.initialization_routines.RungeKuttaInitialization() 282 + 274 283  solver = odefiltsmooth.GaussianIVPFilter( 275 -  ivp=ivp, 276 -  prior_process=prior_process, 277 -  measurement_model=measmod, 284 +  steprule, 285 +  prior_process, 286 +  information_operator=info_op, 287 +  approx_strategy=approx_strategy, 278 288  with_smoothing=dense_output, 279 289  initialization_routine=rk_init, 280 290  diffusion_model=diffusion, 281 291  ) 282 292 283 -  return solver.solve(steprule=stprl) 293 +  return solver.solve(ivp=ivp)

@@ -4,8 +4,6 @@
 4 4 import abc 5 5 from typing import Dict, Tuple 6 6 7 - import numpy as np 8 - 9 7 from probnum import randprocs, randvars 10 8 11 9
@@ -230,53 +228,3 @@
 230 228  forward_implementation=self.forward_implementation, 231 229  backward_implementation=self.backward_implementation, 232 230  ) 233 - 234 -  @classmethod 235 -  def from_ode( 236 -  cls, 237 -  ode, 238 -  prior, 239 -  evlvar=0.0, 240 -  ek0_or_ek1=0, 241 -  forward_implementation="classic", 242 -  backward_implementation="classic", 243 -  ): 244 -  # code is here, and not in DiscreteGaussian, because we want the option of ek0-Jacobians 245 - 246 -  h0 = prior.proj2coord(coord=0) 247 -  h1 = prior.proj2coord(coord=1) 248 - 249 -  def dyna(t, x): 250 -  return h1 @ x - ode.f(t, h0 @ x) 251 - 252 -  def diff(t): 253 -  return evlvar * np.eye(ode.dimension) 254 - 255 -  def diff_cholesky(t): 256 -  return np.sqrt(evlvar) * np.eye(ode.dimension) 257 - 258 -  def jaco_ek1(t, x): 259 -  return h1 - ode.df(t, h0 @ x) @ h0 260 - 261 -  def jaco_ek0(t, x): 262 -  return h1 263 - 264 -  if ek0_or_ek1 == 0: 265 -  jaco = jaco_ek0 266 -  elif ek0_or_ek1 == 1: 267 -  jaco = jaco_ek1 268 -  else: 269 -  raise TypeError("ek0_or_ek1 must be 0 or 1, resp.") 270 -  discrete_model = randprocs.markov.discrete.DiscreteGaussian( 271 -  input_dim=prior.dimension, 272 -  output_dim=ode.dimension, 273 -  state_trans_fun=dyna, 274 -  proc_noise_cov_mat_fun=diff, 275 -  jacob_state_trans_fun=jaco, 276 -  proc_noise_cov_cholesky_fun=diff_cholesky, 277 -  ) 278 -  return cls( 279 -  discrete_model, 280 -  forward_implementation=forward_implementation, 281 -  backward_implementation=backward_implementation, 282 -  )

@@ -0,0 +1,85 @@
 1 + """Approximate information operators.""" 2 + 3 + import abc 4 + from typing import Optional 5 + 6 + from probnum import filtsmooth 7 + from probnum.diffeq.odefiltsmooth.information_operators import _information_operator 8 + 9 + __all__ = ["ApproximateInformationOperator"] 10 + 11 + 12 + class ApproximateInformationOperator( 13 +  _information_operator.InformationOperator, abc.ABC 14 + ): 15 +  """Approximate information operators. 16 + 17 +  An approximate information operator is a version of an information operator that 18 +  differs from its non-approximated operator in two ways: 19 + 20 +  1) When it is transformed into a transition, the output is an approximate transition such as an EKF component. 21 +  2) The Jacobian might be different to the Jacobian of the original version. 22 + 23 +  Approximate information operators are returned by approximation strategies such as EK0 and EK1. 24 +  For instance, the EK0 changes the Jacobian of the information operator 25 +  (in the sense that it sets the Jacobian of the ODE vector field to zero). 
26 +  """ 27 + 28 +  def __init__( 29 +  self, 30 +  information_operator: _information_operator.InformationOperator, 31 +  ): 32 +  super().__init__( 33 +  input_dim=information_operator.input_dim, 34 +  output_dim=information_operator.output_dim, 35 +  ) 36 +  self.information_operator = information_operator 37 + 38 +  def __call__(self, t, x): 39 +  return self.information_operator(t, x) 40 + 41 +  def jacobian(self, t, x): 42 +  return self.information_operator.jacobian(t, x) 43 + 44 +  @abc.abstractmethod 45 +  def as_transition( 46 +  self, 47 +  measurement_cov_fun=None, 48 +  measurement_cov_cholesky_fun=None, 49 +  ): 50 +  raise NotImplementedError 51 + 52 + 53 + class LocallyLinearizedInformationOperator(ApproximateInformationOperator): 54 +  """Approximate information operators based on local linearization.""" 55 + 56 +  def __init__( 57 +  self, 58 +  information_operator: _information_operator.InformationOperator, 59 +  forward_implementation: Optional[str] = "sqrt", 60 +  backward_implementation: Optional[str] = "sqrt", 61 +  ): 62 +  super().__init__( 63 +  information_operator=information_operator, 64 +  ) 65 +  self.forward_implementation = forward_implementation 66 +  self.backward_implementation = backward_implementation 67 + 68 +  def as_transition( 69 +  self, 70 +  measurement_cov_fun=None, 71 +  measurement_cov_cholesky_fun=None, 72 +  ): 73 +  """Return an approximate transition. 74 + 75 +  In this case, an EKF component. 76 +  """ 77 +  transition = self.information_operator.as_transition( 78 +  measurement_cov_fun=measurement_cov_fun, 79 +  measurement_cov_cholesky_fun=measurement_cov_cholesky_fun, 80 +  ) 81 +  return filtsmooth.gaussian.approx.DiscreteEKFComponent( 82 +  non_linear_model=transition, 83 +  forward_implementation=self.forward_implementation, 84 +  backward_implementation=self.backward_implementation, 85 +  )

@@ -6,47 +6,38 @@
 6 6 class ODESolver(ABC): 7 7  """Interface for ODE solvers in ProbNum.""" 8 8 9 -  def __init__(self, ivp, order): 10 -  self.ivp = ivp 11 -  self.order = ( 12 -  order # e.g.: RK45 has order=5, IntegratedWienerTransition(nu) has order=nu 13 -  ) 9 +  def __init__(self, order, steprule): 10 +  self.order = order # e.g.: RK45 has order=5, IBM(q) has order=q 14 11  self.num_steps = 0 15 - 16 -  def solve(self, steprule): 17 -  """Solve an IVP. 18 - 19 -  Parameters 20 -  ---------- 21 -  steprule : :class:StepRule 22 -  Step-size selection rule, e.g. constant steps or adaptive steps. 23 -  """ 24 12  self.steprule = steprule 13 + 14 +  def solve(self, ivp): 15 +  """Solve an IVP.""" 25 16  times, rvs = [], [] 26 -  for t, rv in self.solution_generator(steprule): 17 +  for t, rv in self.solution_generator(ivp): 27 18  times.append(t) 28 19  rvs.append(rv) 29 20 30 21  odesol = self.rvlist_to_odesol(times=times, rvs=rvs) 31 22  return self.postprocess(odesol) 32 23 33 -  def solution_generator(self, steprule): 24 +  def solution_generator(self, ivp): 34 25  """Generate ODE solver steps.""" 35 26 36 -  t, current_rv = self.initialise() 27 +  t, current_rv = self.initialise(ivp) 37 28 38 29  yield t, current_rv 39 -  stepsize = steprule.firststep 30 +  stepsize = self.steprule.firststep 40 31 41 32  while t < self.ivp.tmax: 42 33 43 34  t_new = t + stepsize 44 35  proposed_rv, errorest, reference_state = self.step(t, t_new, current_rv) 45 -  internal_norm = steprule.errorest_to_norm( 36 +  internal_norm = self.steprule.errorest_to_norm( 46 37  errorest=errorest, 47 38  reference_state=reference_state, 48 39  ) 49 -  if steprule.is_accepted(internal_norm): 40 +  if self.steprule.is_accepted(internal_norm): 50 41  self.num_steps += 1 51 42  self.method_callback( 52 43  time=t_new, current_guess=proposed_rv, current_error=errorest
@@ -56,13 +47,13 @@
 56 47 57 48  yield t, current_rv 58 49 59 -  suggested_stepsize = steprule.suggest( 50 +  suggested_stepsize = self.steprule.suggest( 60 51  stepsize, internal_norm, localconvrate=self.order + 1 61 52  ) 62 53  stepsize = min(suggested_stepsize, self.ivp.tmax - t) 63 54 64 55  @abstractmethod 65 -  def initialise(self): 56 +  def initialise(self, ivp): 66 57  """Returns t0 and y0 (for the solver, which might be different to ivp.y0)""" 67 58  raise NotImplementedError 68 59
 1 coverage:  2  precision: 2  3  status:  4  project:  5  default:  6  target: auto  7  threshold: 1%  8  patch:  9  default:  10  target: 90%  11  threshold: 1%  12 13 comment:  14  layout: "reach, diff, files"  15  behavior: default  16  require_changes: true