See also on Google Scholar
2025
Christoph Jürgen Hemmer; Daniel Durstewitz
True Zero-Shot Inference of Dynamical Systems Preserving Long-Term Statistics Proceedings Article
In: The Thirty-ninth Annual Conference on Neural Information Processing Systems, 2025.
@inproceedings{hemmer2025true,
title = {True Zero-Shot Inference of Dynamical Systems Preserving Long-Term Statistics},
author = {Christoph Jürgen Hemmer and Daniel Durstewitz},
url = {https://openreview.net/forum?id=RE97LT26w8},
year = {2025},
date = {2025-03-01},
urldate = {2025-02-01},
booktitle = {The Thirty-ninth Annual Conference on Neural Information Processing Systems},
abstract = {Complex, temporally evolving phenomena, from climate to brain activity, are governed by dynamical systems (DS). DS reconstruction (DSR) seeks to infer generative surrogate models of these from observed data, reproducing their long-term behavior. Existing DSR approaches require purpose-training for any new system observed, lacking the zero-shot and in-context inference capabilities known from LLMs. Here we introduce DynaMix, a novel multivariate ALRNN-based mixture-of-experts architecture pre-trained for DSR, the first DSR model able to generalize zero-shot to out-of-domain DS. Just from a provided context signal, without any re-training, DynaMix faithfully forecasts the long-term evolution of novel DS where existing time series (TS) foundation models, like Chronos, fail -- at a fraction of the number of parameters (0.1%) and orders of magnitude faster inference times. DynaMix outperforms TS foundation models in terms of long-term statistics, and often also short-term forecasts, even on real-world time series, like traffic or weather data, typically used for training and evaluating TS models, but not at all part of DynaMix' training corpus. We illustrate some of the failure modes of TS models for DSR problems, and conclude that models built on DS principles may bear a huge potential also for advancing the TS prediction field. },
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
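For readers unfamiliar with how "preserving long-term statistics" is typically quantified in dynamical systems reconstruction, the following is a generic, hedged sketch: it compares the invariant (occupation) statistics of a ground-truth and a generated trajectory via binned state-space histograms and a KL divergence. Function names, the binning, and the toy data are illustrative assumptions, not DynaMix's actual evaluation.

```python
import numpy as np

def state_space_kl(x_true, x_gen, n_bins=30, eps=1e-9):
    """Compare long-term statistics of two trajectories (T x D arrays) by
    binning them in state space and computing a KL divergence between the
    resulting occupation histograms. Illustrative only."""
    lo, hi = x_true.min(axis=0), x_true.max(axis=0)
    edges = [np.linspace(l, h, n_bins + 1) for l, h in zip(lo, hi)]
    p, _ = np.histogramdd(x_true, bins=edges)
    q, _ = np.histogramdd(x_gen, bins=edges)
    p = p.ravel() / p.sum() + eps
    q = q.ravel() / q.sum() + eps
    return float(np.sum(p * np.log(p / q)))

# toy usage: two noisy oscillations with similar invariant statistics
t = np.linspace(0, 200, 20000)
x_true = np.stack([np.sin(t), np.cos(t)], axis=1) + 0.05 * np.random.randn(len(t), 2)
x_gen = np.stack([np.sin(t + 1.0), np.cos(t + 1.0)], axis=1) + 0.05 * np.random.randn(len(t), 2)
print(state_space_kl(x_true, x_gen))
```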
Lukas Eisenmann; Alena Brändle; Zahra Monfared; Daniel Durstewitz
Detecting Invariant Manifolds in ReLU-Based RNNs Journal Article
In: arXiv preprint arXiv:2510.03814, 2025.
@article{eisenmann2025detecting,
title = {Detecting Invariant Manifolds in ReLU-Based RNNs},
author = {Lukas Eisenmann and Alena Brändle and Zahra Monfared and Daniel Durstewitz},
year = {2025},
date = {2025-03-01},
urldate = {2025-02-01},
journal = {arXiv preprint arXiv:2510.03814},
abstract = {Recurrent Neural Networks (RNNs) have found widespread applications in machine learning for time series prediction and dynamical systems reconstruction, and experienced a recent renaissance with improved training algorithms and architectural designs. Understanding why and how trained RNNs produce their behavior is important for scientific and medical applications, and explainable AI more generally. An RNN's dynamical repertoire depends on the topological and geometrical properties of its state space. Stable and unstable manifolds of periodic points play a particularly important role: They dissect a dynamical system's state space into different basins of attraction, and their intersections lead to chaotic dynamics with fractal geometry. Here we introduce a novel algorithm for detecting these manifolds, with a focus on piecewise-linear RNNs (PLRNNs) employing rectified linear units (ReLUs) as their activation function. We demonstrate how the algorithm can be used to trace the boundaries between different basins of attraction, and hence to characterize multistability, a computationally important property. We further show its utility in finding so-called homoclinic points, the intersections between stable and unstable manifolds, and thus establish the existence of chaos in PLRNNs. Finally we show for an empirical example, electrophysiological recordings from a cortical neuron, how insights into the underlying dynamics could be gained through our method. },
keywords = {},
pubstate = {published},
tppubtype = {article}
}
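As a rough illustration of what stable and unstable manifolds mean computationally, here is a generic textbook construction, not the algorithm proposed in the paper: seed points along the unstable eigenvector of a saddle fixed point and iterate the map forward to trace out a one-dimensional unstable manifold. The toy map and all names are assumptions for illustration.

```python
import numpy as np

def trace_unstable_manifold(step, z_fix, jac, n_seeds=200, n_iter=30, delta=1e-4):
    """Generic sketch: approximate the 1D unstable manifold of a saddle fixed
    point z_fix of the map `step` by seeding points along the unstable
    eigenvector of the Jacobian `jac` at z_fix and iterating the map forward."""
    w, v = np.linalg.eig(jac)
    u = np.real(v[:, np.argmax(np.abs(w))])          # unstable direction (|eigenvalue| > 1)
    seeds = z_fix + delta * np.linspace(-1, 1, n_seeds)[:, None] * u
    branch = [seeds]
    for _ in range(n_iter):
        seeds = np.array([step(z) for z in seeds])
        branch.append(seeds)
    return np.concatenate(branch, axis=0)

# toy saddle map: expanding in x, contracting in y
step = lambda z: np.array([1.5 * z[0], 0.5 * z[1]])
manifold = trace_unstable_manifold(step, np.zeros(2), np.diag([1.5, 0.5]))
print(manifold.shape)
```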
Manuel Brenner; Elias Weber; Georgia Koppe; Daniel Durstewitz
Learning Interpretable Hierarchical Dynamical Systems Models from Time Series Data Proceedings Article
In: The Thirteenth International Conference on Learning Representations, 2025.
@inproceedings{brenner2025learning,
title = {Learning Interpretable Hierarchical Dynamical Systems Models from Time Series Data},
author = {Manuel Brenner and Elias Weber and Georgia Koppe and Daniel Durstewitz},
url = {https://openreview.net/forum?id=Vp2OAxMs2s},
year = {2025},
date = {2025-03-01},
urldate = {2025-02-01},
booktitle = {The Thirteenth International Conference on Learning Representations},
abstract = {In science, we are often interested in obtaining a generative model of the underlying system dynamics from observed time series. While powerful methods for dynamical systems reconstruction (DSR) exist when data come from a single domain, how to best integrate data from multiple dynamical regimes and leverage it for generalization is still an open question. This becomes particularly important when individual time series are short, and group-level information may help to fill in for gaps in single-domain data. At the same time, averaging is not an option in DSR, as it will wipe out crucial dynamical properties (e.g., limit cycles in one domain vs. chaos in another). Hence, a framework is needed that enables to efficiently harvest group-level (multi-domain) information while retaining all single-domain dynamical characteristics. Here we provide such a hierarchical approach and showcase it on popular DSR benchmarks, as well as on neuroscientific and medical time series. In addition to faithful reconstruction of all individual dynamical regimes, our unsupervised methodology discovers common low-dimensional feature spaces in which datasets with similar dynamics cluster. The features spanning these spaces were further dynamically highly interpretable, surprisingly in often linear relation to control parameters that govern the dynamics of the underlying system. Finally, we illustrate transfer learning and generalization to new parameter regimes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
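The following is only a hedged sketch of the general idea of a hierarchical parameterization, under the assumption that subject-level dynamics parameters are generated from shared group-level components weighted by a low-dimensional subject feature vector; the paper's actual architecture, loss, and training scheme are not reproduced here.

```python
import numpy as np

def subject_weights(group_templates, subject_features):
    """Sketch of a hierarchical parameterization: each subject's weight matrix
    is a linear combination of shared group-level template matrices, weighted
    by a low-dimensional subject feature vector (illustrative assumption)."""
    # group_templates: (K, M, M), subject_features: (K,)
    return np.tensordot(subject_features, group_templates, axes=1)

rng = np.random.default_rng(5)
templates = rng.normal(size=(3, 8, 8))     # K = 3 shared templates, M = 8 latent units
features = np.array([0.7, -0.2, 0.1])      # low-dimensional subject descriptor
W_subject = subject_weights(templates, features)
print(W_subject.shape)
```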
Janik Fechtelpeter; Christian Rauschenberg; Christian Goetzl; Selina Hiller; Niklas Emonds; Silvia Krumm; Ulrich Reininghaus; Daniel Durstewitz; Georgia Koppe
Computational network models for forecasting and control of mental health trajectories in digital applications Journal Article
In: medRxiv, 2025.
@article{fechtelpeter2025computational,
title = {Computational network models for forecasting and control of mental health trajectories in digital applications},
author = {Janik Fechtelpeter and Christian Rauschenberg and Christian Goetzl and Selina Hiller and Niklas Emonds and Silvia Krumm and Ulrich Reininghaus and Daniel Durstewitz and Georgia Koppe},
year = {2025},
date = {2025-02-01},
urldate = {2025-01-02},
journal = {medRxiv},
publisher = {Cold Spring Harbor Laboratory Press},
abstract = {Ecological momentary assessments (EMA) have transformed mobile mental health by capturing real-time fluctuations in psychological states and behavior. While forecasting future states from EMA data is crucial for adaptive interventions, most current approaches to modeling the underlying psychological mechanisms rely on linear assumptions. These include common network based methods such as vector autoregression (VAR) or Kalman filtering, which assume fixed and proportional relationships among variables. However, a growing body of evidence suggests that psychological dynamics exhibit nonlinear properties raising concerns about the adequacy of linear models for both interpretation and prediction.
Here, we leverage three independent 40-day micro-randomized trials (N=145) to benchmark a spectrum of models—from naïve baselines and linear network models to autoregressive Transformers and nonlinear state-space models (SSMs) built on piecewise-linear recurrent neural networks (PLRNNs). PLRNNs provided the most accurate forecasts, including predictions of how individuals responded to interventions. Beyond superior forecasting, the PLRNN’s latent-network structure allowed us to simulate how changes in individual psychological states spread through the system. This revealed interpretable patterns of influence—highlighting central network nodes like sad or down as high-impact intervention targets based on their strong ripple effects. Critically, performance remained robust under real-time retraining constraints and varying data completeness, underscoring the practical viability of nonlinear SSMs in deployed mobile mental health systems. Our results establish PLRNN-based forecasting as a powerful, interpretable foundation for real-time, model-predictive control of digital mental health.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
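The linear network baselines mentioned in the abstract (e.g., vector autoregression) are straightforward to write down; below is a minimal VAR(1) fit-and-forecast sketch for an EMA-like multivariate series. This is an illustrative baseline, not the study's pipeline or its PLRNN model.

```python
import numpy as np

def fit_var1(X):
    """Fit a first-order vector autoregression x_{t+1} = A x_t + b by least
    squares; X is a (T x D) EMA time series."""
    past, future = X[:-1], X[1:]
    design = np.hstack([past, np.ones((len(past), 1))])
    coef, *_ = np.linalg.lstsq(design, future, rcond=None)
    A, b = coef[:-1].T, coef[-1]
    return A, b

def forecast(A, b, x0, n_steps):
    """Roll the fitted linear model forward from the last observed state."""
    traj = [x0]
    for _ in range(n_steps):
        traj.append(A @ traj[-1] + b)
    return np.array(traj)

# toy usage on a synthetic 3-item EMA series
rng = np.random.default_rng(0)
X = np.cumsum(rng.normal(size=(200, 3)), axis=0) * 0.05
A, b = fit_var1(X)
print(forecast(A, b, X[-1], 5))
```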
Daniel Durstewitz; Bruno Averbeck; Georgia Koppe
What Neuroscience Can Teach AI About Learning in Continuously Changing Environments Journal Article
In: arXiv preprint arXiv:2507.02103, 2025.
@article{durstewitz2025neuroscience,
title = {What Neuroscience Can Teach AI About Learning in Continuously Changing Environments},
author = {Daniel Durstewitz and Bruno Averbeck and Georgia Koppe},
year = {2025},
date = {2025-02-01},
urldate = {2025-01-01},
journal = {arXiv preprint arXiv:2507.02103},
abstract = {Modern AI models, such as large language models, are usually trained once on a huge corpus of data, potentially fine-tuned for a specific task, and then deployed with fixed parameters. Their training is costly, slow, and gradual, requiring billions of repetitions. In stark contrast, animals continuously adapt to the ever-changing contingencies in their environments. This is particularly important for social species, where behavioral policies and reward outcomes may frequently change in interaction with peers. The underlying computational processes are often marked by rapid shifts in an animal's behaviour and rather sudden transitions in neuronal population activity. Such computational capacities are of growing importance for AI systems operating in the real world, like those guiding robots or autonomous vehicles, or for agentic AI interacting with humans online. Can AI learn from neuroscience? This Perspective explores this question, integrating the literature on continual and in-context learning in AI with the neuroscience of learning on behavioral tasks with shifting rules, reward probabilities, or outcomes. We will outline an agenda for how specifically insights from neuroscience may inform current developments in AI in this area, and - vice versa - what neuroscience may learn from AI, contributing to the evolving field of NeuroAI. },
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Selina Hiller; Christian Götzl; Christian Rauschenberg; Janik Fechtelpeter; Georgia Koppe; Eva Wierzba; Julia Sauter; Sina Dietrich; Daniel Durstewitz; Ulrich Reininghaus; others
Health-Promoting Effects and Everyday Experiences With a Mental Health App Using Ecological Momentary Assessments and AI-Based Ecological Momentary Interventions Among Young People: Qualitative Interview and Focus Group Study Journal Article
In: JMIR mHealth and uHealth, vol. 13, no. 1, pp. e65106, 2025.
@article{hiller2025health,
title = {Health-Promoting Effects and Everyday Experiences With a Mental Health App Using Ecological Momentary Assessments and AI-Based Ecological Momentary Interventions Among Young People: Qualitative Interview and Focus Group Study},
author = {Selina Hiller and Christian Götzl and Christian Rauschenberg and Janik Fechtelpeter and Georgia Koppe and Eva Wierzba and Julia Sauter and Sina Dietrich and Daniel Durstewitz and Ulrich Reininghaus and others},
year = {2025},
date = {2025-01-01},
journal = {JMIR mHealth and uHealth},
volume = {13},
number = {1},
pages = {e65106},
publisher = {JMIR Publications Inc., Toronto, Canada},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Florian Bähner; Tzvetan Popov; Nico Boehme; Selina Hermann; Tom Merten; Hélène Zingone; Georgia Koppe; Andreas Meyer-Lindenberg; Hazem Toutounji; Daniel Durstewitz
Abstract rule learning promotes cognitive flexibility in complex environments across species Journal Article
In: Nature Communications, vol. 16, no. 1, pp. 5396, 2025.
@article{bahner2025abstract,
title = {Abstract rule learning promotes cognitive flexibility in complex environments across species},
author = {Florian Bähner and Tzvetan Popov and Nico Boehme and Selina Hermann and Tom Merten and Hélène Zingone and Georgia Koppe and Andreas Meyer-Lindenberg and Hazem Toutounji and Daniel Durstewitz},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Nature Communications},
volume = {16},
number = {1},
pages = {5396},
publisher = {Nature Publishing Group UK London},
abstract = {Rapid learning in complex and changing environments is a hallmark of intelligent behavior. Humans achieve this in part through abstract concepts applicable to multiple, related situations. It is unclear, however, whether the computational mechanisms underlying rapid learning are unique to humans or also exist in other species. We combined behavioral, computational and electrophysiological analyses of a multidimensional rule-learning paradigm in male rats and in humans. We report that both species infer task rules by sequentially testing different hypotheses, rather than learning the correct action for all possible cue combinations. Neural substrates of hypothetical rules were detected in prefrontal network activity of both species. This species-conserved mechanism reduces task dimensionality and explains key experimental observations: sudden behavioral transitions and facilitated learning after prior experience. Our findings help to narrow the explanatory gap between human macroscopic and rodent microcircuit levels and provide a foundation for the translational investigation of impaired cognitive flexibility.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2024
Manuel Brenner; Florian Hess; Georgia Koppe; Daniel Durstewitz
Integrating Multimodal Data for Joint Generative Modeling of Complex Dynamics Proceedings Article
In: Forty-first International Conference on Machine Learning, 2024.
@inproceedings{brenner2024integrating,
title = {Integrating Multimodal Data for Joint Generative Modeling of Complex Dynamics},
author = {Manuel Brenner and Florian Hess and Georgia Koppe and Daniel Durstewitz},
url = {https://openreview.net/forum?id=b1iurBHDck},
year = {2024},
date = {2024-03-01},
booktitle = {Forty-first International Conference on Machine Learning},
abstract = {Many, if not most, systems of interest in science are naturally described as nonlinear dynamical systems. Empirically, we commonly access these systems through time series measurements. Often such time series may consist of discrete random variables rather than continuous measurements, or may be composed of measurements from multiple data modalities observed simultaneously. For instance, in neuroscience we may have behavioral labels in addition to spike counts and continuous physiological recordings. While by now there is a burgeoning literature on deep learning for dynamical systems reconstruction (DSR), multimodal data integration has hardly been considered in this context. Here we provide such an efficient and flexible algorithmic framework that rests on a multimodal variational autoencoder for generating a sparse teacher signal that guides training of a reconstruction model, exploiting recent advances in DSR training techniques. It enables to combine various sources of information for optimal reconstruction, even allows for reconstruction from symbolic data (class labels) alone, and connects different types of observations within a common latent dynamics space. In contrast to previous multimodal data integration techniques for scientific applications, our framework is fully generative, producing, after training, trajectories with the same geometrical and temporal structure as those of the ground truth system.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
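To make the multimodal idea concrete, here is a hedged, minimal sketch of a joint likelihood in which one shared latent state is decoded into a Gaussian term for continuous measurements and a categorical term for class labels. The linear decoders and all names are illustrative assumptions; the paper's multimodal VAE and teacher-signal machinery are not reproduced.

```python
import numpy as np

def multimodal_nll(z, x_cont, x_class, W_cont, W_class):
    """Joint negative log-likelihood of a continuous and a symbolic observation
    decoded from one shared latent state z (illustrative linear decoders)."""
    mu = W_cont @ z                                   # Gaussian mean for continuous channels
    nll_cont = 0.5 * np.sum((x_cont - mu) ** 2)       # unit-variance Gaussian term
    logits = W_class @ z
    logits = logits - logits.max()                    # numerical stability
    log_probs = logits - np.log(np.sum(np.exp(logits)))
    nll_class = -log_probs[x_class]                   # categorical term for the class label
    return nll_cont + nll_class

rng = np.random.default_rng(4)
z = rng.normal(size=5)
print(multimodal_nll(z, rng.normal(size=3), 1,
                     rng.normal(size=(3, 5)), rng.normal(size=(4, 5))))
```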
Christoph Hemmer; Manuel Brenner; Florian Hess; Daniel Durstewitz
Optimal Recurrent Network Topologies for Dynamical Systems Reconstruction Proceedings Article
In: Forty-first International Conference on Machine Learning, 2024.
@inproceedings{hemmer2024optimal,
title = {Optimal Recurrent Network Topologies for Dynamical Systems Reconstruction},
author = {Christoph Hemmer and Manuel Brenner and Florian Hess and Daniel Durstewitz},
url = {https://openreview.net/forum?id=HZyOz9VEg4},
year = {2024},
date = {2024-03-01},
booktitle = {Forty-first International Conference on Machine Learning},
abstract = {In dynamical systems reconstruction (DSR) we seek to infer from time series measurements a generative model of the underlying dynamical process. This is a prime objective in any scientific discipline, where we are particularly interested in parsimonious models with a low parameter load. A common strategy here is parameter pruning, removing all parameters with small weights. However, here we find this strategy does not work for DSR, where even low magnitude parameters can contribute considerably to the system dynamics. On the other hand, it is well known that many natural systems which generate complex dynamics, like the brain or ecological networks, have a sparse topology with comparatively few links. Inspired by this, we show that geometric pruning, where in contrast to magnitude-based pruning weights with a low contribution to an attractor's geometrical structure are removed, indeed manages to reduce parameter load substantially without significantly hampering DSR quality. We further find that the networks resulting from geometric pruning have a specific type of topology, and that this topology, and not the magnitude of weights, is what is most crucial to performance. We provide an algorithm that automatically generates such topologies which can be used as priors for generative modeling of dynamical systems by RNNs, and compare it to other well studied topologies like small-world or scale-free networks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
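For contrast with the geometric pruning proposed in the paper, the magnitude-based pruning baseline it argues against is easy to state; the sketch below simply zeroes out the smallest-magnitude weights of a matrix. The geometric, attractor-based scoring itself is not reproduced here.

```python
import numpy as np

def magnitude_prune(W, sparsity):
    """Magnitude-based pruning baseline: zero out the fraction `sparsity` of
    weights with smallest absolute value. The paper's geometric pruning instead
    scores weights by their contribution to attractor geometry (not shown)."""
    k = int(np.round(sparsity * W.size))
    if k == 0:
        return W.copy()
    thresh = np.partition(np.abs(W).ravel(), k - 1)[k - 1]
    W_pruned = W.copy()
    W_pruned[np.abs(W_pruned) <= thresh] = 0.0
    return W_pruned

W = np.random.randn(50, 50)
print((magnitude_prune(W, 0.8) != 0).mean())  # roughly 0.2 of the weights survive
```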
Eric Volkmann; Alena Brändle; Daniel Durstewitz; Georgia Koppe
A scalable generative model for dynamical system reconstruction from neuroimaging data Proceedings Article
In: The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024.
@inproceedings{volkmann2024a,
title = {A scalable generative model for dynamical system reconstruction from neuroimaging data},
author = {Eric Volkmann and Alena Brändle and Daniel Durstewitz and Georgia Koppe},
url = {https://openreview.net/forum?id=exATQD4HSv},
year = {2024},
date = {2024-03-01},
booktitle = {The Thirty-eighth Annual Conference on Neural Information Processing Systems},
abstract = {Data-driven inference of the generative dynamics underlying a set of observed time series is of growing interest in machine learning and the natural sciences. In neuroscience, such methods promise to alleviate the need to handcraft models based on biophysical principles and allow to automatize the inference of inter-individual differences in brain dynamics. Recent breakthroughs in training techniques for state space models (SSMs) specifically geared toward dynamical systems (DS) reconstruction (DSR) enable to recover the underlying system including its geometrical (attractor) and long-term statistical invariants from even short time series. These techniques are based on control-theoretic ideas, like modern variants of teacher forcing (TF), to ensure stable loss gradient propagation while training. However, as it currently stands, these techniques are not directly applicable to data modalities where current observations depend on an entire history of previous states due to a signal’s filtering properties, as common in neuroscience (and physiology more generally). Prominent examples are the blood oxygenation level dependent (BOLD) signal in functional magnetic resonance imaging (fMRI) or Ca imaging data. Such types of signals render the SSM's decoder model non-invertible, a requirement for previous TF-based methods. Here, exploiting the recent success of control techniques for training SSMs, we propose a novel algorithm that solves this problem and scales exceptionally well with model dimensionality and filter length. We demonstrate its efficiency in reconstructing dynamical systems, including their state space geometry and long-term temporal properties, from just short BOLD time series.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
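The core difficulty described above, that each observation depends on a whole history of latent states, can be illustrated with a simple filtered observation model: a latent trajectory convolved with a gamma-shaped haemodynamic response kernel. The kernel shape and all names are illustrative assumptions, not the paper's model.

```python
import numpy as np

def gamma_hrf(length=30, shape=6.0, scale=1.0, dt=1.0):
    """Simple gamma-shaped haemodynamic response kernel (illustrative; real
    fMRI pipelines typically use a double-gamma HRF)."""
    t = np.arange(length) * dt
    h = t ** (shape - 1) * np.exp(-t / scale)
    return h / h.sum()

def observe_bold(Z, hrf):
    """Filter each latent dimension of Z (T x D) with the HRF. Because each
    observation mixes a whole history of latent states, this observation model
    cannot be inverted sample-by-sample -- the difficulty the paper addresses."""
    return np.stack([np.convolve(Z[:, d], hrf)[: len(Z)] for d in range(Z.shape[1])], axis=1)

Z = np.random.randn(500, 3)          # stand-in for a latent trajectory
X = observe_bold(Z, gamma_hrf())
print(X.shape)
```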
Manuel Brenner; Christoph Hemmer; Zahra Monfared; Daniel Durstewitz
Almost-Linear RNNs Yield Highly Interpretable Symbolic Codes in Dynamical Systems Reconstruction Proceedings Article
In: The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024.
@inproceedings{brenner2024almostlinear,
title = {Almost-Linear RNNs Yield Highly Interpretable Symbolic Codes in Dynamical Systems Reconstruction},
author = {Manuel Brenner and Christoph Hemmer and Zahra Monfared and Daniel Durstewitz},
url = {https://openreview.net/forum?id=sEpSxteEKJ},
year = {2024},
date = {2024-03-01},
booktitle = {The Thirty-eighth Annual Conference on Neural Information Processing Systems},
abstract = {Dynamical systems theory (DST) is fundamental for many areas of science and engineering. It can provide deep insights into the behavior of systems evolving in time, as typically described by differential or recursive equations. A common approach to facilitate mathematical tractability and interpretability of DS models involves decomposing nonlinear DS into multiple linear DS combined by switching manifolds, i.e. piecewise linear (PWL) systems. PWL models are popular in engineering and a frequent choice in mathematics for analyzing the topological properties of DS. However, hand-crafting such models is tedious and only possible for very low-dimensional scenarios, while inferring them from data usually gives rise to unnecessarily complex representations with very many linear subregions. Here we introduce Almost-Linear Recurrent Neural Networks (AL-RNNs) which automatically and robustly produce most parsimonious PWL representations of DS from time series data, using as few PWL nonlinearities as possible. AL-RNNs can be efficiently trained with any SOTA algorithm for dynamical systems reconstruction (DSR), and naturally give rise to a symbolic encoding of the underlying DS that provably preserves important topological properties. We show that for the Lorenz and Rössler systems, AL-RNNs derive, in a purely data-driven way, the known topologically minimal PWL representations of the corresponding chaotic attractors. We further illustrate on two challenging empirical datasets that interpretable symbolic encodings of the dynamics can be achieved, tremendously facilitating mathematical and computational analysis of the underlying systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
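A hedged sketch of the "almost-linear" idea: in a latent RNN step of the generic piecewise-linear form z_{t+1} = A z_t + W phi(z_t) + h, only P of the M units pass through a ReLU and the rest enter linearly. The exact AL-RNN parameterization in the paper may differ; this is for illustration only.

```python
import numpy as np

def al_rnn_step(z, A, W, h, P):
    """One step of an almost-linear latent RNN sketch: ReLU is applied to the
    last P of the M latent units only, all other units enter linearly."""
    phi = z.copy()
    phi[-P:] = np.maximum(phi[-P:], 0.0)   # piecewise-linear nonlinearity on P units only
    return A @ z + W @ phi + h

M, P = 10, 2
rng = np.random.default_rng(1)
A = 0.5 * np.eye(M)                        # contractive linear part keeps the toy run bounded
W = 0.05 * rng.normal(size=(M, M))
h = rng.normal(size=M)
z = np.zeros(M)
for _ in range(100):
    z = al_rnn_step(z, A, W, h, P)
print(np.round(z, 3))
```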
Niclas Göring; Florian Hess; Manuel Brenner; Zahra Monfared; Daniel Durstewitz
Out-of-Domain Generalization in Dynamical Systems Reconstruction Proceedings Article
In: Forty-first International Conference on Machine Learning, 2024.
@inproceedings{göring2024outofdomain,
title = {Out-of-Domain Generalization in Dynamical Systems Reconstruction},
author = {Niclas Göring and Florian Hess and Manuel Brenner and Zahra Monfared and Daniel Durstewitz},
url = {https://openreview.net/forum?id=xTYIAD2NND},
year = {2024},
date = {2024-03-01},
booktitle = {Forty-first International Conference on Machine Learning},
abstract = {In science we are interested in finding the governing equations, the dynamical rules, underlying empirical phenomena. While traditionally scientific models are derived through cycles of human insight and experimentation, recently deep learning (DL) techniques have been advanced to reconstruct dynamical systems (DS) directly from time series data. State-of-the-art dynamical systems reconstruction (DSR) methods show promise in capturing invariant and long-term properties of observed DS, but their ability to generalize to unobserved domains remains an open challenge. Yet, this is a crucial property we would expect from any viable scientific theory. In this work, we provide a formal framework that addresses generalization in DSR. We explain why and how out-of-domain (OOD) generalization (OODG) in DSR profoundly differs from OODG considered elsewhere in machine learning. We introduce mathematical notions based on topological concepts and ergodic theory to formalize the idea of learnability of a DSR model. We formally prove that black-box DL techniques, without adequate structural priors, generally will not be able to learn a generalizing DSR model. We also show this empirically, considering major classes of DSR algorithms proposed so far, and illustrate where and why they fail to generalize across the whole phase space. Our study provides the first comprehensive mathematical treatment of OODG in DSR, and gives a deeper conceptual understanding of where the fundamental problems in OODG lie and how they could possibly be addressed in practice.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Han Cao; Augusto Anguita; Charline Warembourg; Xavier Escriba-Montagut; Martine Vrijheid; Juan R Gonzalez; Tim Cadman; Verena Schneider-Lindner; Daniel Durstewitz; Xavier Basagana; others
dsLassoCov: a federated machine learning approach incorporating covariate control Journal Article
In: arXiv preprint arXiv:2412.07991, 2024.
@article{cao2024dslassocov,
title = {dsLassoCov: a federated machine learning approach incorporating covariate control},
author = {Han Cao and Augusto Anguita and Charline Warembourg and Xavier Escriba-Montagut and Martine Vrijheid and Juan R Gonzalez and Tim Cadman and Verena Schneider-Lindner and Daniel Durstewitz and Xavier Basagana and others},
url = {https://arxiv.org/abs/2412.07991},
year = {2024},
date = {2024-02-01},
journal = {arXiv preprint arXiv:2412.07991},
abstract = {Machine learning has been widely adopted in biomedical research, fueled by the increasing availability of data. However, integrating datasets across institutions is challenging due to legal restrictions and data governance complexities. Federated learning allows the direct, privacy preserving training of machine learning models using geographically distributed datasets, but faces the challenge of how to appropriately control for covariate effects. The naive implementation of conventional covariate control methods in federated learning scenarios is often impractical due to the substantial communication costs, particularly with high-dimensional data. To address this issue, we introduce dsLassoCov, a machine learning approach designed to control for covariate effects and allow an efficient training in federated learning. In biomedical analysis, this allow the biomarker selection against the confounding effects. Using simulated data, we demonstrate that dsLassoCov can efficiently and effectively manage confounding effects during model training. In our real-world data analysis, we replicated a large-scale Exposome analysis using data from six geographically distinct databases, achieving results consistent with previous studies. By resolving the challenge of covariate control, our proposed approach can accelerate the application of federated learning in large-scale biomedical studies.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
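dsLassoCov's federated aggregation is not reproduced here; the following is only a single-site sketch of the underlying idea, namely L1-penalizing biomarker coefficients while leaving covariate coefficients unpenalized, solved by proximal gradient descent. All names and parameters are illustrative assumptions.

```python
import numpy as np

def lasso_with_covariates(X, C, y, lam=0.1, lr=0.01, n_iter=5000):
    """Single-site sketch of covariate-controlled sparse regression: biomarker
    coefficients (for X) carry an L1 penalty, covariate coefficients (for C)
    do not. Solved by proximal gradient descent (ISTA)."""
    n, p = X.shape
    beta = np.zeros(p)            # penalized biomarker effects
    gamma = np.zeros(C.shape[1])  # unpenalized covariate effects
    for _ in range(n_iter):
        resid = X @ beta + C @ gamma - y
        beta = beta - lr * (X.T @ resid) / n
        beta = np.sign(beta) * np.maximum(np.abs(beta) - lr * lam, 0.0)  # soft-thresholding
        gamma = gamma - lr * (C.T @ resid) / n
    return beta, gamma

rng = np.random.default_rng(0)
X, C = rng.normal(size=(200, 20)), rng.normal(size=(200, 2))
y = X[:, 0] - 2 * X[:, 1] + 0.5 * C[:, 0] + 0.1 * rng.normal(size=200)
beta, gamma = lasso_with_covariates(X, C, y)
print(np.round(beta, 2), np.round(gamma, 2))
```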
Janik Fechtelpeter; Christian Rauschenberg; Hamidreza Jalalabadi; Benjamin Boecking; Therese Amelsvoort; Ulrich Reininghaus; Daniel Durstewitz; Georgia Koppe
A control theoretic approach to evaluate and inform ecological momentary interventions Journal Article
In: International Journal of Methods in Psychiatric Research, vol. 33, no. 4, pp. e70001, 2024.
@article{fechtelpeter2024control,
title = {A control theoretic approach to evaluate and inform ecological momentary interventions},
author = {Janik Fechtelpeter and Christian Rauschenberg and Hamidreza Jalalabadi and Benjamin Boecking and Therese Amelsvoort and Ulrich Reininghaus and Daniel Durstewitz and Georgia Koppe},
url = {https://onlinelibrary.wiley.com/doi/full/10.1002/mpr.70001},
year = {2024},
date = {2024-02-01},
journal = {International Journal of Methods in Psychiatric Research},
volume = {33},
number = {4},
pages = {e70001},
publisher = {Wiley Online Library},
abstract = {Objectives
Ecological momentary interventions (EMI) are digital mobile health interventions administered in an individual's daily life to improve mental health by tailoring intervention components to person and context. Experience sampling via ecological momentary assessments (EMA) furthermore provides dynamic contextual information on an individual's mental health state. We propose a personalized data-driven generic framework to select and evaluate EMI based on EMA.
Methods
We analyze EMA/EMI time-series from 10 individuals, published in a previous study. The EMA consist of multivariate psychological Likert scales. The EMI are mental health trainings presented on a smartphone. We model EMA as linear dynamical systems (DS) and EMI as perturbations. Using concepts from network control theory, we propose and evaluate three personalized data-driven intervention delivery strategies. Moreover, we study putative change mechanisms in response to interventions.
Results
We identify promising intervention delivery strategies that outperform empirical strategies in simulation. We pinpoint interventions with a high positive impact on the network, at low energetic costs. Although mechanisms differ between individuals - demanding personalized solutions - the proposed strategies are generic and applicable to various real-world settings.
Conclusions
Combined with knowledge from mental health experts, DS and control algorithms may provide powerful data-driven and personalized intervention delivery and evaluation strategies.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
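A minimal sketch of the control-theoretic framing: EMA dynamics are modeled as a linear system x_{t+1} = A x_t + B u_t, and each candidate intervention channel can be scored by how strongly a unit impulse propagates through the network. The impact score and the toy parameters are illustrative assumptions; the paper's delivery strategies and energy costs are more elaborate.

```python
import numpy as np

def intervention_impact(A, B, horizon=20):
    """Score each intervention channel of the linear system x_{t+1} = A x_t + B u_t
    by the cumulative network response to a unit impulse on that channel."""
    n, m = B.shape
    scores = np.zeros(m)
    for j in range(m):
        x = B[:, j].copy()             # impulse via intervention channel j
        total = np.abs(x).sum()
        for _ in range(horizon):
            x = A @ x                  # let the effect propagate through the network
            total += np.abs(x).sum()
        scores[j] = total
    return scores

rng = np.random.default_rng(2)
A = 0.3 * rng.normal(size=(6, 6))                        # stable toy EMA network (6 items)
B = rng.choice([0.0, 1.0], size=(6, 3), p=[0.7, 0.3])    # 3 candidate interventions
print(intervention_impact(A, B))
```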
Manuel Brenner; Daniel Durstewitz
Critical alterations in the brain and psyche Journal Article
In: Der Nervenarzt, vol. 95, no. 11, pp. 1013–1023, 2024.
@article{brenner2024critical,
title = {Critical alterations in the brain and psyche},
author = {Manuel Brenner and Daniel Durstewitz},
url = {https://europepmc.org/article/med/39438289},
year = {2024},
date = {2024-02-01},
journal = {Der Nervenarzt},
volume = {95},
number = {11},
pages = {1013–1023},
abstract = {Critical alterations in the brain and psyche are often triggered by critical points and feedback effects in closely networked systems. Such crises can occur in the form of neurological disorders, such as epilepsy or mental disorders, such as bipolar disorder and depression. A central mechanism is the excitation-inhibition (EI) balance in the brain, which is responsible for an optimal processing of information. Disruptions in this balance can lead to pathological conditions. The concept of attractors, which represent the stable conditions in neuronal networks, helps to explain the consolidation of memories, behavioral patterns and mental states. These attractor states can be triggered by external stimuli and may become anchored in pathological contexts. Advances in measurement technologies and methods of artificial intelligence enable a deeper analysis of neuronal dynamics and open up new pathways for targeted therapeutic interventions for the treatment of mental disorders.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Han Cao; Sivanesan Rajan; Bianka Hahn; Manuel Brenner; Florian Hess; Holger A Lindner; Daniel Durstewitz; Georgia Koppe; Emanuel Schwarz; Verena Schneider-Lindner
MTLComb: Multi-task learning combining regression and classification tasks with biomarker identification–an application to sepsis Journal Article
In: Journal of Critical Care, vol. 81, pp. 154548, 2024.
@article{cao2024mtlcomb,
title = {MTLComb: Multi-task learning combining regression and classification tasks with biomarker identification–an application to sepsis},
author = {Han Cao and Sivanesan Rajan and Bianka Hahn and Manuel Brenner and Florian Hess and Holger A Lindner and Daniel Durstewitz and Georgia Koppe and Emanuel Schwarz and Verena Schneider-Lindner},
url = {https://www.sciencedirect.com/science/article/abs/pii/S0883944124000352},
year = {2024},
date = {2024-02-01},
journal = {Journal of Critical Care},
volume = {81},
pages = {154548},
publisher = {Elsevier},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jiarui Chen; Anastasia Benedyk; Alexander Moldavski; Heike Tost; Andreas Meyer-Lindenberg; Urs Braun; Daniel Durstewitz; Georgia Koppe; Emanuel Schwarz
Quantifying brain-functional dynamics using deep dynamical systems: Technical considerations Journal Article
In: iScience, vol. 27, no. 8, 2024.
@article{chen2024quantifying,
title = {Quantifying brain-functional dynamics using deep dynamical systems: Technical considerations},
author = {Jiarui Chen and Anastasia Benedyk and Alexander Moldavski and Heike Tost and Andreas Meyer-Lindenberg and Urs Braun and Daniel Durstewitz and Georgia Koppe and Emanuel Schwarz},
url = {https://www.cell.com/iscience/fulltext/S2589-0042(24)01770-X},
year = {2024},
date = {2024-01-01},
journal = {iScience},
volume = {27},
number = {8},
publisher = {Elsevier},
abstract = {Both mental health and mental illness unfold in complex and unpredictable ways. Novel artificial intelligence approaches from the area of dynamical systems reconstruction can characterize such dynamics and help understand the underlying brain mechanisms, which can also be used as potential biomarkers. However, applying deep learning to model dynamical systems at the individual level must overcome numerous computational challenges to be reproducible and clinically useful. In this study, we performed an extensive analysis of these challenges using generative modeling of brain dynamics from fMRI data as an example and demonstrated their impact on classifying patients with schizophrenia and major depression. This study highlights the tendency of deep learning models to identify functionally unique solutions during parameter optimization, which severely impacts the reproducibility of downstream predictions. We hope this study guides the future development of individual-level generative models and similar machine learning approaches aimed at identifying reproducible biomarkers of mental illness.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Inez Myin-Germeys; Anita Schick; Thomas Ganslandt; Michal Hajdúk; Anton Heretik; Ine Van Hoyweghen; Glenn Kiekens; Georgia Koppe; Luca Marelli; Iveta Nagyova; others
The experience sampling methodology as a digital clinical tool for more person-centered mental health care: an implementation research agenda Journal Article
In: Psychological Medicine, vol. 54, no. 11, pp. 2785–2793, 2024.
@article{myin2024experience,
title = {The experience sampling methodology as a digital clinical tool for more person-centered mental health care: an implementation research agenda},
author = {Inez Myin-Germeys and Anita Schick and Thomas Ganslandt and Michal Hajdúk and Anton Heretik and Ine Van Hoyweghen and Glenn Kiekens and Georgia Koppe and Luca Marelli and Iveta Nagyova and others},
url = {https://www.cambridge.org/core/journals/psychological-medicine/article/experience-sampling-methodology-as-a-digital-clinical-tool-for-more-personcentered-mental-health-care-an-implementation-research-agenda/4DFE205393EFC90DBF095AE32D81B4E4},
year = {2024},
date = {2024-01-01},
journal = {Psychological Medicine},
volume = {54},
number = {11},
pages = {2785–2793},
publisher = {Cambridge University Press},
abstract = {This position paper by the international IMMERSE consortium reviews the evidence of a digital mental health solution based on Experience Sampling Methodology (ESM) for advancing person-centered mental health care and outlines a research agenda for implementing innovative digital mental health tools into routine clinical practice. ESM is a structured diary technique recording real-time self-report data about the current mental state using a mobile application. We will review how ESM may contribute to (1) service user engagement and empowerment, (2) self-management and recovery, (3) goal direction in clinical assessment and management of care, and (4) shared decision-making. However, despite the evidence demonstrating the value of ESM-based approaches in enhancing person-centered mental health care, it is hardly integrated into clinical practice. Therefore, we propose a global research agenda for implementing ESM in routine mental health care addressing six key challenges: (1) the motivation and ability of service users to adhere to the ESM monitoring, reporting and feedback, (2) the motivation and competence of clinicians in routine healthcare delivery settings to integrate ESM in the workflow, (3) the technical requirements and (4) governance requirements for integrating these data in the clinical workflow, (5) the financial and competence related resources related to IT-infrastructure and clinician time, and (6) implementation studies that build the evidence-base. While focused on ESM, the research agenda holds broader implications for implementing digital innovations in mental health. This paper calls for a shift in focus from developing new digital interventions to overcoming implementation barriers, essential for achieving a true transformation toward person-centered care in mental health.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rainer Spanagel; Patrick Bach; Tobias Banaschewski; Anne Beck; Felix Bermpohl; Rick E Bernardi; Christian Beste; Lorenz Deserno; Daniel Durstewitz; Ulrich Ebner-Priemer; others
The ReCoDe addiction research consortium: Losing and regaining control over drug intake—Findings and future perspectives Journal Article
In: Addiction Biology, vol. 29, no. 7, pp. e13419, 2024.
@article{spanagel2024recode,
title = {The ReCoDe addiction research consortium: Losing and regaining control over drug intake—Findings and future perspectives},
author = {Rainer Spanagel and Patrick Bach and Tobias Banaschewski and Anne Beck and Felix Bermpohl and Rick E Bernardi and Christian Beste and Lorenz Deserno and Daniel Durstewitz and Ulrich Ebner-Priemer and others},
url = {https://onlinelibrary.wiley.com/doi/full/10.1111/adb.13419},
year = {2024},
date = {2024-01-01},
journal = {Addiction Biology},
volume = {29},
number = {7},
pages = {e13419},
publisher = {Wiley Online Library},
abstract = {Substance use disorders (SUDs) are seen as a continuum ranging from goal-directed and hedonic drug use to loss of control over drug intake with aversive consequences for mental and physical health and social functioning. The main goals of our interdisciplinary German collaborative research centre on Losing and Regaining Control over Drug Intake (ReCoDe) are (i) to study triggers (drug cues, stressors, drug priming) and modifying factors (age, gender, physical activity, cognitive functions, childhood adversity, social factors, such as loneliness and social contact/interaction) that longitudinally modulate the trajectories of losing and regaining control over drug consumption under real-life conditions. (ii) To study underlying behavioural, cognitive and neurobiological mechanisms of disease trajectories and drug-related behaviours and (iii) to provide non-invasive mechanism-based interventions. These goals are achieved by: (A) using innovative mHealth (mobile health) tools to longitudinally monitor the effects of triggers and modifying factors on drug consumption patterns in real life in a cohort of 900 patients with alcohol use disorder. This approach will be complemented by animal models of addiction with 24/7 automated behavioural monitoring across an entire disease trajectory; i.e. from a naïve state to a drug-taking state to an addiction or resilience-like state. (B) The identification and, if applicable, computational modelling of key molecular, neurobiological and psychological mechanisms (e.g., reduced cognitive flexibility) mediating the effects of such triggers and modifying factors on disease trajectories. (C) Developing and testing non-invasive interventions (e.g., Just-In-Time-Adaptive-Interventions (JITAIs), various non-invasive brain stimulations (NIBS), individualized physical activity) that specifically target the underlying mechanisms for regaining control over drug intake. Here, we will report on the most important results of the first funding period and outline our future research strategy.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ulrich Reininghaus; Matthias Schwannauer; Islay Barne; Joanne R Beames; Rafaël A Bonnier; Manuel Brenner; Dagmar Breznoščáková; Daniel Dančík; Manuela De Allegri; Simona Di Folco; Daniel Durstewitz; others
Strategies, processes, outcomes, and costs of implementing experience sampling-based monitoring in routine mental health care in four European countries: study protocol for the IMMERSE effectiveness-implementation study Journal Article
In: BMC Psychiatry, vol. 24, no. 1, pp. 465, 2024.
@article{reininghaus2024strategies,
title = {Strategies, processes, outcomes, and costs of implementing experience sampling-based monitoring in routine mental health care in four European countries: study protocol for the IMMERSE effectiveness-implementation study},
author = {Ulrich Reininghaus and Matthias Schwannauer and Islay Barne and Joanne R Beames and Rafaël A Bonnier and Manuel Brenner and Dagmar Breznoščáková and Daniel Dančík and Manuela De Allegri and Simona Di Folco and Daniel Durstewitz and others},
url = {https://link.springer.com/article/10.1186/s12888-024-05839-4},
year = {2024},
date = {2024-01-01},
journal = {BMC psychiatry},
volume = {24},
number = {1},
pages = {465},
publisher = {Springer},
abstract = {Background
Recent years have seen a growing interest in the use of digital tools for delivering person-centred mental health care. Experience Sampling Methodology (ESM), a structured diary technique for capturing moment-to-moment variation in experience and behaviour in service users’ daily life, reflects a particularly promising avenue for implementing a person-centred approach. While there is evidence on the effectiveness of ESM-based monitoring, uptake in routine mental health care remains limited. The overarching aim of this hybrid effectiveness-implementation study is to investigate, in detail, reach, effectiveness, adoption, implementation, and maintenance as well as contextual factors, processes, and costs of implementing ESM-based monitoring, reporting, and feedback into routine mental health care in four European countries (i.e., Belgium, Germany, Scotland, Slovakia).
Methods
In this hybrid effectiveness-implementation study, a parallel-group, assessor-blind, multi-centre cluster randomized controlled trial (cRCT) will be conducted, combined with a process and economic evaluation. In the cRCT, 24 clinical units (as the cluster and unit of randomization) at eight sites in four European countries will be randomly allocated using an unbalanced 2:1 ratio to one of two conditions: (a) the experimental condition, in which participants receive a Digital Mobile Mental Health intervention (DMMH) and other implementation strategies in addition to treatment as usual (TAU) or (b) the control condition, in which service users are provided with TAU. Outcome data in service users and clinicians will be collected at four time points: at baseline (t0), 2-month post-baseline (t1), 6-month post-baseline (t2), and 12-month post-baseline (t3). The primary outcome will be patient-reported service engagement assessed with the service attachment questionnaire at 2-month post-baseline. The process and economic evaluation will provide in-depth insights into in-vivo context-mechanism-outcome configurations and economic costs of the DMMH and other implementation strategies in routine care, respectively.
Discussion
If this trial provides evidence on reach, effectiveness, adoption, implementation and maintenance of implementing ESM-based monitoring, reporting, and feedback, it will form the basis for establishing its public health impact and has significant potential to bridge the research-to-practice gap and contribute to swifter ecological translation of digital innovations to real-world delivery in routine mental health care.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2023
Lukas Eisenmann; Zahra Monfared; Niclas Alexander Göring; Daniel Durstewitz
Bifurcations and loss jumps in RNN training Proceedings Article
In: NeurIPS 2023, 2023.
@inproceedings{Eisenmann2023,
title = {Bifurcations and loss jumps in RNN training},
author = {Lukas Eisenmann and Zahra Monfared and Niclas Alexander Göring and Daniel Durstewitz},
year = {2023},
date = {2023-03-01},
booktitle = {NeurIPS 2023},
abstract = {Recurrent neural networks (RNNs) are popular machine learning tools for modeling and forecasting sequential data and for inferring dynamical systems (DS) from observed time series. Concepts from DS theory (DST) have variously been used to further our understanding of both, how trained RNNs solve complex tasks, and the training process itself. Bifurcations are particularly important phenomena in DS, including RNNs, that refer to topological (qualitative) changes in a system’s dynamical behavior as one or more of its parameters are varied. Knowing the bifurcation structure of an RNN will thus allow to deduce many of its computational and dynamical properties, like its sensitivity to parameter variations or its behavior during training. In particular, bifurcations may account for sudden loss jumps observed in RNN training that could severely impede the training process. Here we first mathematically prove for a particular class of ReLU-based RNNs that certain bifurcations are indeed associated with loss gradients tending toward infinity or zero. We then introduce a novel heuristic algorithm for detecting all fixed points and k-cycles in ReLU-based RNNs and their existence and stability regions, hence bifurcation manifolds in parameter space. In contrast to previous numerical algorithms for finding fixed points and common continuation methods, our algorithm provides exact results and returns fixed points and cycles up to high orders with surprisingly good scaling behavior. We exemplify the algorithm on the analysis of the training process of RNNs, and find that the recently introduced technique of generalized teacher forcing completely avoids certain types of bifurcations in training. Thus, besides facilitating the DST analysis of trained RNNs, our algorithm provides a powerful instrument for analyzing the training process itself.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
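For intuition on what detecting fixed points in ReLU-based RNNs involves, here is a brute-force sketch: enumerate ReLU activation patterns of a small network of the generic form z -> A z + W relu(z) + h, solve the linear fixed-point equation in each region, and keep region-consistent solutions. The paper's algorithm is far more scalable; the parameterization and names here are assumptions.

```python
import itertools
import numpy as np

def relu_rnn_fixed_points(A, W, h):
    """Brute-force sketch: find fixed points of the piecewise-linear map
    z -> A z + W relu(z) + h by enumerating ReLU activation patterns D (0/1
    diagonal), solving the linear fixed-point equation in each region, and
    keeping solutions consistent with their pattern."""
    M = len(h)
    fixed_points = []
    for pattern in itertools.product([0.0, 1.0], repeat=M):
        D = np.diag(pattern)
        J = A + W @ D                      # Jacobian within this linear region
        try:
            z = np.linalg.solve(np.eye(M) - J, h)
        except np.linalg.LinAlgError:
            continue
        if np.all((z > 0) == (np.array(pattern) > 0)):  # region consistency
            stable = np.max(np.abs(np.linalg.eigvals(J))) < 1
            fixed_points.append((z, stable))
    return fixed_points

rng = np.random.default_rng(3)
M = 4
A = np.diag(rng.uniform(0.2, 0.5, M))
W = 0.1 * rng.normal(size=(M, M))
np.fill_diagonal(W, 0.0)
h = rng.normal(size=M)
for z, stable in relu_rnn_fixed_points(A, W, h):
    print(np.round(z, 3), "stable" if stable else "unstable")
```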
Florian Hess; Zahra Monfared; Manuel Brenner; Daniel Durstewitz
Generalized Teacher Forcing for Learning Chaotic Dynamics Proceedings Article
In: Proceedings of the 40th International Conference on Machine Learning, PMLR 202:13017-13049, 2023.
@inproceedings{Hess2023,
title = {Generalized Teacher Forcing for Learning Chaotic Dynamics},
author = {Florian Hess and Zahra Monfared and Manuel Brenner and Daniel Durstewitz},
url = {https://proceedings.mlr.press/v202/hess23a.html},
year = {2023},
date = {2023-03-01},
booktitle = {Proceedings of the 40th International Conference on Machine Learning, PMLR 202:13017-13049, 2023.},
journal = {Proceedings of Machine Learning Research, ICML 2023},
abstract = {Chaotic dynamical systems (DS) are ubiquitous in nature and society. Often we are interested in reconstructing such systems from observed time series for prediction or mechanistic insight, where by reconstruction we mean learning geometrical and invariant temporal properties of the system in question. However, training reconstruction algorithms like recurrent neural networks (RNNs) on such systems by gradient-descent based techniques faces severe challenges. This is mainly due to the exploding gradients caused by the exponential divergence of trajectories in chaotic systems. Moreover, for (scientific) interpretability we wish to have as low dimensional reconstructions as possible, preferably in a model which is mathematically tractable. Here we report that a surprisingly simple modification of teacher forcing leads to provably strictly all-time bounded gradients in training on chaotic systems, while still learning to faithfully represent their dynamics. Furthermore, we observed that a simple architectural rearrangement of a tractable RNN design, piecewise-linear RNNs (PLRNNs), enables to reduce the reconstruction dimension to at most that of the observed system (or less).
We show on several DS that with these amendments we can reconstruct DS better than current SOTA algorithms, in much lower dimensions. Performance differences were particularly compelling on real world data with which most other methods severely struggled. This work thus led to a simple yet powerful DS reconstruction algorithm which is highly interpretable at the same time.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
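As an illustration of the teacher-forcing modification described in this abstract, the snippet below mixes the freely generated latent state with a state inferred from the current observation via a convex combination controlled by a single parameter alpha (alpha = 1 approximating classical teacher forcing, alpha = 0 a fully generative pass). This is a simplified sketch under assumed notation; in particular, the pseudoinverse-based mapping from observations to latent states is a placeholder, not the paper's exact construction.

```python
# Illustrative sketch of the generalized-teacher-forcing idea (simplified notation, not the
# paper's training code). At each step the freely generated latent state is mixed with a
# state inferred from the observation; in practice this forward sweep sits inside BPTT.
import numpy as np

def gtf_forward(A, W, h, B, x, alpha):
    """One forward sweep through an observed sequence x (T x N), latent dimension M."""
    M = len(h)
    B_pinv = np.linalg.pinv(B)                     # crude observation->latent inversion (assumption)
    z = np.zeros(M)
    latents = []
    for t in range(len(x)):
        z_gen = A @ z + W @ np.maximum(z, 0) + h   # free PLRNN step
        z_data = B_pinv @ x[t]                     # latent state implied by the observation
        z = alpha * z_data + (1.0 - alpha) * z_gen # convex combination = generalized forcing
        latents.append(z)
    return np.array(latents)
```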
Daniel Durstewitz; Georgia Koppe; Max Ingo Thurm
Reconstructing Computational Dynamics from Neural Measurements with Recurrent Neural Networks Journal Article
In: Nature Reviews Neuroscience, 2023.
@article{Durstewitz2023,
title = {Reconstructing Computational Dynamics from Neural Measurements with Recurrent Neural Networks},
author = {Daniel Durstewitz and Georgia Koppe and Max Ingo Thurm},
url = {https://www.nature.com/articles/s41583-023-00740-7},
doi = {https://doi.org/10.1038/s41583-023-00740-7},
year = {2023},
date = {2023-03-01},
journal = {Nature Reviews Neuroscience},
abstract = {Computational models in neuroscience usually take the form of systems of differential equations. The behaviour of such systems is the subject of dynamical systems theory. Dynamical systems theory provides a powerful mathematical toolbox for analysing neurobiological processes and has been a mainstay of computational neuroscience for decades. Recently, recurrent neural networks (RNNs) have become a popular machine learning tool for studying the non-linear dynamics of neural and behavioural processes by emulating an underlying system of differential equations. RNNs have been routinely trained on similar behavioural tasks to those used for animal subjects to generate hypotheses about the underlying computational mechanisms. By contrast, RNNs can also be trained on the measured physiological and behavioural data, thereby directly inheriting their temporal and geometrical properties. In this way they become a formal surrogate for the experimentally probed system that can be further analysed, perturbed and simulated. This powerful approach is called dynamical system reconstruction. In this Perspective, we focus on recent trends in artificial intelligence and machine learning in this exciting and rapidly expanding field, which may be less well known in neuroscience. We discuss formal prerequisites, different model architectures and training approaches for RNN-based dynamical system reconstructions, ways to evaluate and validate model performance, how to interpret trained models in a neuroscience context, and current challenges.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Egzon Miftari; Daniel Durstewitz; Filip Sadlo
Visualization of Discontinuous Vector Field Topology Journal Article
In: IEEE Transactions on Visualization & Computer Graphics, 2023.
@article{Miftari2023,
title = {Visualization of Discontinuous Vector Field Topology},
author = {Egzon Miftari and Daniel Durstewitz and Filip Sadlo},
url = {https://www.computer.org/csdl/journal/tg/5555/01/10296524/1RwXG8nn7d6},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Visualization & Computer Graphics},
abstract = {This paper extends the concept and the visualization of vector field topology to vector fields with discontinuities. We address the non-uniqueness of flow in such fields by introduction of a time-reversible concept of equivalence. This concept generalizes streamlines to streamsets and thus vector field topology to discontinuous vector fields in terms of invariant streamsets. We identify respective novel critical structures as well as their manifolds, investigate their interplay with traditional vector field topology, and detail the application and interpretation of our approach using specifically designed synthetic cases and a simulated case from physics.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Janine Thome; Mathieu Pinger; Daniel Durstewitz; Wolfgang H Sommer; Peter Kirsch; Georgia Koppe
Model-based experimental manipulation of probabilistic behavior in interpretable behavioral latent variable models Journal Article
In: Frontiers in Neuroscience, vol. 16, pp. 2270, 2023.
@article{Thome2023,
title = {Model-based experimental manipulation of probabilistic behavior in interpretable behavioral latent variable models},
author = {Janine Thome and Mathieu Pinger and Daniel Durstewitz and Wolfgang H Sommer and Peter Kirsch and Georgia Koppe},
url = {https://www.frontiersin.org/articles/10.3389/fnins.2022.1077735/full},
year = {2023},
date = {2023-01-01},
journal = {Frontiers in Neuroscience},
volume = {16},
pages = {2270},
abstract = {In studying mental processes, we often rely on quantifying not directly observable latent processes. Interpretable latent variable models that probabilistically link observations to the underlying process have increasingly been used to draw inferences from observed behavior. However, these models are far more powerful than that. By formally embedding experimentally manipulable variables within the latent process, they can be used to make precise and falsifiable hypotheses or predictions. In doing so, they pinpoint how experimental conditions must be designed to test these hypotheses and, by that, generate adaptive experiments. By comparing predictions to observed behavior, we may then assess and evaluate the predictive validity of an adaptive experiment and model directly and objectively. These ideas are exemplified here on the experimentally not directly observable process of delay discounting. We propose a generic approach to systematically generate and validate experimental conditions based on the aforementioned models. The conditions are explicitly generated so as to predict 9 graded behavioral discounting probabilities across participants. Meeting this prediction, the framework induces discounting probabilities on 9 levels. In contrast to several alternative models, the applied model exhibits high validity as indicated by a comparably low out-of-sample prediction error. We also report evidence for inter-individual differences with respect to the most suitable models underlying behavior. Finally, we outline how to adapt the proposed method to the investigation of other cognitive processes including reinforcement learning.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ileana L Hanganu-Opatz; Thomas Klausberger; Torfi Sigurdsson; Andreas Nieder; Simon N Jacob; Marlene Bartos; Jonas-Frederic Sauer; Daniel Durstewitz; Christian Leibold; Ilka Diester
Resolving the prefrontal mechanisms of adaptive cognitive behaviors: A cross-species perspective Journal Article
In: Neuron, vol. 111, no. 7, 2023.
@article{Hanganu-Opatz2023,
title = {Resolving the prefrontal mechanisms of adaptive cognitive behaviors: A cross-species perspective},
author = {Ileana L Hanganu-Opatz and Thomas Klausberger and Torfi Sigurdsson and Andreas Nieder and Simon N Jacob and Marlene Bartos and Jonas-Frederic Sauer and Daniel Durstewitz and Christian Leibold and Ilka Diester},
url = {https://neurocluster-db.meduniwien.ac.at/db_files/pub_art_431.pdf},
year = {2023},
date = {2023-01-01},
journal = {Neuron},
volume = {111},
number = {7},
abstract = {The prefrontal cortex (PFC) enables a staggering variety of complex behaviors, such as planning actions, solving problems, and adapting to new situations according to external information and internal states. These higher-order abilities, collectively defined as adaptive cognitive behavior, require cellular ensembles that coordinate the tradeoff between the stability and flexibility of neural representations. While the mechanisms underlying the function of cellular ensembles are still unclear, recent experimental and theoretical studies suggest that temporal coordination dynamically binds prefrontal neurons into functional ensembles. A so far largely separate stream of research has investigated the prefrontal efferent and afferent connectivity. These two research streams have recently converged on the hypothesis that prefrontal connectivity patterns influence ensemble formation and the function of neurons within ensembles. Here, we propose a unitary concept that, leveraging a cross-species definition of prefrontal regions, explains how prefrontal ensembles adaptively regulate and efficiently coordinate multiple processes in distinct cognitive behaviors.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Aleksander PF Domanski; Michal T Kucewicz; Eleonora Russo; Mark D Tricklebank; Emma SJ Robinson; Daniel Durstewitz; Matt W Jones
Distinct hippocampal-prefrontal neural assemblies coordinate memory encoding, maintenance, and recall Journal Article
In: Current Biology, vol. 33, no. 7, 2023.
@article{Domanski2023,
title = {Distinct hippocampal-prefrontal neural assemblies coordinate memory encoding, maintenance, and recall},
author = {Aleksander PF Domanski and Michal T Kucewicz and Eleonora Russo and Mark D Tricklebank and Emma SJ Robinson and Daniel Durstewitz and Matt W Jones},
url = {https://www.cell.com/current-biology/pdf/S0960-9822(23)00169-0.pdf},
year = {2023},
date = {2023-01-01},
journal = {Current Biology},
volume = {33},
number = {7},
abstract = {Short-term memory enables incorporation of recent experience into subsequent decision-making. This processing recruits both the prefrontal cortex and hippocampus, where neurons encode task cues, rules, and outcomes. However, precisely which information is carried when, and by which neurons, remains unclear. Using population decoding of activity in rat medial prefrontal cortex (mPFC) and dorsal hippocampal CA1, we confirm that mPFC populations lead in maintaining sample information across delays of an operant non-match to sample task, despite individual neurons firing only transiently. During sample encoding, distinct mPFC subpopulations joined distributed CA1-mPFC cell assemblies hallmarked by 4–5 Hz rhythmic modulation; CA1-mPFC assemblies re-emerged during choice episodes but were not 4–5 Hz modulated. Delay-dependent errors arose when attenuated rhythmic assembly activity heralded collapse of sustained mPFC encoding. Our results map component processes of memory-guided decisions onto heterogeneous CA1-mPFC subpopulations and the dynamics of physiologically distinct, distributed cell assemblies.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2022
Manuel Brenner; Florian Hess; Jonas Mikhaeil; Leonard Bereska; Zahra Monfared; Po-Chen Kuo; Daniel Durstewitz
Tractable Dendritic RNNs for Reconstructing Nonlinear Dynamical Systems Proceedings Article
In: Proceedings of the 39th International Conference on Machine Learning, 2022.
@inproceedings{Brenner2022,
title = {Tractable Dendritic RNNs for Reconstructing Nonlinear Dynamical Systems},
author = {Manuel Brenner and Florian Hess and Jonas Mikhaeil and Leonard Bereska and Zahra Monfared and Po-Chen Kuo and Daniel Durstewitz},
url = {https://proceedings.mlr.press/v162/brenner22a.html},
year = {2022},
date = {2022-03-01},
booktitle = {Proceedings of the 39th International Conference on Machine Learning},
journal = {Proceedings of Machine Learning Research, ICML 2022},
abstract = {In many scientific disciplines, we are interested in inferring the nonlinear dynamical system underlying a set of observed time series, a challenging task in the face of chaotic behavior and noise. Previous deep learning approaches toward this goal often suffered from a lack of interpretability and tractability. In particular, the high-dimensional latent spaces often required for a faithful embedding, even when the underlying dynamics lives on a lower-dimensional manifold, can hamper theoretical analysis. Motivated by the emerging principles of dendritic computation, we augment a dynamically interpretable and mathematically tractable piecewise-linear (PL) recurrent neural network (RNN) by a linear spline basis expansion. We show that this approach retains all the theoretically appealing properties of the simple PLRNN, yet boosts its capacity for approximating arbitrary nonlinear dynamical systems in comparatively low dimensions. We employ two frameworks for training the system, one combining BPTT with teacher forcing, and another based on fast and scalable variational inference. We show that the dendritically expanded PLRNN achieves better reconstructions with fewer parameters and dimensions on various dynamical systems benchmarks and compares favorably to other methods, while retaining a tractable and interpretable structure.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
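A minimal sketch of the basis-expansion idea mentioned in the abstract: each unit's ReLU is replaced by a weighted sum of B shifted ReLUs with (learnable) thresholds, which keeps the overall map piecewise linear while increasing its expressivity. The parameter shapes and the exact placement of the expansion are assumptions for illustration, not the paper's implementation.

```python
# Sketch of a linear spline ("dendritic") basis expansion of a piecewise-linear RNN unit:
# sum_b alpha[i, b] * relu(z[i] - theta[i, b]) replaces plain relu(z[i]).
import numpy as np

def dendritic_expansion(z, alphas, thetas):
    """z: (M,) latent state; alphas, thetas: (M, B) basis weights and thresholds."""
    return np.sum(alphas * np.maximum(z[:, None] - thetas, 0.0), axis=1)

def dendplrnn_step(z, A, W, h, alphas, thetas):
    # assumed latent update: linear part plus coupling through the expanded nonlinearity
    return A @ z + W @ dendritic_expansion(z, alphas, thetas) + h

# toy usage
rng = np.random.default_rng(1)
M, B = 4, 3
z = rng.standard_normal(M)
z_next = dendplrnn_step(z,
                        np.diag(rng.uniform(0.3, 0.9, M)),
                        0.3 * rng.standard_normal((M, M)),
                        rng.standard_normal(M),
                        rng.standard_normal((M, B)),
                        rng.standard_normal((M, B)))
print(z_next)
```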
Daniel Kramer; Philine Lou Bommer; Carlo Tombolini; Georgia Koppe; Daniel Durstewitz
Reconstructing Nonlinear Dynamical Systems from Multi-Modal Time Series Proceedings Article
In: Proceedings of the 39th International Conference on Machine Learning, 2022.
@inproceedings{Kramer2022,
title = {Reconstructing Nonlinear Dynamical Systems from Multi-Modal Time Series},
author = {Daniel Kramer and Philine Lou Bommer and Carlo Tombolini and Georgia Koppe and Daniel Durstewitz},
url = {https://proceedings.mlr.press/v162/kramer22a.html},
year = {2022},
date = {2022-03-01},
booktitle = {Proceedings of the 39th International Conference on Machine Learning},
journal = {Proceedings of Machine Learning Research},
volume = {162},
abstract = {Empirically observed time series in physics, biology, or medicine, are commonly generated by some underlying dynamical system (DS) which is the target of scientific interest. There is an increasing interest to harvest machine learning methods to reconstruct this latent DS in a completely data-driven, unsupervised way. In many areas of science it is common to sample time series observations from many data modalities simultaneously, e.g. electrophysiological and behavioral time series in a typical neuroscience experiment. However, current machine learning tools for reconstructing DSs usually focus on just one data modality. Here we propose a general framework for multi-modal data integration for the purpose of nonlinear DS identification and cross-modal prediction. This framework is based on dynamically interpretable recurrent neural networks as general approximators of nonlinear DSs, coupled to sets of modality-specific decoder models from the class of generalized linear models. Both an expectation-maximization and a variational inference algorithm for model training are advanced and compared. We show on nonlinear DS benchmarks that our algorithms can efficiently compensate for too noisy or missing information in one data channel by exploiting other channels, and demonstrate on experimental neuroscience data how the algorithm learns to link different data domains to the underlying dynamics},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jonas M. Mikhaeil; Zahra Monfared; Daniel Durstewitz
On the difficulty of learning chaotic dynamics with RNNs Proceedings Article
In: Advances in Neural Information Processing Systems, 2022.
@inproceedings{Monfared2021b,
title = {On the difficulty of learning chaotic dynamics with RNNs},
author = {Jonas M. Mikhaeil and Zahra Monfared and Daniel Durstewitz},
url = {https://proceedings.neurips.cc/paper_files/paper/2022/file/495e55f361708bedbab5d81f92048dcd-Paper-Conference.pdf},
year = {2022},
date = {2022-03-01},
booktitle = {Advances in Neural Information Processing Systems},
journal = {36th Conference on Neural Information Processing Systems (NeurIPS 2022).},
abstract = {Recurrent neural networks (RNNs) are wide-spread machine learning tools for modeling sequential and time series data. They are notoriously hard to train because their loss gradients backpropagated in time tend to saturate or diverge during training. This is known as the exploding and vanishing gradient problem. Previous solutions to this issue either built on rather complicated, purpose-engineered architectures with gated memory buffers, or - more recently - imposed constraints that ensure convergence to a fixed point or restrict (the eigenspectrum of) the recurrence matrix. Such constraints, however, convey severe limitations on the expressivity of the RNN. Essential intrinsic dynamics such as multistability or chaos are disabled. This is inherently at disaccord with the chaotic nature of many, if not most, time series encountered in nature and society. Here we offer a comprehensive theoretical treatment of this problem by relating the loss gradients during RNN training to the Lyapunov spectrum of RNN-generated orbits. We mathematically prove that RNNs producing stable equilibrium or cyclic behavior have bounded gradients, whereas the gradients of RNNs with chaotic dynamics always diverge. Based on these analyses and insights, we offer an effective yet simple training technique for chaotic data and guidance on how to choose relevant hyperparameters according to the Lyapunov spectrum.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
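The abstract's core argument links the Jacobian products that appear in backpropagation-through-time to the Lyapunov spectrum of the generated orbit: if the maximal Lyapunov exponent is positive (chaos), those products, and hence the loss gradients, grow exponentially. The following sketch estimates the Lyapunov spectrum of a ReLU-based RNN orbit with the standard QR (Benettin-style) method; the model form is assumed for illustration and this is not the paper's code.

```python
# QR-based Lyapunov spectrum estimate for an assumed ReLU RNN z_{t+1} = A z_t + W relu(z_t) + h.
# The same step Jacobians J_t = A + W D(z_t) are the factors multiplied together in BPTT,
# so a positive maximal exponent signals exploding gradients on long sequences.
import numpy as np

def lyapunov_spectrum(A, W, h, z0, T=10000, burn_in=1000):
    M = len(z0)
    z = z0.copy()
    Q = np.eye(M)
    log_r = np.zeros(M)
    steps = 0
    for t in range(T):
        D = np.diag((z > 0).astype(float))      # ReLU region indicator at the current state
        J = A + W @ D                           # step Jacobian
        z = A @ z + W @ np.maximum(z, 0) + h    # advance the orbit
        Q, R = np.linalg.qr(J @ Q)              # re-orthonormalize the tangent vectors
        if t >= burn_in:
            log_r += np.log(np.abs(np.diag(R)) + 1e-300)
            steps += 1
    return log_r / steps                        # per-step exponents (natural log)

rng = np.random.default_rng(2)
M = 5
spec = lyapunov_spectrum(np.diag(rng.uniform(0.5, 0.95, M)),
                         rng.standard_normal((M, M)) / np.sqrt(M),
                         rng.standard_normal(M), rng.standard_normal(M))
print("max Lyapunov exponent:", spec.max())     # > 0 suggests chaos, hence diverging BPTT factors
```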
Svenja Melbaum; Eleonora Russo; David Eriksson; Artur Schneider; Daniel Durstewitz; Thomas Brox; Ilka Diester
Conserved structures of neural activity in sensorimotor cortex of freely moving rats allow cross-subject decoding Journal Article
In: bioRxiv, 2022.
@article{Melbaum2022,
title = {Conserved structures of neural activity in sensorimotor cortex of freely moving rats allow cross-subject decoding},
author = {Svenja Melbaum and Eleonora Russo and David Eriksson and Artur Schneider and Daniel Durstewitz and Thomas Brox and Ilka Diester},
url = {https://www.biorxiv.org/content/10.1101/2021.03.04.433869v2},
doi = {https://doi.org/10.1101/2021.03.04.433869},
year = {2022},
date = {2022-02-18},
journal = {bioRxiv},
abstract = {Our knowledge about neuronal activity in the sensorimotor cortex relies primarily on stereotyped movements that are strictly controlled in experimental settings. It remains unclear how results can be carried over to less constrained behavior like that of freely moving subjects. Toward this goal, we developed a self-paced behavioral paradigm that encouraged rats to engage in different movement types. We employed bilateral electrophysiological recordings across the entire sensorimotor cortex and simultaneous paw tracking. These techniques revealed behavioral coupling of neurons with lateralization and an anterior–posterior gradient from the premotor to the primary sensory cortex. The structure of population activity patterns was conserved across animals despite the severe under-sampling of the total number of neurons and variations in electrode positions across individuals. We demonstrated cross-subject and cross-session generalization in a decoding task through alignments of low-dimensional neural manifolds, providing evidence of a conserved neuronal code.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zahra Monfared; Mahashweta Patra; Daniel Durstewitz
Robust chaos and multi-stability in piecewise linear recurrent neural networks Journal Article
In: Preprint, 2022.
@article{Monfared2022,
title = {Robust chaos and multi-stability in piecewise linear recurrent neural networks},
author = {Zahra Monfared and Mahashweta Patra and Daniel Durstewitz},
url = {https://www.researchsquare.com/article/rs-2147683/v1},
year = {2022},
date = {2022-02-01},
journal = {Preprint},
abstract = {Recurrent neural networks (RNNs) are major machine learning tools for the processing of sequential data. Piecewise-linear RNNs (PLRNNs) in particular, which are formally piecewise linear (PWL) maps, have become popular recently as data-driven techniques for dynamical systems reconstructions from time-series observations. For a better understanding of the training process, performance, and behavior of trained PLRNNs, more thorough theoretical analysis is highly needed. Especially the presence of chaos strongly affects RNN training and expressivity. Here we show the existence of robust chaos in 2d PLRNNs. To this end, necessary and sufficient conditions for the occurrence of homoclinic intersections are derived by analyzing the interplay between stable and unstable manifolds of 2d PWL maps. Our analysis focuses on general PWL maps, like PLRNNs, since normal form PWL maps lack important characteristics that can occur in PLRNNs. We also explore some bifurcations and multi-stability involving chaos, since the co-existence of chaotic attractors with other attractor objects poses particular challenges for PLRNN training on the one hand, yet may endow trained PLRNNs with important computational properties on the other. Numerical simulations are performed to verify our results and are demonstrated to be in good agreement with the theoretical derivations. We discuss the implications of our results for PLRNN training, performance on machine learning tasks, and scientific applications.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Julia Elina Stocker; Georgia Koppe; Hanna Reich Paredes; Saeideh Heshmati; Stefan G Hofmann; Tim Hahn; Han Maas; Lourens Waldorp; Hamidreza Jamalabadi
Towards a formal model of psychological intervention: Applying a dynamic network and control approach to attitude modification Journal Article
In: PsyArXiv, 2022.
@article{Stocker2022,
title = {Towards a formal model of psychological intervention: Applying a dynamic network and control approach to attitude modification},
author = {Julia Elina Stocker and Georgia Koppe and Hanna Reich Paredes and Saeideh Heshmati and Stefan G Hofmann and Tim Hahn and Han Maas and Lourens Waldorp and Hamidreza Jamalabadi},
year = {2022},
date = {2022-01-01},
journal = {PsyArXiv},
abstract = {Despite the growing deployment of network representation throughout psychological sciences, the question of whether and how networks can systematically describe the effects of psychological interventions remains elusive. Towards this end, we capitalize on recent breakthrough in network control theory, the engineering study of networked interventions, to investigate a representative psychological attitude modification experiment. This study examined 30 healthy participants who answered 11 questions about their attitude toward eating meat. They then received 11 arguments to challenge their attitude on the questions, after which they were asked again the same set of questions. Using this data, we constructed networks that quantify the connections between the responses and tested: 1) if the observed psychological effect, in terms of sensitivity and specificity, relates to the regional network topology as described by control theory, 2) if the size of change in responses relates to whole-network topology that quantifies the “ease” of change as described by control theory, and 3) if responses after intervention could be predicted based on formal results from control theory. We found that 1) the interventions that had higher regional topological relevance (the so-called controllability scores) had stronger effect (r> 0.5), the intervention sensitivities were systematically lower for the interventions that were “easier to control”(r=-0.49), and that the model offered substantial prediction accuracy (r= 0.36).},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
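For readers unfamiliar with the "controllability scores" referenced here, the sketch below shows one common operationalization from the network control literature: treat the estimated connectivity matrix as a discrete linear system, stabilize it by rescaling, and score each node by the trace of the controllability Gramian obtained when input enters at that node alone. This is an illustrative convention, not necessarily the exact score used in the study.

```python
# One common "average controllability" score per node for x_{t+1} = A x_t + B u_t
# (illustrative; the study's exact computation may differ).
import numpy as np
from scipy.linalg import solve_discrete_lyapunov

def average_controllability(A_raw):
    n = A_raw.shape[0]
    A = A_raw / (1.0 + np.max(np.abs(np.linalg.eigvals(A_raw))))  # rescale to spectral radius < 1
    scores = np.zeros(n)
    for i in range(n):
        B = np.zeros((n, 1)); B[i, 0] = 1.0                       # input injected only at node i
        W = solve_discrete_lyapunov(A, B @ B.T)                   # Gramian: W = A W A^T + B B^T
        scores[i] = np.trace(W)
    return scores

# toy usage on a random symmetric 11-node network (11 items, as in the described questionnaire)
rng = np.random.default_rng(3)
A_raw = rng.uniform(0, 1, (11, 11)); A_raw = (A_raw + A_raw.T) / 2
print(np.round(average_controllability(A_raw), 3))
```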
Deanna M Barch; John P O'Doherty; Zeb Kurth-Nelson
Computational Approaches Journal Article
In: Computational Psychiatry: New Perspectives on Mental Illness, 2022.
@article{Kurth-Nelson2022,
title = {Computational Approaches},
author = {Deanna M Barch and John P O'Doherty and Zeb Kurth-Nelson},
url = {https://books.google.de/books?hl=en&lr=&id=746JEAAAQBAJ&oi=fnd&pg=PA77&dq=info:okpKmHWClm8J:scholar.google.com&ots=oqTdTTaF-h&sig=dPeS3sfDXW64H2ytq_NFvQbYWXI&redir_esc=y#v=onepage&q&f=false},
year = {2022},
date = {2022-01-01},
journal = {Computational Psychiatry: New Perspectives on Mental Illness},
abstract = {Vast spectra of biological and psychological processes are potentially involved in the mechanisms of psychiatric illness. Computational neuroscience brings a diverse toolkit to bear on understanding these processes. This chapter begins by organizing the many ways in which computational neuroscience may provide insight to the mechanisms of psychiatric illness. It then contextualizes the quest for deep mechanistic understanding through the perspective that even partial or nonmechanistic understanding can be applied productively. Finally, it questions the standards by which these approaches should be evaluated. If computational psychiatry hopes to go beyond traditional psychiatry, it cannot be judged solely on the basis of how closely it reproduces the diagnoses and prognoses of traditional psychiatry, but must also be judged against more fundamental measures such as patient outcomes.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Florian Bähner; Tzvetan Popov; Selina Hermann; Nico Boehme; Tom Merten; Hélène Zingone; Georgia Koppe; Andreas Meyer-Lindenberg; Hazem Toutounji; Daniel Durstewitz
Species-conserved mechanisms of cognitive flexibility in complex environments Journal Article
In: bioRxiv, 2022.
@article{Bähner2022,
title = {Species-conserved mechanisms of cognitive flexibility in complex environments},
author = {Florian Bähner and Tzvetan Popov and Selina Hermann and Nico Boehme and Tom Merten and Hélène Zingone and Georgia Koppe and Andreas Meyer-Lindenberg and Hazem Toutounji and Daniel Durstewitz},
year = {2022},
date = {2022-01-01},
journal = {bioRxiv},
abstract = {Flexible decision making in complex environments is a hallmark of intelligent behavior but the underlying learning mechanisms and neural computations remain elusive. Through a combination of behavioral, computational and electrophysiological analysis of a novel multidimensional rule-learning paradigm, we show that both rats and humans sequentially probe different behavioral strategies to infer the task rule, rather than learning all possible mappings between environmental cues and actions as current theoretical formulations suppose. This species-conserved process reduces task dimensionality and explains both observed sudden behavioral transitions and positive transfer effects. Behavioral strategies are represented by rat prefrontal activity and strategy-related variables can be decoded from magnetoencephalography signals in human prefrontal cortex. These mechanistic findings provide a foundation for the translational investigation of impaired cognitive flexibility.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Christian Götzl; Selina Hiller; Christian Rauschenberg; Anita Schick; Janik Fechtelpeter; Unai Fischer Abaigar; Georgia Koppe; Daniel Durstewitz; Ulrich Reininghaus; Silvia Krumm
Artificial intelligence-informed mobile mental health apps for young people: a mixed-methods approach on users’ and stakeholders’ perspectives Journal Article
In: Child and Adolescent Psychiatry and Mental Health, vol. 16, no. 86, 2022.
@article{Götzl2022,
title = {Artificial intelligence-informed mobile mental health apps for young people: a mixed-methods approach on users’ and stakeholders’ perspectives},
author = {Christian Götzl and Selina Hiller and Christian Rauschenberg and Anita Schick and Janik Fechtelpeter and Unai Fischer Abaigar and Georgia Koppe and Daniel Durstewitz and Ulrich Reininghaus and Silvia Krumm},
year = {2022},
date = {2022-01-01},
journal = {Child and Adolescent Psychiatry and Mental Health},
volume = {16},
number = {86},
abstract = {Novel approaches in mobile mental health (mHealth) apps that make use of Artificial Intelligence (AI), Ecological Momentary Assessments, and Ecological Momentary Interventions have the potential to support young people in the achievement of mental health and wellbeing goals. However, little is known on the perspectives of young people and mental health experts on this rapidly advancing technology. This study aims to investigate the subjective needs, attitudes, and preferences of key stakeholders towards an AI–informed mHealth app, including young people and experts on mHealth promotion and prevention in youth.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mathieu Pinger; Janine Thome; Patrick Halli; Wolfgang H. Sommer; Georgia Koppe; Peter Kirsch
Comparing Discounting of Potentially Real Rewards and Losses by Means of Functional Magnetic Resonance Imaging Journal Article
In: Frontiers in System Neuroscience, 2022.
@article{Pinger2022,
title = {Comparing Discounting of Potentially Real Rewards and Losses by Means of Functional Magnetic Resonance Imaging},
author = {Mathieu Pinger and Janine Thome and Patrick Halli and Wolfgang H. Sommer and Georgia Koppe and Peter Kirsch},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9365957/},
doi = {10.3389/fnsys.2022.867202},
year = {2022},
date = {2022-01-01},
journal = {Frontiers in System Neuroscience},
abstract = {Delay discounting (DD) has often been investigated in the context of decision making whereby individuals attribute decreasing value to rewards in the distant future. Less is known about DD in the context of negative consequences. The aim of this pilot study was to identify commonalities and differences between reward and loss discounting on the behavioral as well as the neural level by means of computational modeling and functional Magnetic Resonance Imaging (fMRI). We furthermore compared the neural activation between anticipation of rewards and losses.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Janine Thome; Mathieu Pinger; Patrick Halli; Daniel Durstewitz; Wolfgang H. Sommer; Peter Kirsch; Georgia Koppe
A Model Guided Approach to Evoke Homogeneous Behavior During Temporal Reward and Loss Discounting Journal Article
In: Frontiers in Psychiatry, 2022.
@article{Thome2022b,
title = {A Model Guided Approach to Evoke Homogeneous Behavior During Temporal Reward and Loss Discounting},
author = {Janine Thome and Mathieu Pinger and Patrick Halli and Daniel Durstewitz and Wolfgang H. Sommer and Peter Kirsch and Georgia Koppe},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9253427/},
doi = {10.3389/fpsyt.2022.846119},
year = {2022},
date = {2022-01-01},
journal = {Frontiers in Psychiatry},
abstract = {The tendency to devaluate future options as a function of time, known as delay discounting, is associated with various factors such as psychiatric illness and personality. Under identical experimental conditions, individuals may therefore strongly differ in the degree to which they discount future options. In delay discounting tasks, this inter-individual variability inevitably results in an unequal number of discounted trials per subject, generating difficulties in linking delay discounting to psychophysiological and neural correlates. Many studies have therefore focused on assessing delay discounting adaptively. Here, we extend these approaches by developing an adaptive paradigm which aims at inducing more comparable and homogeneous discounting frequencies across participants on a dimensional scale.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Janine Thome; Mathieu Pinger; Daniel Durstewitz; Wolfgang Sommer; Peter Kirsch; Georgia Koppe
Model-based experimental manipulation of probabilistic behavior in interpretable behavioral latent variable models Journal Article
In: PsyArXiv Preprints, 2022.
@article{Thome2022,
title = {Model-based experimental manipulation of probabilistic behavior in interpretable behavioral latent variable models},
author = {Janine Thome and Mathieu Pinger and Daniel Durstewitz and Wolfgang Sommer and Peter Kirsch and Georgia Koppe},
url = {https://psyarxiv.com/s7wda/},
doi = {10.31234/osf.io/s7wda},
year = {2022},
date = {2022-01-01},
journal = {PsyArXiv Preprints},
abstract = {In studying mental processes, we often rely on quantifying not directly observable latent constructs. Interpretable latent variable models that probabilistically link observations to the underlying construct have increasingly been used to draw inferences from observed behavior. However, these models are far more powerful than that. By formally embedding experimentally manipulable variables within the latent construct, they can be used to make precise and falsifiable hypotheses or predictions. At the same time, they pinpoint how experimental conditions must be designed to test these hypotheses. By comparing predictions to observed behavior, we may then assess and evaluate the validity of a measurement instrument directly and objectively, without resorting to comparisons with other latent constructs, as traditionally done in psychology.
These ideas are exemplified here on the experimentally not directly observable construct of delay discounting. We propose a generic approach to systematically generate experimental conditions based on the aforementioned models. The conditions are explicitly generated so as to predict 9 graded behavioral discounting probabilities across participants. Meeting this prediction, the framework induces discounting probabilities on 9 levels. In contrast to several alternative models, the applied model exhibits high validity as indicated by a comparably low out-of-sample prediction error. We also report evidence for inter-individual differences w.r.t. the most suitable models underlying behavior.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
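As a concrete and deliberately simplified illustration of how experimental conditions can be generated to hit graded choice probabilities: under a standard hyperbolic discounting model with a logistic choice rule, one can solve in closed form for the immediate amount predicted to yield a target probability of choosing the delayed option. The specific model and parameter values below are assumptions for illustration, not the latent variable models used in the study.

```python
# Hyperbolic discounting with a logistic choice rule, and its inversion to design trials
# that are predicted to produce nine graded choice probabilities (illustrative values).
import numpy as np

def p_choose_delayed(immediate, delayed, delay, k, beta):
    v_delayed = delayed / (1.0 + k * delay)        # hyperbolic subjective value of the delayed option
    return 1.0 / (1.0 + np.exp(-beta * (v_delayed - immediate)))

def immediate_for_target_p(target_p, delayed, delay, k, beta):
    # invert the logistic: v_delayed - immediate = logit(target_p) / beta
    v_delayed = delayed / (1.0 + k * delay)
    return v_delayed - np.log(target_p / (1.0 - target_p)) / beta

targets = np.linspace(0.1, 0.9, 9)                 # nine graded discounting probabilities
for p in targets:
    a = immediate_for_target_p(p, delayed=20.0, delay=30.0, k=0.05, beta=1.5)
    print(f"target P(delayed)={p:.1f} -> offer immediate amount {a:.2f}")
```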
2021
Priscilla N. Owusu; Ulrich Reininghaus; Georgia Koppe; Irene Dankwa-Mullan; Till Bärnighausen
Artificial intelligence applications in social media for depression screening: A systematic review protocol for content validity processes Journal Article
In: PLoS ONE, 2021.
@article{Owusu2021,
title = {Artificial intelligence applications in social media for depression screening: A systematic review protocol for content validity processes},
author = {Priscilla N. Owusu and Ulrich Reininghaus and Georgia Koppe and Irene Dankwa-Mullan and Till Bärnighausen},
url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0259499},
doi = {https://doi.org/10.1371/journal.pone.0259499},
year = {2021},
date = {2021-11-08},
journal = {PLoS ONE},
abstract = {The popularization of social media has led to the coalescing of user groups around mental health conditions; in particular, depression. Social media offers a rich environment for contextualizing and predicting users’ self-reported burden of depression. Modern artificial intelligence (AI) methods are commonly employed in analyzing user-generated sentiment on social media. In the forthcoming systematic review, we will examine the content validity of these computer-based health surveillance models with respect to standard diagnostic frameworks. Drawing from a clinical perspective, we will attempt to establish a normative judgment about the strengths of these modern AI applications in the detection of depression.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Janine Thome; Robert Steinbach; Julian Grosskreutz; Daniel Durstewitz; Georgia Koppe
Classification of amyotrophic lateral sclerosis by brain volume, connectivity, and network dynamics Journal Article
In: Human Brain Mapping, 2021.
@article{Thome2021,
title = {Classification of amyotrophic lateral sclerosis by brain volume, connectivity, and network dynamics},
author = {Janine Thome and Robert Steinbach and Julian Grosskreutz and Daniel Durstewitz and Georgia Koppe},
url = {https://doi.org/10.1002/hbm.25679},
year = {2021},
date = {2021-10-16},
journal = {Human Brain Mapping},
abstract = {Emerging studies corroborate the importance of neuroimaging biomarkers and machine learning to improve diagnostic classification of amyotrophic lateral sclerosis (ALS). While most studies focus on structural data, recent studies assessing functional connectivity between brain regions by linear methods highlight the role of brain function. These studies have yet to be combined with brain structure and nonlinear functional features. We investigate the role of linear and nonlinear functional brain features, and the benefit of combining brain structure and function for ALS classification. ALS patients (N = 97) and healthy controls (N = 59) underwent structural and functional resting state magnetic resonance imaging. Based on key hubs of resting state networks, we defined three feature sets comprising brain volume, resting state functional connectivity (rsFC), as well as (nonlinear) resting state dynamics assessed via recurrent neural networks. Unimodal and multimodal random forest classifiers were built to classify ALS. Out-of-sample prediction errors were assessed via five-fold cross-validation. Unimodal classifiers achieved a classification accuracy of 56.35–61.66%. Multimodal classifiers outperformed unimodal classifiers achieving accuracies of 62.85–66.82%. Evaluating the ranking of individual features' importance scores across all classifiers revealed that rsFC features were most dominant in classification. While univariate analyses revealed reduced rsFC in ALS patients, functional features more generally indicated deficits in information integration across resting state brain networks in ALS. The present work undermines that combining brain structure and function provides an additional benefit to diagnostic classification, as indicated by multimodal classifiers, while emphasizing the importance of capturing both linear and nonlinear functional brain properties to identify discriminative biomarkers of ALS.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
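The evaluation scheme described in this abstract (unimodal versus multimodal random forest classifiers, five-fold cross-validation) can be expressed in a few lines; the sketch below uses synthetic stand-in features solely to show the pipeline shape and makes no attempt to reproduce the study's data or accuracies.

```python
# Unimodal vs. multimodal random forests with 5-fold cross-validation on synthetic
# stand-in features (illustrative pipeline only).
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(4)
n = 156                                            # 97 patients + 59 controls, as in the abstract
y = np.concatenate([np.ones(97), np.zeros(59)]).astype(int)
volume = rng.standard_normal((n, 20))              # stand-in structural features
rsfc = rng.standard_normal((n, 45))                # stand-in functional-connectivity features
dynamics = rng.standard_normal((n, 30))            # stand-in nonlinear dynamics features

def cv_accuracy(X, y):
    clf = RandomForestClassifier(n_estimators=500, random_state=0)
    return cross_val_score(clf, X, y, cv=5, scoring="accuracy").mean()

print("unimodal (rsFC):  ", round(cv_accuracy(rsfc, y), 3))
print("multimodal (all): ", round(cv_accuracy(np.hstack([volume, rsfc, dynamics]), y), 3))
```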
Giulio Pergola; Anais Harneit; Urs Braun
Brain network dynamics during working memory are modulated by dopamine and diminished in schizophrenia Journal Article
In: Nature Communications, 2021.
@article{Braun2021,
title = {Brain network dynamics during working memory are modulated by dopamine and diminished in schizophrenia},
author = {Giulio Pergola and Anais Harneit and Urs Braun},
url = {https://www.nature.com/articles/s41467-021-23694-9},
doi = {10.1038/s41467-021-23694-9},
year = {2021},
date = {2021-06-09},
journal = {Nature Communications},
abstract = {Dynamical brain state transitions are critical for flexible working memory but the network mechanisms are incompletely understood. Here, we show that working memory performance entails brain-wide switching between activity states using a combination of functional magnetic resonance imaging in healthy controls and individuals with schizophrenia, pharmacological fMRI, genetic analyses and network control theory. The stability of states relates to dopamine D1 receptor gene expression while state transitions are influenced by D2 receptor expression and pharmacological modulation. Individuals with schizophrenia show altered network control properties, including a more diverse energy landscape and decreased stability of working memory representations. Our results demonstrate the relevance of dopamine signaling for the steering of whole-brain network dynamics during working memory and link these processes to schizophrenia pathophysiology.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Eleonora Russo; Tianyang Ma; Rainer Spanagel; Daniel Durstewitz; Hazem Toutounji; Georg Köhr
Coordinated prefrontal state transition leads extinction of reward-seeking behaviors Journal Article
In: Journal of Neuroscience, vol. 41, no. 11, 2021.
@article{Russo2021,
title = {Coordinated prefrontal state transition leads extinction of reward-seeking behaviors},
author = {Eleonora Russo and Tianyang Ma and Rainer Spanagel and Daniel Durstewitz and Hazem Toutounji and Georg Köhr},
url = {https://www.jneurosci.org/content/jneuro/41/11/2406.full.pdf},
year = {2021},
date = {2021-02-02},
journal = {Journal of Neuroscience},
volume = {41},
number = {11},
abstract = {Extinction learning suppresses conditioned reward responses and is thus fundamental to adapt to changing environmental demands and to control excessive reward seeking. The medial prefrontal cortex (mPFC) monitors and controls conditioned reward responses. Abrupt transitions in mPFC activity anticipate changes in conditioned responses to altered contingencies. It remains, however, unknown whether such transitions are driven by the extinction of old behavioral strategies or by the acquisition of new competing ones. Using in vivo multiple single-unit recordings of mPFC in male rats, we studied the relationship between single-unit and population dynamics during extinction learning, using alcohol as a positive reinforcer in an operant conditioning paradigm. To examine the fine temporal relation between neural activity and behavior, we developed a novel behavioral model that allowed us to identify the number, onset, and duration of extinction-learning episodes in the behavior of each animal. We found that single-unit responses to conditioned stimuli changed even under stable experimental conditions and behavior. However, when behavioral responses to task contingencies had to be updated, unit-specific modulations became coordinated across the whole population, pushing the network into a new stable attractor state. Thus, extinction learning is not associated with suppressed mPFC responses to conditioned stimuli, but is anticipated by single-unit coordination into population-wide transitions of the internal state of the animal.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2020
Daniel Durstewitz; Andreas Meyer-Lindenberg; Georgia Koppe
Deep learning for small and big data in psychiatry Journal Article
In: Neuropsychopharmacology, 2020.
@article{Koppe2020b,
title = {Deep learning for small and big data in psychiatry},
author = {Daniel Durstewitz and Andreas Meyer-Lindenberg and Georgia Koppe},
url = {https://www.nature.com/articles/s41386-020-0767-z},
doi = {10.1038/s41386-020-0767-z},
year = {2020},
date = {2020-07-15},
journal = {Neuropsychopharmacology},
abstract = {Psychiatry today must gain a better understanding of the common and distinct pathophysiological mechanisms underlying psychiatric disorders in order to deliver more effective, person-tailored treatments. To this end, it appears that the analysis of ‘small’ experimental samples using conventional statistical approaches has largely failed to capture the heterogeneity underlying psychiatric phenotypes. Modern algorithms and approaches from machine learning, particularly deep learning, provide new hope to address these issues given their outstanding prediction performance in other disciplines. The strength of deep learning algorithms is that they can implement very complicated, and in principle arbitrary predictor-response mappings efficiently. This power comes at a cost, the need for large training (and test) samples to infer the (sometimes over millions of) model parameters. This appears to be at odds with the as yet rather ‘small’ samples available in psychiatric human research to date (n < 10,000), and the ambition of predicting treatment at the single subject level (n = 1). Here, we aim at giving a comprehensive overview on how we can yet use such models for prediction in psychiatry. We review how machine learning approaches compare to more traditional statistical hypothesis-driven approaches, how their complexity relates to the need of large sample sizes, and what we can do to optimally use these powerful techniques in psychiatric neuroscience.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Carla Filosa; Max Scheller; Lars-Lennart Oettl
Phasic dopamine reinforces distinct striatal stimulus encoding in the olfactory tubercle driving dopaminergic reward prediction Journal Article
In: Nature Communications, 2020.
@article{Oettl2020b,
title = {Phasic dopamine reinforces distinct striatal stimulus encoding in the olfactory tubercle driving dopaminergic reward prediction},
author = {Carla Filosa and Max Scheller and Lars-Lennart Oettl},
url = {https://www.nature.com/articles/s41467-020-17257-7#citeas},
doi = {https://doi.org/10.1038/s41467-020-17257-7},
year = {2020},
date = {2020-07-10},
journal = {Nature Communications},
abstract = {The learning of stimulus-outcome associations allows for predictions about the environment. Ventral striatum and dopaminergic midbrain neurons form a larger network for generating reward prediction signals from sensory cues. Yet, the network plasticity mechanisms to generate predictive signals in these distributed circuits have not been entirely clarified. Also, direct evidence of the underlying interregional assembly formation and information transfer is still missing. Here we show that phasic dopamine is sufficient to reinforce the distinctness of stimulus representations in the ventral striatum even in the absence of reward. Upon such reinforcement, striatal stimulus encoding gives rise to interregional assemblies that drive dopaminergic neurons during stimulus-outcome learning. These assemblies dynamically encode the predicted reward value of conditioned stimuli. Together, our data reveal that ventral striatal and midbrain reward networks form a reinforcing loop to generate reward prediction coding.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Daniel Durstewitz; Zahra Monfared
Transformation of ReLU-based recurrent neural networks from discrete-time to continuous-time Journal Article
In: Proceedings of the International Conference on Machine Learning, 2020.
@article{Monfared2020b,
title = {Transformation of ReLU-based recurrent neural networks from discrete-time to continuous-time},
author = {Daniel Durstewitz and Zahra Monfared},
url = {https://arxiv.org/abs/2007.00321},
year = {2020},
date = {2020-07-01},
journal = {Proceedings of the International Conference on Machine Learning},
abstract = {Recurrent neural networks (RNN) as used in machine learning are commonly formulated in discrete time, i.e. as recursive maps. This brings a lot of advantages for training models on data, e.g. for the purpose of time series prediction or dynamical systems identification, as powerful and efficient inference algorithms exist for discrete time systems and numerical integration of differential equations is not necessary. On the other hand, mathematical analysis of dynamical systems inferred from data is often more convenient and enables additional insights if these are formulated in continuous time, i.e. as systems of ordinary (or partial) differential equations (ODE). Here we show how to perform such a translation from discrete to continuous time for a particular class of ReLU-based RNN. We prove three theorems on the mathematical equivalence between the discrete and continuous time formulations under a variety of conditions, and illustrate how to use our mathematical results on different machine learning and nonlinear dynamical systems examples.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Daniel Durstewitz; Zahra Monfared
Existence of n-cycles and border-collision bifurcations in piecewise-linear continuous maps with applications to recurrent neural networks Journal Article
In: Nonlinear Dynamics, 2020.
@article{Monfared2020,
title = {Existence of n-cycles and border-collision bifurcations in piecewise-linear continuous maps with applications to recurrent neural networks},
author = {Daniel Durstewitz and Zahra Monfared},
url = {https://arxiv.org/abs/1911.04304},
doi = {10.1007/s11071-020-05777-2},
year = {2020},
date = {2020-07-01},
journal = {Nonlinear Dynamics},
abstract = {Piecewise linear recurrent neural networks (PLRNNs) form the basis of many successful machine learning applications for time series prediction and dynamical systems identification, but rigorous mathematical analysis of their dynamics and properties is lagging behind. Here we contribute to this topic by investigating the existence of n-cycles (n≥3) and border-collision bifurcations in a class of n-dimensional piecewise linear continuous maps which have the general form of a PLRNN. This is particularly important as for one-dimensional maps the existence of 3-cycles implies chaos. It is shown that these n-cycles collide with the switching boundary in a border-collision bifurcation, and parametric regions for the existence of both stable and unstable n-cycles and border-collision bifurcations will be derived theoretically. We then discuss how our results can be extended and applied to PLRNNs. Finally, numerical simulations demonstrate the implementation of our results and are found to be in good agreement with the theoretical derivations. Our findings thus provide a basis for understanding periodic behavior in PLRNNs, how it emerges in bifurcations, and how it may lead into chaos.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Julia Linke; Georgia Koppe; Vanessa Scholz; Philipp Kanske; Daniel Durstewitz; Michèle Wessa
Aberrant probabilistic reinforcement learning in first-degree relatives of individuals with bipolar disorder Journal Article
In: Journal of Affective Disorders, 2020.
@article{Linke2020,
title = {Aberrant probabilistic reinforcement learning in first-degree relatives of individuals with bipolar disorder},
author = {Julia Linke and Georgia Koppe and Vanessa Scholz and Philipp Kanske and Daniel Durstewitz and Michèle Wessa},
url = {https://doi.org/10.1016/j.jad.2019.11.063},
doi = {10.1016/j.jad.2019.11.063},
year = {2020},
date = {2020-03-01},
journal = {Journal of Affective Disorders},
abstract = {Motivational dysregulation represents a core vulnerability factor for bipolar disorder. Whether this also comprises aberrant learning of stimulus-reinforcer contingencies is less clear.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rainer Spanagel; Tianyang Ma; Eleonora Russo
Coordinated prefrontal state transition leads extinction of reward-seeking behaviors Journal Article
In: bioRxiv, 2020.
@article{Russo2020,
title = {Coordinated prefrontal state transition leads extinction of reward-seeking behaviors},
author = {Rainer Spanagel and Tianyang Ma and Eleonora Russo},
url = {https://www.biorxiv.org/content/10.1101/2020.02.26.964510v1.full},
doi = {https://doi.org/10.1101/2020.02.26.964510},
year = {2020},
date = {2020-02-27},
journal = {bioRxiv},
abstract = {Extinction learning suppresses conditioned reward responses and is thus fundamental to adapt to changing environmental demands and to control excessive reward seeking. The medial prefrontal cortex (mPFC) monitors and controls conditioned reward responses. Using in vivo multiple single-unit recordings of mPFC we studied the relationship between single-unit and population dynamics during different phases of an operant conditioning task. To examine the fine temporal relation between neural activity and behavior, we developed a model-based statistical analysis that captured behavioral idiosyncrasies. We found that single-unit responses to conditioned stimuli changed throughout the course of a session even under stable experimental conditions and consistent behavior. However, when behavioral responses to task contingencies had to be updated during the extinction phase, unit-specific modulations became coordinated across the whole population, pushing the network into a new stable attractor state. These results show that extinction learning is not associated with suppressed mPFC responses to conditioned stimuli, but is driven by single-unit coordination into population-wide transitions of the animal’s internal state.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
