@Article{ NonnenmacherBBBM2017,
title = {Signatures of criticality arise from random subsampling in simple population models},
journal = {PLoS Computational Biology},
year = {2017},
month = {10},
volume = {13},
number = {10},
pages = {1--23},
abstract = {The rise of large-scale recordings of neuronal activity has fueled the hope to gain new insights into the collective activity of neural ensembles. How can one link the statistics of neural population activity to underlying principles and theories? One attempt to interpret such data builds upon analogies to the behaviour of collective systems in statistical physics. Divergence of the specific heat—a measure of population statistics derived from thermodynamics—has been used to suggest that neural populations are optimized to operate at a “critical point”. However, these findings have been challenged by theoretical studies which have shown that common inputs can lead to diverging specific heat. Here, we connect “signatures of criticality”, and in particular the divergence of specific heat, back to statistics of neural population activity commonly studied in neural coding: firing rates and pairwise correlations. We show that the specific heat diverges whenever the average correlation strength does not depend on population size. This is necessarily true when data with correlations is randomly subsampled during the analysis process, irrespective of the detailed structure or origin of correlations. We also show how the characteristic shape of specific heat capacity curves depends on firing rates and correlations, using both analytically tractable models and numerical simulations of a canonical feed-forward population model. To analyze these simulations, we develop efficient methods for characterizing large-scale neural population activity with maximum entropy models. We find that, consistent with experimental findings, increases in firing rates and correlation directly lead to more pronounced signatures. Thus, previous reports of thermodynamical criticality in neural populations based on the analysis of specific heat can be explained by average firing rates and correlations, and are not indicative of an optimized coding strategy. 
We conclude that a reliable interpretation of statistical tests for theories of neural coding is possible only in reference to relevant ground-truth models.},
web_url = {http://journals.plos.org/ploscompbiol/article/file?id=10.1371/journal.pcbi.1005718&type=printable},
state = {published},
DOI = {10.1371/journal.pcbi.1005718},
EPUB = {e1005718},
author = {Nonnenmacher M{mnonnenmacher}; Behrens C; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}; Macke JH{jakob}}
}
@Article{ SchuttHMW2016,
title = {Painfree and accurate Bayesian estimation of psychometric functions for (potentially) overdispersed data},
journal = {Vision Research},
year = {2016},
month = {5},
volume = {122},
pages = {105--123},
abstract = {The psychometric function describes how an experimental variable, such as stimulus strength, influences the behaviour of an observer. Estimation of psychometric functions from experimental data plays a central role in fields such as psychophysics, experimental psychology and in the behavioural neurosciences. Experimental data may exhibit substantial overdispersion, which may result from non-stationarity in the behaviour of observers. Here we extend the standard binomial model which is typically used for psychometric function estimation to a beta-binomial model. We show that the use of the beta-binomial model makes it possible to determine accurate credible intervals even in data which exhibit substantial overdispersion. This goes beyond classical measures for overdispersion-goodness-of-fit-which can detect overdispersion but provide no method to do correct inference for overdispersed data. We use Bayesian inference methods for estimating the posterior distribution of the parameters of the psychometric function. Unlike previous Bayesian psychometric inference methods our software implementation-psignifit 4-performs numerical integration of the posterior within automatically determined bounds. This avoids the use of Markov chain Monte Carlo (MCMC) methods typically requiring expert knowledge. Extensive numerical tests show the validity of the approach and we discuss implications of overdispersion for experimental design. A comprehensive MATLAB toolbox implementing the method is freely available; a python implementation providing the basic capabilities is also available.},
web_url = {http://www.sciencedirect.com/science/article/pii/S0042698916000390},
state = {published},
DOI = {10.1016/j.visres.2016.02.002},
author = {Sch\"utt HH; Harmeling S{harmeling}; Macke JH{jakob}; Wichmann FA{felix}}
}
@Article{ PanzeriMGk2015,
title = {Neural population coding: combining insights from microscopic and mass signals},
journal = {Trends in Cognitive Sciences},
year = {2015},
month = {3},
volume = {19},
number = {3},
pages = {162--172},
abstract = {Behavior relies on the distributed and coordinated activity of neural populations. Population activity can be measured using multi-neuron recordings and neuroimaging. Neural recordings reveal how the heterogeneity, sparseness, timing, and correlation of population activity shape information processing in local networks, whereas neuroimaging shows how long-range coupling and brain states impact on local activity and perception. To obtain an integrated perspective on neural information processing we need to combine knowledge from both levels of investigation. We review recent progress of how neural recordings, neuroimaging, and computational approaches begin to elucidate how interactions between local neural population activity and large-scale dynamics shape the structure and coding capacity of local information representations, make them state-dependent, and control distributed populations that collectively shape behavior.},
web_url = {http://www.sciencedirect.com/science/article/pii/S1364661315000030},
state = {published},
DOI = {10.1016/j.tics.2015.01.002},
author = {Panzeri S{stefano}; Macke JH{jakob}; Gross J; Kayser C{kayser}{Department Physiology of Cognitive Processes}{Research Group Physiology of Sensory Integration}}
}
@Article{ KuffnerZNHSWLFMHCSEGHvMMSTVSL2014,
title = {Crowdsourced analysis of clinical trial data to predict amyotrophic lateral sclerosis progression},
journal = {Nature Biotechnology},
year = {2015},
month = {1},
volume = {33},
number = {1},
pages = {51--57},
abstract = {Amyotrophic lateral sclerosis (ALS) is a fatal neurodegenerative disease with substantial heterogeneity in its clinical presentation. This makes diagnosis and effective treatment difficult, so better tools for estimating disease progression are needed. Here, we report results from the DREAM-Phil Bowen ALS Prediction Prize4Life challenge. In this crowdsourcing competition, competitors developed algorithms for the prediction of disease progression of 1,822 ALS patients from standardized, anonymized phase 2/3 clinical trials. The two best algorithms outperformed a method designed by the challenge organizers as well as predictions by ALS clinicians. We estimate that using both winning algorithms in future trial designs could reduce the required number of patients by at least 20%. The DREAM-Phil Bowen ALS Prediction Prize4Life challenge also identified several potential nonstandard predictors of disease progression including uric acid, creatinine and surprisingly, blood pressure, shedding light on ALS pathobiology. This analysis reveals the potential of a crowdsourcing competition that uses clinical trial data for accelerating ALS research and development.},
web_url = {http://www.nature.com/nbt/journal/v33/n1/pdf/nbt.3051.pdf},
state = {published},
DOI = {10.1038/nbt.3051},
author = {K\"uffner R; Zach N; Norel R; Hawe J; Schoenfeld D; Wang L; Li G; Fang L; Mackey L; Hardiman O; Cudkowicz M; Sherman A; Ertaylan G; Grosse-Wentrup M{moritzgw}{Department Empirical Inference}; Hothorn T; van Ligtenberg J; Macke JH{jakob}; Meyer T; Sch\"olkopf B{bs}{Department Empirical Inference}; Tran L; Vaughan R; Stolovitzky G; Leitner ML}
}
@Article{ FrundWM2014,
title = {Quantifying the effect of intertrial dependence on perceptual decisions},
journal = {Journal of Vision},
year = {2014},
month = {6},
volume = {14},
number = {7:9},
pages = {1--16},
abstract = {In the perceptual sciences, experimenters study the causal mechanisms of perceptual systems by probing observers with carefully constructed stimuli. It has long been known, however, that perceptual decisions are not only determined by the stimulus, but also by internal factors. Internal factors could lead to a statistical influence of previous stimuli and responses on the current trial, resulting in serial dependencies, which complicate the causal inference between stimulus and response. However, the majority of studies do not take serial dependencies into account, and it has been unclear how strongly they influence perceptual decisions. We hypothesize that one reason for this neglect is that there has been no reliable tool to quantify them and to correct for their effects. Here we develop a statistical method to detect, estimate, and correct for serial dependencies in behavioral data. We show that even trained psychophysical observers suffer from strong history dependence. A substantial fraction of the decision variance on difficult stimuli was independent of the stimulus but dependent on experimental history. We discuss the strong dependence of perceptual decisions on internal factors and its implications for correct data interpretation.},
web_url = {http://www.journalofvision.org/content/14/7/9.full.pdf+html},
state = {published},
DOI = {10.1167/14.7.9},
author = {Fr\"und I; Wichmann FA{felix}; Macke J{jakob}}
}
@Article{ WatanabeBMML2013,
title = {Temporal Jitter of the BOLD Signal Reveals a Reliable Initial Dip and Improved Spatial Resolution},
journal = {Current Biology},
year = {2013},
month = {11},
volume = {23},
number = {21},
pages = {2146--2150},
abstract = {fMRI, one of the most important noninvasive brain imaging methods, relies on the blood oxygen level-dependent (BOLD) signal, whose precise underpinnings are still not fully understood [1]. It is a widespread assumption that the components of the hemodynamic response function (HRF) are fixed relative to each other in time, leading most studies as well as analysis tools to focus on trial-averaged responses, thus using or estimating a condition- or location-specific “canonical HRF” [2, 3 and 4]. In the current study, we examined the nature of the variability of the BOLD response and asked in particular whether the positive BOLD peak is subject to trial-to-trial temporal jitter. Our results show that the positive peak of the stimulus-evoked BOLD signal exhibits a trial-to-trial temporal jitter on the order of seconds. Moreover, the trial-to-trial variability can be exploited to uncover the initial dip in the majority of voxels by pooling trial responses with large peak latencies. Initial dips exposed by this procedure possess higher spatial resolution compared to the positive BOLD signal in the human visual cortex. These findings allow for the reliable observation of fMRI signals that are physiologically closer to neural activity, leading to improvements in both temporal and spatial resolution.},
web_url = {http://www.sciencedirect.com/science/article/pii/S0960982213011160},
state = {published},
DOI = {10.1016/j.cub.2013.08.057},
author = {Watanabe M{watanabe}{Department Physiology of Cognitive Processes}; Bartels A{abartels}{Department Physiology of Cognitive Processes}; Macke JH{jakob}; Murayama Y{yusuke}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}}
}
@Article{ MackeML2013_2,
title = {Estimation bias in maximum entropy models},
journal = {Entropy},
year = {2013},
month = {8},
volume = {15},
number = {8},
pages = {3109--3129},
abstract = {Maximum entropy models have become popular statistical models in neuroscience and other areas in biology and can be useful tools for obtaining estimates of mutual information in biological systems. However, maximum entropy models fit to small data sets can be subject to sampling bias; i.e., the true entropy of the data can be severely underestimated. Here, we study the sampling properties of estimates of the entropy obtained from maximum entropy models. We focus on pairwise binary models, which are used extensively to model neural population activity. We show that if the data is well described by a pairwise model, the bias is equal to the number of parameters divided by twice the number of observations. If, however, the higher order correlations in the data deviate from those predicted by the model, the bias can be larger. Using a phenomenological model of neural population recordings, we find that this additional bias is highest for small firing probabilities, strong correlations and large population sizes—for the parameters we tested, a factor of about four higher. We derive guidelines for how long a neurophysiological experiment needs to be in order to ensure that the bias is less than a specified criterion. Finally, we show how a modified plug-in estimate of the entropy can be used for bias correction.},
web_url = {http://www.mdpi.com/1099-4300/15/8/3109/pdf},
state = {published},
DOI = {10.3390/e15083109},
author = {Macke JH{jakob}; Murray I{iain}; Latham PE}
}
@Article{ HaefnerGMB2013,
title = {Inferring decoding strategies from choice probabilities in the presence of correlated variability},
journal = {Nature Neuroscience},
year = {2013},
month = {2},
volume = {16},
number = {2},
pages = {235--242},
abstract = {The activity of cortical neurons in sensory areas covaries with perceptual decisions, a relationship that is often quantified by choice probabilities. Although choice probabilities have been measured extensively, their interpretation has remained fraught with difficulty. We derive the mathematical relationship between choice probabilities, read-out weights and correlated variability in the standard neural decision-making model. Our solution allowed us to prove and generalize earlier observations on the basis of numerical simulations and to derive new predictions. Notably, our results indicate how the read-out weight profile, or decoding strategy, can be inferred from experimentally measurable quantities. Furthermore, we developed a test to decide whether the decoding weights of individual neurons are optimal for the task, even without knowing the underlying correlations. We confirmed the practicality of our approach using simulated data from a realistic population model. Thus, our findings provide a theoretical foundation for a growing body of experimental results on choice probabilities and correlations.},
web_url = {http://www.nature.com/neuro/journal/v16/n2/pdf/nn.3309.pdf},
state = {published},
DOI = {10.1038/nn.3309},
author = {Haefner RM{rhaefner}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Bethge M{mbethge}}
}
@Article{ SchwartzMATB2012,
title = {Low Error Discrimination using a Correlated Population Code},
journal = {Journal of Neurophysiology},
year = {2012},
month = {8},
volume = {108},
number = {4},
pages = {1069--1088},
abstract = {We explored the manner in which spatial information is encoded by retinal ganglion cell populations. We flashed a set of 36 shape stimuli onto the tiger salamander retina and used different decoding algorithms to read out information from a population of 162 ganglion cells. We compared the discrimination performance of linear decoders, which ignore correlation induced by common stimulation, against nonlinear decoders, which can accurately model these correlations. Similar to previous studies, decoders that ignored correlation suffered only a modest drop in discrimination performance for groups of up to ∼30 cells. However, for more realistic groups of 100+ cells, we found order-of-magnitude differences in the error rate. We also compared decoders that used only the presence of a single spike from each cell against more complex decoders that included information from multiple spike counts and multiple time bins. More complex decoders substantially outperformed simpler decoders, showing the importance of spike timing information. Particularly effective was the first spike latency representation, which allowed zero discrimination errors for the majority of shape stimuli. Furthermore, the performance of nonlinear decoders showed even greater enhancement compared to linear decoders for these complex representations. Finally, decoders that approximated the correlation structure in the population by matching all pairwise correlations with a maximum entropy model fit to all 162 neurons were quite successful, especially for the spike latency representation. Together, these results suggest a picture in which linear decoders allow a coarse categorization of shape stimuli, while nonlinear decoders, which take advantage of both correlation and spike timing, are needed to achieve high-fidelity discrimination.},
web_url = {http://jn.physiology.org/content/early/2012/04/20/jn.00564.2011.full.pdf+html},
state = {published},
DOI = {10.1152/jn.00564.2011},
author = {Schwartz G; Macke J{jakob}; Amodei D; Tang H; Berry MJ}
}
@Article{ BuesingMS2012,
title = {Learning stable, regularised latent models of neural population dynamics},
journal = {Network},
year = {2012},
month = {3},
volume = {23},
number = {1-2},
pages = {24--47},
abstract = {Ongoing advances in experimental technique are making commonplace simultaneous recordings of the activity of tens to hundreds of cortical neurons at high temporal resolution. Latent population models, including Gaussian-process factor analysis and hidden linear dynamical system (LDS) models, have proven effective at capturing the statistical structure of such data sets. They can be estimated efficiently, yield useful visualisations of population activity, and are also integral building-blocks of decoding algorithms for brain-machine interfaces (BMI). One practical challenge, particularly to LDS models, is that when parameters are learned using realistic volumes of data the resulting models often fail to reflect the true temporal continuity of the dynamics; and indeed may describe a biologically-implausible unstable population dynamic that is, it may predict neural activity that grows without bound. We propose a method for learning LDS models based on expectation maximisation that constrains parameters to yield stable systems and at the same time promotes capture of temporal structure by appropriate regularisation. We show that when only little training data is available our method yields LDS parameter estimates which provide a substantially better statistical description of the data than alternatives, whilst guaranteeing stable dynamics. We demonstrate our methods using both synthetic data and extracellular multi-electrode recordings from motor cortex.},
web_url = {http://informahealthcare.com/doi/abs/10.3109/0954898X.2012.677095},
state = {published},
DOI = {10.3109/0954898X.2012.677095},
author = {Buesing L; Macke JH{jakob}; Sahani M}
}
@Article{ MackeBb2011,
title = {Statistical analysis of multi-cell recordings: linking population coding models to experimental data},
journal = {Frontiers in Computational Neuroscience},
year = {2011},
month = {7},
volume = {5},
number = {35},
pages = {1--2},
abstract = {Modern recording techniques such as multi-electrode arrays and two-photon imaging methods are capable of simultaneously monitoring the activity of large neuronal ensembles at single cell resolution. These methods finally give us the means to address some of the most crucial questions in systems neuroscience: what are the dynamics of neural population activity? How do populations of neurons perform computations? What is the functional organization of neural ensembles?
While the wealth of new experimental data generated by these techniques provides exciting opportunities to test ideas about how neural ensembles operate, it also provides major challenges: multi-cell recordings necessarily yield data which is high-dimensional in nature. Understanding this kind of data requires powerful statistical techniques for capturing the structure of the neural population responses, as well as their relationship with external stimuli or behavioral observations. Furthermore, linking recorded neural population activity to the predictions of theoretical models of population coding has turned out not to be straightforward.
These challenges motivated us to organize a workshop at the 2009 Computational Neuroscience Meeting in Berlin to discuss these issues. In order to collect some of the recent progress in this field, and to foster discussion on the most important directions and most pressing questions, we issued a call for papers for this Research Topic. We asked authors to address the following four questions:
1. What classes of statistical methods are most useful for modeling population activity?
2. What are the main limitations of current approaches, and what can be done to overcome them?
3. How can statistical methods be used to empirically test existing models of (probabilistic) population coding?
4. What role can statistical methods play in formulating novel hypotheses about the principles of information processing in neural populations?
A total of 15 papers addressing questions related to these themes are now collected in this Research Topic. Three of these articles have resulted in “Focused reviews” in Frontiers in Neuroscience (Crumiller et al., 2011; Rosenbaum et al., 2011; Tchumatchenko et al., 2011), illustrating the great interest in the topic. Many of the articles are devoted to a better understanding of how correlations arise in neural circuits, and how they can be detected, modeled, and interpreted. For example, by modeling how pairwise correlations are transformed by spiking non-linearities in simple neural circuits, Tchumatchenko et al. (2010) show that pairwise correlation coefficients have to be interpreted with care, since their magnitude can depend strongly on the temporal statistics of their input-correlations. In a similar spirit, Rosenbaum et al. (2010) study how correlations can arise and accumulate in feed-forward circuits as a result of pooling of correlated inputs.
Lyamzin et al. (2010) and Krumin et al. (2010) present methods for simulating correlated population activity and extend previous work to more general settings. The method of Lyamzin et al. (2010) allows one to generate synthetic spike trains which match commonly reported statistical properties, such as time varying firing rates as well signal and noise correlations. The Hawkes framework presented by Krumin et al. (2010) allows one to fit models of recurrent population activity to the correlation-structure of experimental data. Louis et al. (2010) present a novel method for generating surrogate spike trains which can be useful when trying to assess the significance and time-scale of correlations in neural spike trains. Finally, Pipa and Munk (2011) study spike synchronization in prefrontal cortex during working memory.
A number of studies are also devoted to advancing our methodological toolkit for analyzing various aspects of population activity (Gerwinn et al., 2010; Machens, 2010; Staude et al., 2010; Yu et al., 2010). For example, Gerwinn et al. (2010) explain how full probabilistic inference can be performed in the popular model class of generalized linear models (GLMs), and study the effect of using prior distributions on the parameters of the stimulus and coupling filters. Staude et al. (2010) extend a method for detecting higher-order correlations between neurons via population spike counts to non-stationary settings. Yu et al. (2010) describe a new technique for estimating the information rate of a population of neurons using frequency-domain methods. Machens (2010) introduces a novel extension of principal component analysis for separating the variability of a neural response into different sources.
Focusing less on the spike responses of neural populations but on aggregate signals of population activity, Boatman-Reich et al. (2010) and Hoerzer et al. (2010) describe methods for a quantitative analysis of field potential recordings. While Boatman-Reich et al. (2010) discuss a number of existing techniques in a unified framework and highlight the potential pitfalls associated with such approaches, Hoerzer et al. (2010) demonstrate how multivariate autoregressive models and the concept of Granger causality can be used to infer local functional connectivity in area V4 of behaving macaques.
A final group of studies is devoted to understanding experimental data in light of computational models (Galán et al., 2010; Pandarinath et al., 2010; Shteingart et al., 2010). Pandarinath et al. (2010) present a novel mechanism that may explain how neural networks in the retina switch from one state to another by a change in gap junction coupling, and conjecture that this mechanism might also be found in other neural circuits. Galán et al. (2010) present a model of how hypoxia may change the network structure in the respiratory networks in the brainstem, and analyze neural correlations in multi-electrode recordings in light of this model. Finally, Shteingart et al. (2010) show that the spontaneous activation sequences they find in cultured networks cannot be explained by Zipf’s law, but rather require a wrestling model.
The papers of this Research Topic thus span a wide range of topics in the statistical modeling of multi-cell recordings. Together with other recent advances, they provide us with a useful toolkit to tackle the challenges presented by the vast amount of data collected with modern recording techniques. The impact of novel statistical methods on the field and their potential to generate scientific progress, however, depends critically on how readily they can be adopted and applied by laboratories and researchers working with experimental data. An important step toward this goal is to also publish computer code along with the articles (Barnes, 2010) as a successful implementation of advanced methods also relies on many details which are hard to communicate in the article itself. In this way it becomes much more likely that other researchers can actually use the methods, and unnecessary re-implementations can be avoided. Some of the papers in this Research Topic already follow this goal (Gerwinn et al., 2010; Louis et al., 2010; Lyamzin et al., 2010). We hope that this practice becomes more and more common in the future and encourage authors and editors of Research Topics to make as much code available as possible, ideally in a format that can be easily integrated with existing software sharing initiatives (Herz et al., 2008; Goldberg et al., 2009).},
web_url = {http://www.frontiersin.org/Computational_Neuroscience/10.3389/fncom.2011.00035/full},
state = {published},
DOI = {10.3389/fncom.2011.00035},
author = {Macke J{jakob}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}}
}
@Article{ MackeOb2011,
title = {Common Input Explains Higher-Order Correlations and Entropy in a Simple Model of Neural Population Activity},
journal = {Physical Review Letters},
year = {2011},
month = {5},
volume = {106},
number = {20},
pages = {1--4},
abstract = {Simultaneously recorded neurons exhibit correlations whose underlying causes are not known. Here, we use a population of threshold neurons receiving correlated inputs to model neural population recordings. We show analytically that small changes in second-order correlations can lead to large changes in higher-order redundancies, and that the resulting interactions have a strong impact on the entropy, sparsity, and statistical heat capacity of the population. Our findings for this simple model may explain some surprising effects recently observed in neural population recordings.},
web_url = {http://prl.aps.org/pdf/PRL/v106/i20/e208102},
state = {published},
DOI = {10.1103/PhysRevLett.106.208102},
EPUB = {208102},
author = {Macke JH{jakob}; Opper M; Bethge M{mbethge}}
}
@Article{ 6516,
title = {Gaussian process methods for estimating cortical maps},
journal = {NeuroImage},
year = {2011},
month = {5},
volume = {56},
number = {2},
pages = {570--581},
abstract = {A striking feature of cortical organization is that the encoding of many stimulus features, for example orientation or direction selectivity, is arranged into topographic maps. Functional imaging methods such as optical imaging of intrinsic signals, voltage sensitive dye imaging or functional magnetic resonance imaging are important tools for studying the structure of cortical maps. As functional imaging measurements are usually noisy, statistical processing of the data is necessary to extract maps from the imaging data. We here present a probabilistic model of functional imaging data based on Gaussian processes. In comparison to conventional approaches, our model yields superior estimates of cortical maps from smaller amounts of data. In addition, we obtain quantitative uncertainty estimates, i.e. error bars on properties of the estimated map. We use our probabilistic model to study the coding properties of the map and the role of noise-correlations by decoding the stimulus from single trials of an imaging experiment.},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6WNP-5032NNX-1-3&_cdi=6968&_user=29041&_pii=S1053811910007007&_origin=&_coverDate=05%2F15%2F2011&_sk=999439997&view=c&wchp=dGLbVlz-zSkWl&md5=17cff103ca4f9e756eee9e6711fca3e4&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/j.neuroimage.2010.04.272},
author = {Macke JH{jakob}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; White LW; Kaschube M; Bethge M{mbethge}}
}
@Article{ 7040,
title = {Reconstructing stimuli from the spike-times of leaky integrate and fire neurons},
journal = {Frontiers in Neuroscience},
year = {2011},
month = {2},
volume = {5},
number = {1},
pages = {1--16},
abstract = {Reconstructing stimuli from the spike trains of neurons is an important approach for understanding the neural code. One of the difficulties associated with this task is that signals which are varying continuously in time are encoded into sequences of discrete events or spikes. An important problem is to determine how much information about the continuously varying stimulus can be extracted from the time-points at which spikes were observed, especially if these time-points are subject to some sort of randomness. For the special case of spike trains generated by leaky integrate and fire neurons, noise can be introduced by allowing variations in the threshold every time a spike is released. A simple decoding algorithm previously derived for the noiseless case can be extended to the stochastic case, but turns out to be biased. Here, we review a solution to this problem, by presenting a simple yet efficient algorithm which greatly reduces the bias, and therefore leads to better decoding performance in the stochastic case.},
web_url = {http://www.frontiersin.org/Neuroscience/10.3389/fnins.2011.00001/abstract},
state = {published},
DOI = {10.3389/fnins.2011.00001},
EPUB = {1-9},
author = {Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Bethge M{mbethge}}
}
@article{LyamzinML2010,
  title    = {Modeling population spike trains with specified time-varying spike rates, trial-to-trial variability, and pairwise signal and noise correlations},
  journal  = {Frontiers in Computational Neuroscience},
  year     = {2010},
  month    = oct,
  volume   = {4},
  number   = {144},
  pages    = {1--11},
  abstract = {As multi-electrode and imaging technology begin to provide us with simultaneous recordings of large neuronal populations, new methods for modeling such data must also be developed. Here, we present a model for the type of data commonly recorded in early sensory pathways: responses to repeated trials of a sensory stimulus in which each neuron has its own time-varying spike rate (as described by its PSTH) and the dependencies between cells are characterized by both signal and noise correlations. This model is an extension of previous attempts to model population spike trains designed to control only the total correlation between cells. In our model, the response of each cell is represented as a binary vector given by the dichotomized sum of a deterministic “signal” that is repeated on each trial and a Gaussian random “noise” that is different on each trial. This model allows the simulation of population spike trains with PSTHs, trial-to-trial variability, and pairwise correlations that match those measured experimentally. Furthermore, the model also allows the noise correlations in the spike trains to be manipulated independently of the signal correlations and single-cell properties. To demonstrate the utility of the model, we use it to simulate and manipulate experimental responses from the mammalian auditory and visual systems. We also present a general form of the model in which both the signal and noise are Gaussian random processes, allowing the mean spike rate, trial-to-trial variability, and pairwise signal and noise correlations to be specified independently. Together, these methods for modeling spike trains comprise a potentially powerful set of tools for both theorists and experimentalists studying population responses in sensory systems.},
  url      = {http://www.frontiersin.org/Computational_Neuroscience/10.3389/fncom.2010.00144/abstract},
  state    = {published},
  doi      = {10.3389/fncom.2010.00144},
  author   = {Lyamzin, D. R. and Macke, J. H. and Lesica, N. A.}
}
@article{6515,
  title    = {Estimating predictive stimulus features from psychophysical data: The decision image technique applied to human faces},
  journal  = {Journal of Vision},
  year     = {2010},
  month    = may,
  volume   = {10},
  number   = {5},
  eid      = {22},
  pages    = {1--24},
  abstract = {One major challenge in the sensory sciences is to identify the stimulus features on which sensory systems base their computations, and which are predictive of a behavioral decision: they are a prerequisite for computational models of perception. We describe a technique (decision images) for extracting predictive stimulus features using logistic regression. A decision image not only defines a region of interest within a stimulus but is a quantitative template which defines a direction in stimulus space. Decision images thus enable the development of predictive models, as well as the generation of optimized stimuli for subsequent psychophysical investigations. Here we describe our method and apply it to data from a human face classification experiment. We show that decision images are able to predict human responses not only in terms of overall percent correct but also in terms of the probabilities with which individual faces are (mis-) classified by individual observers. We show that the most predictive dimension for gender categorization is neither aligned with the axis defined by the two class-means, nor with the first principal component of all faces-two hypotheses frequently entertained in the literature. Our method can be applied to a wide range of binary classification tasks in vision or other psychophysical contexts.},
  url      = {http://www.journalofvision.org/content/10/5/22.full.pdf+html},
  state    = {published},
  doi      = {10.1167/10.5.22},
  author   = {Macke, J. H. and Wichmann, F. A.}
}
@article{6502,
  title    = {{Bayesian} inference for generalized linear models for spiking neurons},
  journal  = {Frontiers in Computational Neuroscience},
  year     = {2010},
  month    = apr,
  volume   = {4},
  number   = {12},
  pages    = {1--17},
  abstract = {Generalized Linear Models (GLMs) are commonly used statistical methods for modelling the relationship between neural population activity and presented stimuli. When the dimension of the parameter space is large, strong regularization has to be used in order to fit GLMs to datasets of realistic size without overfitting. By imposing properly chosen priors over parameters, Bayesian inference provides an effective and principled approach for achieving regularization. Here we show how the posterior distribution over model parameters of GLMs can be approximated by a Gaussian using the Expectation Propagation algorithm. In this way, we obtain an estimate of the posterior mean and posterior covariance, allowing us to calculate Bayesian confidence intervals that characterize the uncertainty about the optimal solution. From the posterior we also obtain a different point estimate, namely the posterior mean as opposed to the commonly used maximum a posteriori estimate. We systematically compare the different inference techniques on simulated as well as on multi-electrode recordings of retinal ganglion cells, and explore the effects of the chosen prior and the performance measure used. We find that good performance can be achieved by choosing a Laplace prior together with the posterior mean estimate.},
  url      = {http://frontiersin.org/neuroscience/computationalneuroscience/paper/10.3389/fncom.2010.00012/pdf/},
  state    = {published},
  doi      = {10.3389/fncom.2010.00012},
  author   = {Gerwinn, S. and Macke, J. and Bethge, M.}
}
@article{6102,
  title    = {{Bayesian} population decoding of spiking neurons},
  journal  = {Frontiers in Computational Neuroscience},
  year     = {2009},
  month    = oct,
  volume   = {3},
  number   = {21},
  pages    = {1--14},
  abstract = {The timing of action potentials in spiking neurons depends on the temporal dynamics of their inputs and contains information about temporal fluctuations in the stimulus. Leaky integrate-and-fire neurons constitute a popular class of encoding models, in which spike times depend directly on the temporal structure of the inputs. However, optimal decoding rules for these models have only been studied explicitly in the noiseless case. Here, we study decoding rules for probabilistic inference of a continuous stimulus from the spike times of a population of leaky integrate-and-fire neurons with threshold noise. We derive three algorithms for approximating the posterior distribution over stimuli as a function of the observed spike trains. In addition to a reconstruction of the stimulus we thus obtain an estimate of the uncertainty as well. Furthermore, we derive a `spike-by-spike' online decoding scheme that recursively updates the posterior with the arrival of each new spike. We use these decoding rules to reconstruct time-varying stimuli represented by a Gaussian process from spike trains of single neurons as well as neural populations.},
  url      = {http://www.frontiersin.org/computationalneuroscience/paper/10.3389/neuro.10/021.2009/pdf/},
  state    = {published},
  doi      = {10.3389/neuro.10.021.2009},
  author   = {Gerwinn, S. and Macke, J. H. and Bethge, M.}
}
@article{5157,
  title    = {Generating Spike Trains with Specified Correlation Coefficients},
  journal  = {Neural Computation},
  year     = {2009},
  month    = feb,
  volume   = {21},
  number   = {2},
  pages    = {397--423},
  abstract = {Spike trains recorded from populations of neurons can exhibit substantial pairwise correlations between neurons and rich temporal structure. Thus, for the realistic simulation and analysis of neural systems, it is essential to have efficient methods for generating artificial spike trains with specified correlation structure. Here we show how correlated binary spike trains can be simulated by means of a latent multivariate gaussian model. Sampling from the model is computationally very efficient and, in particular, feasible even for large populations of neurons. The entropy of the model is close to the theoretical maximum for a wide range of parameters. In addition, this framework naturally extends to correlations over time and offers an elegant way to model correlated neural spike counts with arbitrary marginal distributions.},
  file_url = {/fileadmin/user_upload/files/publications/macke2009_5157[0].pdf},
  url      = {http://www.mitpressjournals.org/doi/pdf/10.1162/neco.2008.02-08-713},
  state    = {published},
  doi      = {10.1162/neco.2008.02-08-713},
  author   = {Macke, J. H. and Berens, P. and Ecker, A. S. and Tolias, A. S. and Bethge, M.}
}
@article{4877,
  title    = {Comparison of Pattern Recognition Methods in Classifying High-resolution {BOLD} Signals Obtained at High Magnetic Field in Monkeys},
  journal  = {Magnetic Resonance Imaging},
  year     = {2008},
  month    = sep,
  volume   = {26},
  number   = {7},
  pages    = {1007--1014},
  abstract = {Pattern recognition methods have shown that functional magnetic resonance imaging (fMRI) data can reveal significant information about brain activity. For example, in the debate of how object categories are represented in the brain, multivariate analysis has been used to provide evidence of a distributed encoding scheme [Science 293:5539 (2001) 2425--2430]. Many follow-up studies have employed different methods to analyze human fMRI data with varying degrees of success [Nature reviews 7:7 (2006) 523--534]. In this study, we compare four popular pattern recognition methods: correlation analysis, support-vector machines (SVM), linear discriminant analysis (LDA) and Gaussian naïve Bayes (GNB), using data collected at high field (7 Tesla) with higher resolution than usual fMRI studies. We investigate prediction performance on single trials and for averages across varying numbers of stimulus presentations. The performance of the various algorithms depends on the nature of the brain activity being categorized: for several tasks, many of the methods work well, whereas for others, no method performs above chance level. An important factor in overall classification performance is careful preprocessing of the data, including dimensionality reduction, voxel selection and outlier elimination.},
  file_url = {/fileadmin/user_upload/files/publications/sdarticle_4877[0].pdf},
  url      = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6T9D-4T5BWJY-5-7&_cdi=5112&_user=29041&_orig=browse&_coverDate=09%2F30%2F2008&_sk=999739992&view=c&wchp=dGLbVzz-zSkWb&md5=25e9},
  state    = {published},
  doi      = {10.1016/j.mri.2008.02.016},
  author   = {Ku, S.-P. and Gretton, A. and Macke, J. and Logothetis, N. K.}
}
@article{4667,
  title    = {Contour-propagation Algorithms for Semi-automated Reconstruction of Neural Processes},
  journal  = {Journal of Neuroscience Methods},
  year     = {2008},
  month    = jan,
  volume   = {167},
  number   = {2},
  pages    = {349--357},
  abstract = {A new technique, Serial Block Face Scanning Electron Microscopy (SBFSEM), allows for automatic sectioning and imaging of biological tissue with a scanning electron microscope. Image stacks generated with this technology have a resolution sufficient to distinguish different cellular compartments, including synaptic structures, which should make it possible to obtain detailed anatomical knowledge of complete neuronal circuits. Such an image stack contains several thousands of images and is recorded with a minimal voxel size of 10-20nm in the x and y- and 30nm in z-direction. Consequently, a tissue block of 1mm3 (the approximate volume of the Calliphora vicina brain) will produce several hundred terabytes of data. Therefore, highly automated 3D reconstruction algorithms are needed. As a first step in this direction we have developed semi-automated segmentation algorithms for a precise contour tracing of cell membranes. These algorithms were embedded into an easy-to-operate user interface, which allows direct 3D observation of the extracted objects during the segmentation of image stacks. Compared to purely manual tracing, processing time is greatly accelerated.},
  file_url = {/fileadmin/user_upload/files/publications/Macke_Maack_07_JNeuMeth_Segmentation_[0].pdf},
  url      = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6T04-4PCXG9T-1-1&_cdi=4852&_user=29041&_orig=browse&_coverDate=08%2F10%2F2007&_sk=999999999&view=c&wch},
  state    = {published},
  doi      = {10.1016/j.jneumeth.2007.07.021},
  author   = {Macke, J. H. and Maack, N. and Gupta, R. and Denk, W. and Sch{\"o}lkopf, B. and Borst, A.}
}
@inproceedings{ParkBM2015,
  title       = {Unlocking neural population non-stationarity using a hierarchical dynamics model},
  year        = {2016},
  pages       = {145--153},
  abstract    = {Neural population activity often exhibits rich variability. This variability is thought to arise from single-neuron stochasticity, neural dynamics on short time-scales, as well as from modulations of neural firing properties on long time-scales, often referred to as non-stationarity. To better understand the nature of co-variability in neural circuits and their impact on cortical information processing, we introduce a hierarchical dynamics model that is able to capture inter-trial modulations in firing rates, as well as neural population dynamics. We derive an algorithm for Bayesian Laplace propagation for fast posterior inference, and demonstrate that our model provides a better account of the structure of neural firing than existing stationary dynamics models, when applied to neural population recordings from primary visual cortex.},
  file_url    = {fileadmin/user_upload/files/publications/2015/NIPS-2015-Park.pdf},
  url         = {http://papers.nips.cc/paper/5790-unlocking-neural-population-non-stationarities-using-hierarchical-dynamics-models},
  editor      = {Cortes, C. and Lawrence, N. D. and Lee, D. D. and Sugiyama, M. and Garnett, R.},
  publisher   = {Curran},
  address     = {Red Hook, NY, USA},
  booktitle   = {Advances in Neural Information Processing Systems 28},
  event_name  = {Twenty-Ninth Annual Conference on Neural Information Processing Systems (NIPS 2015)},
  event_place = {Montréal, Canada},
  state       = {published},
  author      = {Park, M. and Bohner, G. and Macke, J.}
}
@inproceedings{PutzkyFBM2014,
  title       = {A {Bayesian} model for identifying hierarchically organised states in neural population activity},
  year        = {2015},
  pages       = {3095--3103},
  abstract    = {Neural population activity in cortical circuits is not solely driven by external inputs, but is also modulated by endogenous states. These cortical states vary on multiple time-scales and also across areas and layers of the neocortex. To understand information processing in cortical circuits, we need to understand the statistical structure of internal states and their interaction with sensory inputs. Here, we present a statistical model for extracting hierarchically organized neural population states from multi-channel recordings of neural spiking activity. We model population states using a hidden Markov decision tree with state-dependent tuning parameters and a generalized linear observation model. Using variational Bayesian inference, we estimate the posterior distribution over parameters from population recordings of neural spike trains. On simulated data, we show that we can identify the underlying sequence of population states over time and reconstruct the ground truth parameters. Using extracellular population recordings from visual cortex, we find that a model with two levels of population states outperforms a generalized linear model which does not include state-dependence, as well as models which only including a binary state. Finally, modelling of state-dependence via our model also improves the accuracy with which sensory stimuli can be decoded from the population response.},
  file_url    = {fileadmin/user_upload/files/publications/2014/NIPS-2014-Putzky-Paper.pdf},
  file_url2   = {fileadmin/user_upload/files/publications/2014/NIPS-2014-Putzky-Suppl.pdf},
  url         = {http://papers.nips.cc/paper/5338-a-bayesian-model-for-identifying-hierarchically-organised-states-in-neural-population-activity},
  editor      = {Ghahramani, Z. and Welling, M. and Cortes, C. and Lawrence, N. D. and Weinberger, K. Q.},
  publisher   = {Curran},
  address     = {Red Hook, NY, USA},
  booktitle   = {Advances in Neural Information Processing Systems 27},
  event_name  = {Twenty-Eighth Annual Conference on Neural Information Processing Systems (NIPS 2014)},
  event_place = {Montréal, Quebec, Canada},
  state       = {published},
  isbn        = {978-1-5108-0041-0},
  author      = {Putzky, P. and Franzen, F. and Bassetto, G. and Macke, J. H.}
}
@inproceedings{ArcherKPM2014,
  title       = {Low-dimensional models of neural population activity in sensory cortical circuits},
  year        = {2015},
  pages       = {343--351},
  abstract    = {Neural responses in visual cortex are influenced by visual stimuli and by ongoing spiking activity in local circuits. An important challenge in computational neuroscience is to develop models that can account for both of these features in large multi-neuron recordings and to reveal how stimulus representations interact with and depend on cortical dynamics. Here we introduce a statistical model of neural population activity that integrates a nonlinear receptive field model with a latent dynamical model of ongoing cortical activity. This model captures the temporal dynamics, effective network connectivity in large population recordings, and correlations due to shared stimulus drive as well as common noise. Moreover, because the nonlinear stimulus inputs are mixed by the ongoing dynamics, the model can account for a relatively large number of idiosyncratic receptive field shapes with a small number of nonlinear inputs to a low-dimensional latent dynamical model. We introduce a fast estimation method using online expectation maximization with Laplace approximations. Inference scales linearly in both population size and recording duration. We apply this model to multi-channel recordings from primary visual cortex and show that it accounts for a large number of individual neural receptive fields using a small number of nonlinear inputs and a low-dimensional dynamical model.},
  file_url    = {fileadmin/user_upload/files/publications/2014/NIPS-2014-Archer.pdf},
  url         = {http://papers.nips.cc/paper/5263-low-dimensional-models-of-neural-population-activity-in-sensory-cortical-circuits},
  editor      = {Ghahramani, Z. and Welling, M. and Cortes, C. and Lawrence, N. D. and Weinberger, K. Q.},
  publisher   = {Curran},
  address     = {Red Hook, NY, USA},
  booktitle   = {Advances in Neural Information Processing Systems 27},
  event_name  = {Twenty-Eighth Annual Conference on Neural Information Processing Systems (NIPS 2014)},
  event_place = {Montréal, Quebec, Canada},
  state       = {published},
  isbn        = {978-1-5108-0041-0},
  author      = {Archer, E. W. and Koster, U. and Pillow, J. W. and Macke, J. H.}
}
@inproceedings{TuragaBPDPHM2013,
  title       = {Inferring neural population dynamics from multiple partial recordings of the same neural circuit},
  year        = {2014},
  pages       = {539--547},
  abstract    = {Simultaneous recordings of the activity of large neural populations are extremely valuable as they can be used to infer the dynamics and interactions of neurons in a local circuit, shedding light on the computations performed. It is now possible to measure the activity of hundreds of neurons using 2-photon calcium imaging. However, many computations are thought to involve circuits consisting of thousands of neurons, such as cortical barrels in rodent somatosensory cortex. Here we contribute a statistical method for ``stitching'' together sequentially imaged sets of neurons into one model by phrasing the problem as fitting a latent dynamical system with missing observations. This method allows us to substantially expand the population-sizes for which population dynamics can be characterized---beyond the number of simultaneously imaged neurons. In particular, we demonstrate using recordings in mouse somatosensory cortex that this method makes it possible to predict noise correlations between non-simultaneously recorded neuron pairs.},
  file_url    = {fileadmin/user_upload/files/publications/2013/NIPS-2013-Turaga.pdf},
  url         = {http://nips.cc/Conferences/2013/},
  editor      = {Burges, C. J. C. and Bottou, L. and Welling, M. and Ghahramani, Z. and Weinberger, K. Q.},
  publisher   = {Curran},
  address     = {Red Hook, NY, USA},
  booktitle   = {Advances in Neural Information Processing Systems 26},
  event_name  = {Twenty-Seventh Annual Conference on Neural Information Processing Systems (NIPS 2013)},
  event_place = {Stateline, NV, USA},
  state       = {published},
  isbn        = {978-1-63266-024-4},
  author      = {Turaga, S. C. and Buesing, L. and Packer, A. M. and Dalgleish, H. and Pettit, N. and Hausser, M. and Macke, J. H.}
}
@inproceedings{BusingMS2013,
  title       = {Spectral learning of linear dynamics from generalised-linear observations with application to neural population data},
  year        = {2013},
  month       = apr,
  pages       = {1691--1699},
  abstract    = {Latent linear dynamical systems with generalised-linear observation models arise in a variety of applications, for example when modelling the spiking activity of populations of neurons. Here, we show how spectral learning methods for linear systems with Gaussian observations (usually called subspace identification in this context) can be extended to estimate the parameters of dynamical system models observed through non-Gaussian noise models. We use this approach to obtain estimates of parameters for a dynamical model of neural population data, where the observed spike-counts are Poisson-distributed with log-rates determined by the latent dynamical process, possibly driven by external inputs. We show that the extended system identification algorithm is consistent and accurately recovers the correct parameters on large simulated data sets with much smaller computational cost than approximate expectation-maximisation (EM) due to the non-iterative nature of subspace identification. Even on smaller data sets, it provides an effective initialization for EM, leading to more robust performance and faster convergence. These benefits are shown to extend to real neural data.},
  file_url    = {fileadmin/user_upload/files/publications/2013/NIPS-2012-Buesing.pdf},
  url         = {https://papers.nips.cc/book/advances-in-neural-information-processing-systems-25-2012},
  editor      = {Bartlett, P. and Pereira, F. C. N. and Bottou, L. and Burges, C. J. C. and Weinberger, K. Q.},
  publisher   = {Curran},
  address     = {Red Hook, NY, USA},
  booktitle   = {Advances in Neural Information Processing Systems 25},
  event_name  = {Twenty-Sixth Annual Conference on Neural Information Processing Systems (NIPS 2012)},
  event_place = {Lake Tahoe, NV, USA},
  state       = {published},
  isbn        = {978-1-627-48003-1},
  author      = {Buesing, L. and Macke, J. H. and Sahani, M.}
}
@inproceedings{MackeBCYSS2012,
  title       = {Empirical models of spiking in neural populations},
  year        = {2012},
  month       = jan,
  pages       = {1350--1358},
  abstract    = {Neurons in the neocortex code and compute as part of a locally interconnected population. Large-scale multi-electrode recording makes it possible to access these population processes empirically by fitting statistical models to unaveraged data. What statistical structure best describes the concurrent spiking of cells within a local network? We argue that in the cortex, where firing exhibits extensive correlations in both time and space and where a typical sample of neurons still reflects only a very small fraction of the local population, the most appropriate model captures shared variability by a low-dimensional latent process evolving with smooth dynamics, rather than by putative direct coupling. We test this claim by comparing a latent dynamical model with realistic spiking observations to coupled generalised linear spike-response models (GLMs) using cortical recordings. We find that the latent dynamical approach outperforms the GLM in terms of goodness-of-fit, and reproduces the temporal correlations in the data more accurately. We also compare models whose observations models are either derived from a Gaussian or point-process models, finding that the non-Gaussian model provides slightly better goodness-of-fit and more realistic population spike counts.},
  file_url    = {fileadmin/user_upload/files/publications/2012/NIPS-2011-Macke.pdf},
  url         = {https://papers.nips.cc/book/advances-in-neural-information-processing-systems-24-2011},
  editor      = {Shawe-Taylor, J. and Zemel, R. S. and Bartlett, P. and Pereira, F. and Weinberger, K. Q.},
  publisher   = {Curran},
  address     = {Red Hook, NY, USA},
  booktitle   = {Advances in Neural Information Processing Systems 24},
  event_name  = {Twenty-Fifth Annual Conference on Neural Information Processing Systems (NIPS 2011)},
  event_place = {Granada, Spain},
  state       = {published},
  isbn        = {978-1-618-39599-3},
  author      = {Macke, J. H. and B{\"u}sing, L. and Cunningham, J. P. and Yu, B. M. and Shenoy, K. V. and Sahani, M.}
}
@inproceedings{MackeML2012,
  title       = {How biased are maximum entropy models?},
  year        = {2012},
  month       = jan,
  pages       = {2034--2042},
  abstract    = {Maximum entropy models have become popular statistical models in neuroscience and other areas in biology, and can be useful tools for obtaining estimates of mutual information in biological systems. However, maximum entropy models fit to small data sets can be subject to sampling bias; i.e. the true entropy of the data can be severely underestimated. Here we study the sampling properties of estimates of the entropy obtained from maximum entropy models. We show that if the data is generated by a distribution that lies in the model class, the bias is equal to the number of parameters divided by twice the number of observations. However, in practice, the true distribution is usually outside the model class, and we show here that this misspecification can lead to much larger bias. We provide a perturbative approximation of the maximally expected bias when the true model is out of model class, and we illustrate our results using numerical simulations of an Ising model; i.e. the second-order maximum entropy distribution on binary data.},
  file_url    = {fileadmin/user_upload/files/publications/2012/NIPS-2011-Macke-2.pdf},
  url         = {https://papers.nips.cc/book/advances-in-neural-information-processing-systems-24-2011},
  editor      = {Shawe-Taylor, J. and Zemel, R. S. and Bartlett, P. and Pereira, F. and Weinberger, K. Q.},
  publisher   = {Curran},
  address     = {Red Hook, NY, USA},
  booktitle   = {Advances in Neural Information Processing Systems 24},
  event_name  = {Twenty-Fifth Annual Conference on Neural Information Processing Systems (NIPS 2011)},
  event_place = {Granada, Spain},
  state       = {published},
  isbn        = {978-1-618-39599-3},
  author      = {Macke, J. H. and Murray, I. and Latham, P.}
}
@inproceedings{6121,
  title       = {{Bayesian} estimation of orientation preference maps},
  year        = {2010},
  month       = apr,
  pages       = {1195--1203},
  abstract    = {Imaging techniques such as optical imaging of intrinsic signals, 2-photon calcium imaging and voltage sensitive dye imaging can be used to measure the functional organization of visual cortex across different spatial and temporal scales. Here, we present Bayesian methods based on Gaussian processes for extracting topographic maps from functional imaging data. In particular, we focus on the estimation of orientation preference maps (OPMs) from intrinsic signal imaging data. We model the underlying map as a bivariate Gaussian process, with a prior covariance function that reflects known properties of OPMs, and a noise covariance adjusted to the data. The posterior mean can be interpreted as an optimally smoothed estimate of the map, and can be used for model based interpolations of the map from sparse measurements. By sampling from the posterior distribution, we can get error bars on statistical properties such as preferred orientations, pinwheel locations or pinwheel counts. Finally, the use of an explicit probabilistic model facilitates interpretation of parameters and quantitative model comparisons. We demonstrate our model both on simulated data and on intrinsic signaling data from ferret visual cortex.},
  file_url    = {/fileadmin/user_upload/files/publications/NIPS2009-Macke_6121[0].pdf},
  url         = {http://nips.cc/Conferences/2009/},
  editor      = {Bengio, Y. and Schuurmans, D. and Lafferty, J. and Williams, C. and Culotta, A.},
  publisher   = {Curran},
  address     = {Red Hook, NY, USA},
  booktitle   = {Advances in Neural Information Processing Systems 22},
  event_name  = {23rd Annual Conference on Neural Information Processing Systems (NIPS 2009)},
  event_place = {Vancouver, BC, Canada},
  state       = {published},
  isbn        = {978-1-615-67911-9},
  author      = {Macke, J. H. and Gerwinn, S. and Kaschube, M. and White, L. E. and Bethge, M.}
}
@inproceedings{4728,
  title       = {{Bayesian} Inference for Spiking Neuron Models with a Sparsity Prior},
  year        = {2008},
  month       = sep,
  pages       = {529--536},
  abstract    = {Generalized linear models are the most commonly used tools to describe the stimulus selectivity of sensory neurons. Here we present a Bayesian treatment of such models. Using the expectation propagation algorithm, we are able to approximate the full posterior distribution over all weights. In addition, we use a Laplacian prior to favor sparse solutions. Therefore, stimulus features that do not critically influence neural activity will be assigned zero weights and thus be effectively excluded by the model. This feature selection mechanism facilitates both the interpretation of the neuron model as well as its predictive abilities. The posterior distribution can be used to obtain confidence intervals which makes it possible to assess the statistical significance of the solution. In neural data analysis, the available amount of experimental measurements is often limited whereas the parameter space is large. In such a situation, both regularization by a sparsity prior and uncertainty estimates for the model parameters are essential. We apply our method to multi-electrode recordings of retinal ganglion cells and use our uncertainty estimate to test the statistical significance of functional couplings between neurons. Furthermore we used the sparsity of the Laplace prior to select those filters from a spike-triggered covariance analysis that are most informative about the neural response.},
  file_url    = {/fileadmin/user_upload/files/publications/BayesLNP_4728[0].pdf},
  url         = {http://nips.cc/Conferences/2007/},
  editor      = {Platt, J. C. and Koller, D. and Singer, Y. and Roweis, S.},
  publisher   = {Curran},
  address     = {Red Hook, NY, USA},
  booktitle   = {Advances in Neural Information Processing Systems 20},
  event_name  = {Twenty-First Annual Conference on Neural Information Processing Systems (NIPS 2007)},
  event_place = {Vancouver, BC, Canada},
  state       = {published},
  isbn        = {978-1-605-60352-0},
  author      = {Gerwinn, S. and Macke, J. and Seeger, M. and Bethge, M.}
}
@Inproceedings{ 4738,
title = {Receptive Fields without Spike-Triggering},
year = {2008},
month = {9},
pages = {969--976},
abstract = {Stimulus selectivity of sensory neurons is often characterized by estimating their receptive field properties such as orientation selectivity. Receptive fields are usually derived from the mean (or covariance) of the spike-triggered stimulus ensemble. This approach treats each spike as an independent message but does not take into account that information might be conveyed through patterns of neural activity that are distributed across space or time. Can we find a concise description for the processing of a whole population of neurons analogous to the receptive field for single neurons? Here, we present a generalization of the linear receptive field which is not bound to be triggered on individual spikes but can be meaningfully
linked to distributed response patterns. More precisely, we seek to identify those stimulus features and the corresponding patterns of neural activity that are most
reliably coupled. We use an extension of reverse-correlation methods based on canonical correlation analysis. The resulting population receptive fields span the
subspace of stimuli that is most informative about the population response. We evaluate our approach using both neuronal models and multi-electrode recordings from rabbit retinal ganglion cells. We show how the model can be extended to capture nonlinear stimulus-response relationships using kernel canonical correlation analysis, which makes it possible to test different coding mechanisms. Our technique can also be used to calculate receptive fields from multi-dimensional neural measurements such as those obtained from dynamic imaging methods.},
file_url = {/fileadmin/user_upload/files/publications/NIPS2007-Macke_4738[0].pdf},
web_url = {http://nips.cc/Conferences/2007/},
editor = {Platt, J. C. and Koller, D. and Singer, Y. and Roweis, S.},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in neural information processing systems 20},
event_name = {Twenty-First Annual Conference on Neural Information Processing Systems (NIPS 2007)},
event_place = {Vancouver, BC, Canada},
state = {published},
ISBN = {978-1-605-60352-0},
author = {Macke JH{jakob}; Zeck G{gzeck}; Bethge M{mbethge}}
}
@Inproceedings{ 4266,
title = {Inducing Metric Violations in Human Similarity Judgements},
year = {2007},
month = {9},
pages = {777--784},
abstract = {Attempting to model human categorization and similarity judgements is both a very interesting but also an exceedingly difficult challenge. Some of the difficulty
arises because of conflicting evidence whether human categorization and similarity judgements should or should not be modelled as to operate on a mental representation that is essentially metric. Intuitively, this has a strong appeal as it would allow (dis)similarity to be represented geometrically as distance in some internal space. Here we show how a single stimulus, carefully constructed in a
psychophysical experiment, introduces l2 violations in what used to be an internal similarity space that could be adequately modelled as Euclidean. We term this one
influential data point a conflictual judgement. We present an algorithm of how to analyse such data and how to identify the crucial point. Thus there may not be a
strict dichotomy between either a metric or a non-metric internal space but rather degrees to which potentially large subsets of stimuli are represented metrically
with a small subset causing a global violation of metricity.},
file_url = {/fileadmin/user_upload/files/publications/NIPS2006-Laub_4266[0].pdf},
web_url = {http://nips.cc/Conferences/2006/},
editor = {Sch{\"o}lkopf, B. and Platt, J. and Hofmann, T.},
publisher = {MIT Press},
address = {Cambridge, MA, USA},
booktitle = {Advances in Neural Information Processing Systems 19},
event_name = {Twentieth Annual Conference on Neural Information Processing Systems (NIPS 2006)},
event_place = {Vancouver, BC, Canada},
state = {published},
ISBN = {0-262-19568-2},
author = {Laub J; Macke JH{jakob}; M\"uller K-R{klaus}{Department Empirical Inference}; Wichmann FA{felix}{Department Empirical Inference}}
}
@Inproceedings{ 4305,
title = {Unsupervised learning of a steerable basis for invariant image representations},
year = {2007},
month = {2},
pages = {1--12},
abstract = {There are two aspects to unsupervised learning of invariant representations of images: First, we can reduce the dimensionality of the representation by finding an optimal trade-off between temporal stability and informativeness. We show that the answer to this optimization problem is generally not unique so that there is still considerable freedom in choosing a suitable basis. Which of the many optimal representations should be selected? Here, we focus on this second aspect, and seek to find representations that are invariant under geometrical transformations occurring in sequences of natural images. We utilize ideas of steerability and Lie groups, which have been developed in the context of filter design. In particular, we show how an anti-symmetric version of canonical correlation analysis can be used to learn a full-rank image basis which is steerable with respect to rotations. We provide a geometric interpretation of this algorithm by showing that it finds the two-dimensional eigensubspaces of the average
bivector. For data which exhibits a variety of transformations, we develop a bivector clustering algorithm, which we use to learn a basis of generalized quadrature pairs (i.e. complex cells) from sequences of natural images.},
file_url = {/fileadmin/user_upload/files/publications/SPIE2007-Bethge_4305[0].pdf},
web_url = {http://www.ece.northwestern.edu/~pappas/hvei/past/6806.html},
editor = {Rogowitz, B. E.},
publisher = {SPIE},
address = {Bellingham, WA, USA},
series = {Proceedings of the SPIE ; 6492},
booktitle = {Human Vision and Electronic Imaging XII},
event_name = {SPIE Human Vision and Electronic Imaging Conference 2007},
event_place = {San Jose, CA, USA},
state = {published},
ISBN = {978-0-8194-6605-1},
DOI = {10.1117/12.711119},
author = {Bethge M{mbethge}; Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}}
}
@Inbook{ Macke2014,
title = {Electrophysiology Analysis, {Bayesian}},
year = {2015},
pages = {1078--1082},
abstract = {Bayesian analysis of electrophysiological data refers to the statistical processing of data obtained in electrophysiological experiments (i.e., recordings of action potentials or voltage measurements with electrodes or imaging devices) which utilize methods from Bayesian statistics. Bayesian statistics is a framework for describing and modelling empirical data using the mathematical language of probability to model uncertainty. Bayesian statistics provides a principled and flexible framework for combining empirical observations with prior knowledge and for quantifying uncertainty. These features are especially useful for analysis questions in which the dataset sizes are small in comparison to the complexity of the model, which is often the case in neurophysiological data analysis.},
file_url = {fileadmin/user_upload/files/publications/2014/Bayesian_Neurophysiology.pdf},
web_url = {http://link.springer.com/content/pdf/10.1007%2F978-1-4614-7320-6_448-1.pdf},
editor = {Jaeger, D. and Jung, R.},
publisher = {Springer},
address = {New York, NY, USA},
booktitle = {Encyclopedia of Computational Neuroscience},
state = {published},
ISBN = {978-1-4614-6674-1},
DOI = {10.1007/978-1-4614-7320-6_448-1},
author = {Macke JH{jakob}}
}
@Inbook{ MackeBS2015,
title = {Estimating State and Parameters in State Space Models of Spike Trains},
year = {2015},
pages = {137--159},
abstract = {Neural computations at all scales of evolutionary and behavioural complexity are carried out by recurrently connected networks of neurons that communicate with each other, with neurons elsewhere in the brain, and with muscles through the firing of action potentials or “spikes.” To understand how nervous tissue computes, it is therefore necessary to understand how the spiking of neurons is shaped both by inputs to the network and by the recurrent action of existing network activity. Whereas most historical spike data were collected one neuron at a time, new techniques including silicon multielectrode array recording and scanning 2-photon, light-sheet or light-field fluorescence calcium imaging increasingly make it possible to record spikes from dozens, hundreds and potentially thousands of individual neurons simultaneously. These new data offer unprecedented empirical access to network computation, promising breakthroughs both in our understanding of neural coding and computation (Stevenson & Kording 2011), and our ability to build prosthetic neural interfaces (Santhanam et al. 2006). Fulfillment of this promise will require powerful methods for data modeling and analysis, able to capture the structure of statistical dependence of network activity across neurons and time.
Probabilistic latent state space models (SSMs) are particularly well-suited to this task. Neural activity often appears stochastic, in that repeated trials under the same controlled experimental conditions can evoke quite different patterns of firing. Some part of this variation may reflect differences in the way the computation unfolds on each trial. Another part might reflect noisy creation and transmission of neural signals. Yet more may come from chaotic amplification of small perturbations. As computational signals are thought to be distributed across the population (in a “population code”), variation in the computation may be distinguished by its common impact on different neurons and the systematic evolution of these common effects in time.
An SSM is able to capture such structured variation through the evolution of its latent state trajectory. This latent state provides a summary description of all factors modulating neural activity that are not observed directly. These factors could include processes such as arousal, attention, cortical state (Harris & Thiele 2011) or behavioural states of the animal (Niell & Stryker 2010; Maimon 2011).},
file_url = {fileadmin/user_upload/files/publications/2015/Macke_Busing_Sahani_DRAFT.pdf},
web_url = {http://ebooks.cambridge.org/chapter.jsf?bid=CBO9781139941433&cid=CBO9781139941433A054},
editor = {Chen, Z.},
publisher = {Cambridge University Press},
address = {Cambridge, UK},
booktitle = {Advanced State Space Methods for Neural and Clinical Data},
state = {published},
ISBN = {978-1-107-07919-9},
DOI = {10.1017/CBO9781139941433.007},
author = {Macke JH{jakob}; Buesing L; Sahani M}
}
@Techreport{ ParkM2014,
title = {Hierarchical models for neural population dynamics in the presence of non-stationarity},
year = {2015},
month = {1},
abstract = {Neural population activity often exhibits rich variability and temporal structure. This variability is thought to arise from single-neuron stochasticity, neural dynamics
on short time-scales, as well as from modulations of neural firing properties on long time-scales, often referred to as “non-stationarity”. To better understand the
nature of co-variability in neural circuits and their impact on cortical information processing, we need statistical models that are able to capture multiple sources
of variability on different time-scales. Here, we introduce a hierarchical statistical model of neural population activity which models both neural population dynamics as well as inter-trial modulations in firing rates. In addition, we extend the model to allow us to capture non-stationarities in the population dynamics itself (i.e., correlations across neurons). We develop variational inference methods for learning model parameters, and demonstrate that the method can recover
non-stationarities in both average firing rates and correlation structure. Applied to neural population recordings from anesthetized macaque primary visual cortex,
our models provide a better account of the structure of neural firing than stationary dynamics models.},
web_url = {http://arxiv.org/pdf/1410.3111v1},
state = {submitted},
author = {Park M; Macke JH{jakob}}
}
@Techreport{ 5865,
title = {The effect of pairwise neural
correlations on global population
statistics},
year = {2009},
month = {3},
number = {183},
abstract = {Simultaneously recorded neurons often exhibit correlations in their spiking activity. These correlations
shape the statistical structure of the population activity, and can lead to substantial redundancy across
neurons. Here, we study the effect of pairwise correlations on the population spike count statistics and redundancy
in populations of threshold-neurons in which response-correlations arise from correlated Gaussian inputs. We investigate
the scaling of the redundancy as the population size is increased, and compare the asymptotic redundancy
in our models to the corresponding maximum- and minimum entropy models.},
file_url = {/fileadmin/user_upload/files/publications/MPIK-TR-183_[0].pdf},
state = {published},
author = {Macke JH{jakob}; Opper M; Bethge M{mbethge}}
}
@Poster{ SpeiserTAM2017,
title = {Amortized inference for fast spike prediction from calcium imaging data},
year = {2017},
month = {2},
day = {25},
pages = {207--208},
abstract = {Calcium imaging allows neuronal activity measurements from large populations of spatially identified neurons in vivo.
However, spike inference algorithms are needed to infer spike times from fluorescence measurements of calcium concentration. Bayesian model inversion can be used to infer spikes, using carefully designed generative models that describe how spiking activity in a neuron influences measured fluorescence. Model inversion typically requires either computationally expensive MCMC sampling methods, or faster but approximate maximum a-posteriori estimation. We present a method for efficiently inverting generative models for spike inference. Our method is several orders of magnitude faster than existing approaches, allowing for generative-model based spike inference in real-time for large-scale population neural imaging, and can be applied to a wide range of linear and nonlinear generative models. We use recent advances in black-box variational inference (BBVI, Ranganath 2014) and ‘amortize’ inference by learning a deep network based recognition-model for fast model inversion (Mnih 2016). At training time, we simultaneously optimize the parameters of the generative model as well as the weights of a deep neural network which predicts the posterior approximation. At test time, performing inference for a given trace amounts to a fast single forward pass through the network at constant computational cost, and without the need for iterative optimization or MCMC sampling. On simple synthetic datasets, we show that our method is just as accurate as existing methods. However, the BBVI approach works with a wide range of generative models in a black-box manner as long as they are differentiable. In particular, we show that using a nonlinear generative model is better suited to describe GCaMP6 data (Chen 2013), leading to improved performance on real data. The framework can also easily be extended to combine supervised and unsupervised objectives enabling
semi-supervised learning of spike inference.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne2017_posters_3},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2017)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Speiser A; Turaga S; Archer E{earcher}; Macke J{jakob}}
}
@Poster{ LueckmannMN2017,
title = {Can serial dependencies in choices and neural activity explain choice probabilities?},
year = {2017},
month = {2},
day = {24},
pages = {153},
abstract = {The activity of sensory neurons co-varies with choice during perceptual decisions, commonly quantified as “choice
probability”. Moreover, choices are influenced by a subject’s previous choice (serial dependencies) and neuronal
activity often shows temporal correlations on long (seconds) timescales. Here, we ask whether these findings
are linked, specifically: How are choice probabilities in sensory neurons influenced by serial dependencies in
choices and neuronal activity? Do serial dependencies in choices and neural activity reflect the same underlying
process? Using generalized linear models (GLMs) we analyze simultaneous measurements of behavior and V2 neural activity in macaques performing a visual discrimination task. We observe that past decisions are substantially more predictive of the current choice than the current spike count. Moreover, spiking activity exhibits strong correlations from trial to trial. We dissect temporal correlations by systematically varying the order of
predictors in the GLM, and find that these correlations reflect two largely separate processes: There is neither a
direct effect of the previous-trial spike count on choice, nor a direct effect of preceding choices on the spike count.
Additionally, variability in spike counts can largely be explained by slow fluctuations across multiple trials (using a Gaussian Process latent modulator within the GLM). Is choice-probability explained by history effects, i.e. how big is the residual choice probability after correcting for temporal correlations? We compute semi-partial correlations between choices and neural activity, which constitute a lower bound on the residual choice probability. We find that removing history effects by using semi-partial correlations does not systematically change the magnitude of choice probabilities. We therefore conclude that despite the substantial serial dependencies in choices and neural activity these do not explain the observed choice probability. Rather, the serial dependencies in choices and spiking activity reflect two parallel processes which are correlated by instantaneous co-variations between choices and activity.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne2017_posters_2},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2017)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Lueckmann J-M; Macke J{jakob}; Nienborg H}
}
@Poster{ GoncalvesLBNM2017,
title = {Flexible {Bayesian} inference for mechanistic models of neural dynamics},
year = {2017},
month = {2},
day = {24},
pages = {113},
abstract = {One of the central goals of computational neuroscience is to understand the dynamics of single neurons and neural ensembles. However, linking mechanistic models of neural dynamics to empirical observations of neural activity has been challenging. Statistical inference is only possible for a few models of neural dynamics (e.g. GLMs), and no generally applicable, effective statistical inference algorithms are available: As a consequence, comparisons between models and data are either qualitative or rely on manual parameter tweaking, parameterfitting using heuristics or brute-force search. Furthermore, parameter-fitting approaches typically return a single best-fitting estimate, but do not characterize the entire space of models that would be consistent with data. We overcome this limitation by presenting a general method for Bayesian inference on mechanistic models of neural dynamics. Our approach can be applied in a ‘black box’ manner to a wide range of neural models without requiring model-specific modifications. In particular, it extends to models without explicit likelihoods (e.g. most spiking networks). We achieve this goal by building on recent advances in likelihood-free Bayesian inference (Papamakarios and Murray 2016, Moreno et al. 2016): the key idea is to simulate multiple data-sets from different parameters, and then to train a probabilistic neural network which approximates the mapping from data to posterior distribution. We illustrate this approach using Hodgkin-Huxley models of single neurons and models of spiking networks: On simulated data, estimated posterior distributions recover ground-truth parameters, and reveal the manifold of parameters for which the model exhibits the same behaviour. On in-vitro recordings of membrane voltages, we recover multivariate posteriors over biophysical parameters, and voltage traces accurately match empirical data. 
Our approach will enable neuroscientists to perform Bayesian inference on complex neural dynamics models without having to design model-specific algorithms, closing the gap between biophysical and statistical approaches to neural dynamics.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne2017_posters_2},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2017)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Goncalves P; Lueckmann J-M; Bassetto G{gbassetto}; Nonnenmacher M; Macke J{jakob}}
}
@Poster{ BassettoM2017,
title = {Using {Bayesian} inference to estimate receptive fields from a small number of spikes},
year = {2017},
month = {2},
day = {23},
pages = {63--64},
abstract = {A crucial step towards understanding how the external world is represented by sensory neurons is the characterization
of neural receptive fields. Advances in experimental methods give increasing opportunity to study sensory processing in behaving animals, but also necessitate the ability to estimate receptive fields from very small spike-counts. For visual neurons, the stimulus space can be very high dimensional, raising challenges for data-analysis: How can one accurately estimate neural receptive fields using only a few spikes, and obtain quantitative uncertainty-estimates about tuning properties (such as location and preferred orientation)? For many sensory areas, there are canonical parametric models of receptive field shapes (e.g., Gabor functions for primary visual cortex) which can be used to constrain receptive fields – we will use such parametric models for receptive field estimation in low-data regimes using full Bayesian inference. We will focus on modelling simple cells in primary visual cortex, but our approach will be applicable more generally. We model the spike generation process using a generalized linear model (GLM), with a receptive field parameterized as a time-modulated Gabor. Use of the parametric model dramatically reduces the number of parameters, and allows us to directly estimate the posterior distribution over interpretable model parameters. We develop an efficient Markov Chain Monte Carlo procedure which is adapted to receptive field estimation from movie-data, by exploiting spatio-temporal separability of receptive fields. We show that the method successfully detects the presence or absence of a receptive field in simulated data even when the total number of spikes is low, and can correctly recover ground-truth parameters. When applied to electrophysiological recordings, it returns estimates of model parameters which are consistent across different subsets of the data. In comparison with non-parametric methods based on Gaussian Processes, we find that it leads to better spike-prediction performance.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne2017_posters_1},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2017)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Bassetto G{gbassetto}; Macke J{jakob}}
}
@Poster{ BassettoM2016,
title = {Full {Bayesian} inference for model-based receptive field estimation, with application to primary visual cortex},
year = {2016},
month = {9},
day = {21},
pages = {117--118},
abstract = {A central question in sensory neuroscience is to understand how sensory information is represented in neural activity. A crucial step towards the solution of this problem is the characterization of the neuron’s receptive field (RF), which provides a quantitative description of those features of a rich sensory stimulus that modulate the firing rate of the neuron.
For visual neurons, the stimulus space can be very high dimensional, and RFs have to be estimated from neurophysiological recordings of limited size. The scarcity of data makes it paramount to have statistical methods which incorporate prior knowledge into the estimation process (Park & Pillow 2011), as well as to provide quantitative estimates of uncertainty about the inferred RFs (Stevenson et al 2011). For many sensory areas, there are canonical parametric models of RF shapes – e.g., Gabor functions for RFs in primary visual cortex (V1) (Jones & Palmer 1987). Bayesian methods provide a quantitative way of evaluating these models on empirical data by estimating the uncertainty of the inferred model parameters.
We present a technique for full Bayesian inference of the parameters of parametric RF models, focusing on Gabor-shapes for V1. We model the spike generation process by means of a generalized linear model (GLM, Paninski 2004), whose linear filter (i.e., RF) is parameterized as a time-modulated Gabor-function. Use of this model dramatically reduces the number of parameters required to describe the RF, and allows us to directly estimate the posterior distribution over interpretable model parameters (e.g. location, orientation, etc.). The resulting model is non-linear in the parameters. We present an efficient Markov Chain Monte Carlo procedure for inferring the full posterior distribution over model parameters.
We show that the method successfully detects the presence or absence of a RF in simulated data – even when the total number of spikes is very low – and can correctly recover ground-truth parameters. When applied to electrophysiological recordings, it returns estimates of model parameters which are consistent across different subsets of the data. Our current implementation is focused on the response of simple cells in V1, but the approach can readily be extended to other sensory areas or non-linear models of complex cells.},
web_url = {https://abstracts.g-node.org/conference/BC16/abstracts#/uuid/553b312a-dc80-48a6-87c2-3dddc19644d5},
event_name = {Bernstein Conference 2016},
event_place = {Berlin, Germany},
state = {published},
DOI = {10.12751/nncn.bc2016.0109},
author = {Bassetto G{gbassetto}; Macke J{jakob}}
}
@Poster{ NonnenmacherBSTM2016,
title = {Stitching neural activity in space and time: theory and practice},
year = {2016},
month = {2},
day = {27},
pages = {223--224},
abstract = {Simultaneous recordings of the activity of large neural populations are extremely valuable as they can be used to
infer the dynamics and interactions of neurons in a local circuit, shedding light on the computations performed. It
is now possible to measure the activity of hundreds of neurons using in-vivo 2-photon calcium imaging. However,
this experimental technique imposes a trade-off between the number of neurons which can be simultaneously recorded, and the temporal resolution at which the activity of those neurons can be sampled. Previous work (Turaga et al 2012, Bishop & Yu 2014) has shown that statistical models can be used to ameliorate this trade-off, by ‘stitching’ neural activity from subpopulations of neurons which have been imaged sequentially with overlap, rather than simultaneously. This makes it possible to estimate correlations even between non-simultaneously recorded neurons. In this work, we make two contributions: First, we show how taking into account correlations in the dynamics of neural activity gives rise to more general conditions under which stitching can be achieved, extending the work of (Bishop & Yu 2014). Second, we extend this framework to stitch activity both in space and time, i.e. from multiple subpopulations which might be imaged at different temporal rates. We use low-dimensional linear latent dynamical systems (LDS) to model neural population activity, and present scalable algorithms to estimate the parameters of a globally accurate LDS model from incomplete measurements. Using simulated data, we show that this approach can provide more accurate estimates of neural correlations than conventional approaches, and gives insights into the underlying neural dynamics.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_16},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2016)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Nonnenmacher M{mnonnenmacher}; Buesing L; Speiser A; Turaga S; Macke JH{jakob}}
}
@Poster{ NonnenmacherBBBM2015_2,
title = {Correlations and signatures of criticality in neural population models},
year = {2015},
month = {10},
day = {20},
volume = {45},
number = {543.23},
web_url = {http://www.sfn.org/am2015/},
event_name = {45th Annual Meeting of the Society for Neuroscience (Neuroscience 2015)},
event_place = {Chicago, IL, USA},
state = {published},
author = {Nonnenmacher M{mnonnenmacher}; Behrens C; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}; Macke JH{jakob}}
}
@Poster{ CzubaykoBNOMK2015,
title = {Anatomical basis of spiking correlation in upper layers of somatosensory cortex},
year = {2015},
month = {10},
day = {18},
volume = {45},
number = {240.25},
abstract = {In neuronal populations of the sensory cortex, stimulus responses are shaped by the cortical architecture on anatomical scales from tens of microns to millimeters. In particular, in L2/3 rodent vibrissal cortex we previously observed that whisker deflection evokes pairwise correlations that decrease both with distance between neurons and distance to the center of the whisker-associated column (Kerr, de Kock, Greenberg, Bruno, Sakmann, and Helmchen. (2007). J. Neurosci. 27: 13316-28). One possible explanation for this finding is that these correlations arise from anatomically structured common inputs. L4 spiny stellate (SS) cells send vertical axon fibers to L2/3 that are confined within the borders of the whisker-associated column and neuronal pairs closer together could exhibit greater dendritic overlap. Therefore, for pairs closer to the column center more of this overlap will intersect with SS projections. We tested this hypothesis using 2-photon targeted patching of L2/3 pyramidal pairs in anaesthetized rats to record sub- and suprathreshold stimulus responses followed by anatomic reconstruction of the neurons and barrel field. We found a positive and statistically significant association between correlated AP firing and dendritic overlay inside the whisker-associated column. This effect was strongest for suprathreshold activity evoked shortly after whisker deflection (~20 ms), and decayed rapidly thereafter. It was also robust with respect to the voxel size, determined by the L4 axon reconstructions, used to quantify dendritic overlap. No relationship was detectable for offset responses or spontaneous activity. These results support the notion that the spatially structured correlations observed for short-latency stimulus-evoked spiking arise from anatomically structured feed-forward projections.},
web_url = {http://www.sfn.org/am2015/},
event_name = {45th Annual Meeting of the Society for Neuroscience (Neuroscience 2015)},
event_place = {Chicago, IL, USA},
state = {published},
author = {Czubayko U{czubayko}; Bassetto G{gbassetto}; Narayanan RT{rnarayanan}; Oberlaender M{moberlaender}; Macke JH{jakob}; Kerr JND{jkerr}}
}
@Poster{ RullaNMWSK2015,
title = {Two-photon imaging of neuronal populations in the primary visual cortex representation of the overhead visual field},
year = {2015},
month = {10},
day = {18},
volume = {45},
number = {232.10},
abstract = {Rodents have a large binocular field of view that extends from the snout to over the animal's head. Recent experiments have shown that rodents have a strong, innate, evasive behavior evoked exclusively by stimuli presented above them. However, little is known about the functional properties of cortical neurons that represent the overhead visual field. Here we describe a method for allowing direct optical recording from populations of neurons representing the overhead visual field. Firstly, the conventional microscope objective has been replaced with a periscope coupled to a miniature objective to facilitate placement of a stimulus monitor above the rat’s head. Secondly, we developed a method for presentation of visual stimuli on the OLED display of a tablet running the Android OS, and a camera-based method for calibrating the position of the stimulus display in relation to the animal's head. Using this setup, we recorded in rats the activity of neurons in the representation of the overhead visual field of the primary visual cortex in response to a range of stimuli. Neurons were labeled with the calcium indicator OGB-1 with counterstaining of astrocytes using sulforhodamine 101. Stimuli were either an expanding or contracting looming dot, or a moving dot that moved at constant speed along multiple trajectories to cover all positions within the display. In both stimulus types, differing sets of foreground/background luminance were used. Preliminary results show that 19% of the neurons responded with clear and reproducible transients to the looming dot stimulus, and 30% were responsive to moving dot stimuli. The response profiles of neurons to different stimulus types and parameters were further analyzed in detail and compared between cortical areas and receptive field properties established for this cortical region.},
web_url = {http://www.sfn.org/am2015/},
event_name = {45th Annual Meeting of the Society for Neuroscience (Neuroscience 2015)},
event_place = {Chicago, IL, USA},
state = {published},
author = {Rulla S{rulla}; Ng B{benedict}; Macke J{jakob}; Wallace D{dhw}; Sawinski J{jsaw}; Kerr J{jkerr}}
}
@Poster{ BassettoSEM2015,
title = {A statistical characterization of neural population responses in {V1}},
year = {2015},
month = {9},
day = {16},
pages = {146--147},
abstract = {Population activity in primary visual cortex exhibits substantial variability that is correlated on multiple time scales and across neurons [1]. A quantitative account of how
visual information is encoded in populations of neurons in primary visual cortex therefore requires an accurate characterization of this variability. Our goal is to provide a statistical model for capturing the statistical structure of this variability and its dependence on external stimuli, with particular focus on temporal correlations both on short (within-trial) and long (across-trial) time-scales [2]. We address this question using neural population recordings from primary visual cortex in response to drifting gratings [3], using the framework of generalized linear models (GLMs). To model stimulus-driven responses, we take a non-parametric approach and employ Gaussian-process priors to model the smoothness of response-profiles across time and different stimulus orientations, and low-rank constraints to facilitate inference from limited data. We find that the parameters which control the prior smoothness are consistent across neurons within each recording session, but differ markedly across recordings. For most neurons, the time-varying response across all stimulus orientations can be well captured using a low-rank
decomposition with k = 4 dimensions. To capture slow modulations in firing rates, we include covariates in the GLM which are constrained to vary smoothly across trials,
and find that including these terms leads to significant improvements in goodness-of-fit. Finally, we use latent dynamical systems [3] with point-process observation models [4] to capture variations and co-variations in firing rates on fast time-scales. While we focus our analysis on modelling neural population responses in V1, our approach provides a general formalism for obtaining an accurate quantitative model of response variability in neural populations.},
web_url = {http://www.nncn.de/de/bernstein-conference/2015/program},
event_name = {Bernstein Conference 2015},
event_place = {Heidelberg, Germany},
state = {published},
DOI = {10.12751/nncn.bc2015.0139},
author = {Bassetto G{gbassetto}; Sandhaeger F{fsandhaeger}; Ecker A{aecker}{Department Physiology of Cognitive Processes}; Macke JH{jakob}}
}
@Poster{ SchuttHMW2015,
title = {{Psignifit} 4: Pain-free {Bayesian} Inference for Psychometric Functions},
journal = {Journal of Vision},
year = {2015},
month = {9},
volume = {15},
number = {12},
pages = {474},
abstract = {Psychometric functions are frequently used in vision science to model task performance. These sigmoid functions can be fit to data using likelihood maximization, but this ignores the reliability or variance of the point estimates. In contrast Bayesian methods automatically calculate this reliability. However, using Bayesian methods in practice usually requires expert knowledge, user interaction and computation time. Also most methods---including Bayesian ones---are vulnerable to non-stationary observers (whose performance is not constant). For such observers all methods, which assume a stationary binomial observer are overconfident in the estimates. We present Psignifit 4, a new method for fitting psychometric functions, which provides an efficient Bayesian analysis based on numerical integration, which requires little user-interaction and runs in seconds on a common office computer. Additionally it fits a beta-binomial model increasing the stability against non-stationarity and contains standard settings including a heuristic to set the prior based on the interval of stimulus levels in the experimental data. Obviously all properties of the analysis can be adjusted. To test our method it was run on extensive simulated datasets. First we tested the numerical accuracy of our method with different settings and found settings which calculate a good estimate fast and reliably. Testing the statistical properties, we find that our method calculates correct or slightly conservative confidence intervals in all tested conditions, including different sampling schemes, beta-binomial observers, other non-stationary observers and adaptive methods. When enough data was collected to overcome the small sample bias caused by the prior, the point estimates are also essentially unbiased. 
In summary we present a user-friendly, fast, correct and comprehensively tested Bayesian method to fit psychometric functions, which handles non-stationary observers well and is freely available as a MATLAB implementation online.},
web_url = {http://jov.arvojournals.org/article.aspx?articleid=2433582},
event_name = {15th Annual Meeting of the Vision Sciences Society (VSS 2015)},
event_place = {St. Pete Beach, FL, USA},
state = {published},
DOI = {10.1167/15.12.474},
author = {Sch{\"u}tt H; Harmeling S{harmeling}; Macke J{jakob}; Wichmann F{felix}}
}
@Poster{ NonnenmacherBPBM2015,
title = {Correlations and signatures of criticality in neural population models},
year = {2015},
month = {3},
day = {7},
pages = {207--208},
abstract = {Large-scale recording methods make it possible to measure the statistics of neural population activity, and thereby
to gain insights into the principles that govern the collective activity of neural ensembles. One hypothesis that has emerged from this approach is that neural populations are poised at a ‘thermo-dynamic critical point’, and that this has important functional consequences (Tkacik et al 2014). Support for this hypothesis has come from studies that computed the specific heat, a measure of global population statistics, for groups of neurons subsampled from population recordings. These studies have found two effects which—in physical systems—indicate a critical point: First, specific heat diverges with population size N. Second, when manipulating population statistics by introducing a ‘temperature’ in analogy to statistical mechanics, the maximum heat moves towards unit-temperature for large populations. What mechanisms can explain these observations? We show that both effects arise in a simple simulation of retinal population activity. They robustly appear across a range of parameters including biologically implausible ones, and can be understood analytically in simple models. The specific heat grows with N whenever the (average) correlation is independent of N, which is always true when uniformly subsampling a large, correlated population. For weakly correlated populations, the rate of divergence of the specific heat is proportional to the correlation strength. Thus, if retinal population codes were optimized to maximize specific heat, then this would predict that they seek to increase correlations. This is incongruent with theories of efficient coding that make
the opposite prediction. We find criticality in a simple and parsimonious model of retinal processing, and without
the need for fine-tuning or adaptation. This suggests that signatures of criticality might not require an optimized
coding strategy, but rather arise as consequence of sub-sampling a stimulus-driven neural population (Aitchison
et al 2014).},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne2015_Program},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2015)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Nonnenmacher M{mnonnenmacher}; Behrens C; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}; Macke J{jakob}}
}
@Poster{ NienborgM2014_2,
title = {Using sequential dependencies in neural activity and behavior to dissect choice related activity in {V2}},
year = {2014},
month = {11},
day = {17},
volume = {44},
pages = {435.08},
abstract = {During perceptual decisions the activity of sensory neurons co-varies with choice. Previous findings suggest that this partially reflects “bottom-up” and “top-down” effects. However, the quantitative contributions of these effects are unclear. To address this question, we take advantage of the observation that past choices influence current behavior (sequential dependencies). Here, we use data from two macaque monkeys performing a disparity discrimination task during simultaneous extracellular recordings of disparity selective V2 neurons. We quantify the sequential dependencies using generalized linear models to predict choices or spiking activity of the V2 neurons. We find that past choices predict current choices substantially better than the spike counts on the current trial, i.e. have a higher “choice probability”. In addition, we observe that past choices have a significant predictive effect on the activity of sensory neurons on the current trial. This effect results from sequential dependencies of choices and neural activity alone, but also reflects a direct influence of past choices on the spike count on the current trial. We then use these sequential dependencies to dissect the neuronal co-variation with choice: We decomposed the choice co-variation of neural spike counts into components, which can be explained by behavior or neural activity on previous trials. We find that about 30% of the observed co-variation is already explained by the animals’ previous choice, suggesting a “top-down” contribution of at least 30%. Additionally, our results exemplify how variability frequently regarded as noise reflects the systematic effect of ignored neural and behavioral co-variates, and that interpretation of co-variations between neural activity and observed behavior should take the temporal context within the experiment into account.},
web_url = {http://www.sfn.org/annual-meeting/neuroscience-2014},
event_name = {44th Annual Meeting of the Society for Neuroscience (Neuroscience 2014)},
event_place = {Washington, DC, USA},
state = {published},
author = {Nienborg H; Macke JH{jakob}}
}
@Poster{ ArcherPM2014_3,
title = {Low Dimensional Dynamical Models of Neural Populations with Common Input},
year = {2014},
month = {10},
volume = {15},
pages = {22},
abstract = {Modern experimental technologies enable simultaneous recording of large neural populations. These high-dimensional data present a challenge for analysis. Recent work has focused on extracting low-dimensional dynamical trajectories that may underlie such responses. Such
methods enable visualization and may also provide insight into neural computations. Previous work focuses on modeling a population’s dynamics without conditioning on external
stimuli. Our proposed technique integrates linear dimensionality reduction with a latent dynamical system model of neural activity. Under our model, population response is governed by a low-dimensional dynamical system with quadratic input. In this framework the number of parameters grows linearly with population size (given fixed latent dimensionality). Hence it is computationally fast for large populations, unlike fully-connected models. Our method captures both noise correlations and low-dimensional stimulus selectivity through the simultaneous modeling of dynamics and stimulus dependence. This approach is particularly well-suited for studying the population activity of sensory cortices, where neurons often
have substantial receptive field overlap.},
web_url = {http://www.neuroschool-tuebingen-nena.de/fileadmin/user_upload/Dokumente/neuroscience/Abstractbook_NeNa2014_final.pdf},
event_name = {15th Conference of Junior Neuroscientists of Tübingen (NeNa 2014): The Changing Face of Publishing and Scientific Evaluation},
event_place = {Schramberg, Germany},
state = {published},
author = {Archer E{earcher}; Pillow J; Macke J{jakob}}
}
@Poster{ ArcherPM2014_2,
title = {Low-dimensional dynamical neural population models with shared stimulus drive},
year = {2014},
month = {9},
day = {3},
pages = {72--73},
abstract = {Modern experimental technologies enable simultaneous recording of large neural populations. These high-dimensional data present a challenge for analysis. Recent work has focused on extracting low-dimensional dynamical trajectories that may underlie such responses. These methods enable visualization and may also provide insight into neural computations. However, previous work focused on modeling a population’s dynamics without conditioning on external stimuli.
We propose a new technique that integrates linear dimensionality reduction (analogous to the STA and STC) with a latent dynamical system model of neural activity. Under our model, the spike response of a neural population is governed by a low-dimensional dynamical system with quadratic input. In this framework, the number of parameters grows linearly with population size (given fixed latent dimensionality). Hence, it is computationally fast for large populations, unlike fully-connected models.
Our method captures both noise correlations and low-dimensional stimulus selectivity through the simultaneous modeling of dynamics and stimulus dependence. This approach is particularly well-suited for studying the population activity of sensory cortices, where neurons often have substantial receptive field overlap.},
web_url = {http://abstracts.g-node.org/abstracts/b1c1ba29-cec7-4c65-a3d4-e6194b1bb0aa},
event_name = {Bernstein Conference 2014},
event_place = {Göttingen, Germany},
state = {published},
DOI = {10.12751/nncn.bc2014.0076},
author = {Archer E{earcher}; Pillow JW; Macke JH{jakob}}
}
@Poster{ NienborgM2014,
title = {Using sequential dependencies in neural activity and behavior to dissect choice related activity in {V2}},
year = {2014},
month = {9},
day = {3},
pages = {73--74},
abstract = {During perceptual decisions the activity of sensory neurons co-varies with choice. Previous findings suggest that this partially reflects “bottom-up” and “top-down” effects. However, the quantitative contributions of these effects are unclear. To address this question, we take advantage of the observation that past choices influence current behavior (sequential dependencies).
Here, we use data from two macaque monkeys performing a disparity discrimination task during simultaneous extracellular recordings of disparity selective V2 neurons. We quantify the sequential dependencies using generalized linear models to predict choices or spiking activity of the V2 neurons. We find that past choices predict current choices substantially better than the spike counts on the current trial, i.e. have a higher “choice probability”. In addition, we observe that past choices have a significant predictive effect on the activity of sensory neurons on the current trial. This effect results from sequential dependencies of choices and neural activity alone, but also reflects a direct influence of past choices on the spike count on the current trial.
We then use these sequential dependencies to dissect the neuronal co-variation with choice: We decomposed the choice co-variation of neural spike counts into components, which can be explained by behavior or neural activity on previous trials. We find that about 30% of the observed co-variation is already explained by the animals’ previous choice, suggesting a “top-down” contribution of at least 30%. Additionally, our results exemplify how variability frequently regarded as noise reflects the systematic effect of ignored neural and behavioral co-variates, and that interpretation of co-variations between neural activity and observed behavior should take the temporal context within the experiment into account.},
web_url = {http://abstracts.g-node.org/abstracts/cf7e8932-c453-4b8c-8df1-aa7414cbaca1},
event_name = {Bernstein Conference 2014},
event_place = {Göttingen, Germany},
state = {published},
DOI = {10.12751/nncn.bc2014.0077},
author = {Nienborg H; Macke JH{jakob}}
}
@Poster{ SchuttHMW2014,
title = {Pain-free {Bayesian} inference for psychometric functions},
journal = {Perception},
year = {2014},
month = {8},
volume = {43},
number = {ECVP Abstract Supplement},
pages = {162},
abstract = {To estimate psychophysical performance, psychometric functions are usually modeled as sigmoidal functions, whose parameters are estimated by likelihood maximization. While this approach gives a point estimate, it ignores its reliability (its variance). This is in contrast to Bayesian methods, which in principle can determine the posterior of the parameters and thus the reliability of the estimates. However, using Bayesian methods in practice usually requires extensive expert knowledge, user interaction and computation time. Also many methods---including Bayesian ones---are vulnerable to non-stationary observers (whose performance is not constant). Our work provides an efficient Bayesian analysis, which runs within seconds on a common office computer, requires little user-interaction and improves robustness against non-stationarity. A Matlab implementation of our method, called PSIGNIFIT 4, is freely available online. We additionally provide methods to combine posteriors to test the difference between psychometric functions (such as between conditions), obtain posterior distributions for the average of a group, and other comparisons of practical interest. Our method uses numerical integration, allowing robust estimation of a beta-binomial model that is stable against non-stationarities. Comprehensive simulations to test the numerical and statistical correctness and robustness of our method are in progress, and initial results look very promising.},
web_url = {http://pec.sagepub.com/content/43/1_suppl.toc},
event_name = {37th European Conference on Visual Perception (ECVP 2014)},
event_place = {Beograd, Serbia},
state = {published},
DOI = {10.1177/03010066140430S101},
author = {Sch{\"u}tt H; Harmeling S{harmeling}{Department Empirical Inference}; Macke J{jakob}; Wichmann F{felix}{Department Empirical Inference}}
}
@Poster{ SchuttHMW2014_2,
title = {Pain-free {Bayesian} inference for psychometric functions},
year = {2014},
month = {8},
pages = {38--39},
abstract = {To estimate psychophysical performance, psychometric functions are usually modeled as sigmoidal functions, whose parameters are estimated by likelihood maximization.
While this approach gives a point estimate, it ignores its reliability (its variance). This is in contrast to Bayesian methods, which in principle can determine the posterior of the parameters and thus the reliability of the estimates. However, using Bayesian methods in practice usually requires extensive expert knowledge, user interaction and computation time. Also many methods---including Bayesian
ones---are vulnerable to non-stationary observers (whose performance is not constant). Our work provides an efficient Bayesian analysis, which runs within seconds on a
common office computer, requires little user-interaction and improves robustness against non-stationarity. A Matlab implementation of our method, called PSIGNIFIT 4, is freely available online. We additionally provide methods to combine
posteriors to test the difference between psychometric functions (such as between conditions), obtain posterior distributions for the average of a group, and other
comparisons of practical interest.
Our method uses numerical integration, allowing robust estimation of a beta-binomial model that is stable against non-stationarities. Comprehensive simulations to test
the numerical and statistical correctness and robustness of our method are in progress, and initial results look very promising.},
web_url = {http://uni-tuebingen.de//uni/sii/empg2014/Program.htm},
event_name = {2014 European Mathematical Psychology Group Meeting (EMPG)},
event_place = {Tübingen, Germany},
state = {published},
author = {Sch{\"u}tt H; Harmeling S{harmeling}; Macke J{jakob}; Wichmann F{felix}}
}
@Poster{ ArcherPM2014,
title = {Low-dimensional models of neural population recordings with complex stimulus selectivity},
year = {2014},
month = {3},
volume = {2014},
pages = {162},
abstract = {Modern experimental technologies such as multi-electrode arrays and 2-photon population calcium imaging make
it possible to record the responses of large neural populations (up to 100s of neurons) simultaneously. These
high-dimensional data pose a significant challenge for analysis. Recent work has focused on extracting low-dimensional dynamical trajectories that may underlie such responses. These methods enable visualization of
high-dimensional neural activity, and may also provide insight into the function of underlying circuitry. Previous
work, however, has primarily focused on models of a population’s intrinsic dynamics, without taking into
account any external stimulus drive. We propose a new technique that integrates linear dimensionality reduction
of stimulus-response functions (analogous to spike-triggered average and covariance analysis) with a latent
dynamical system (LDS) model of neural activity. Under our model, the population response is governed by a
low-dimensional dynamical system with nonlinear (quadratic) stimulus-dependent input. Parameters of the model can be learned by combining standard expectation maximization for linear dynamical system models with a recently proposed algorithm for learning quadratic feature selectivity. Unlike models with all-to-all connectivity, this
framework scales well to large populations since, given fixed latent dimensionality, the number of parameters
grows linearly with population size. Simultaneous modeling of dynamics and stimulus dependence allows our method to model correlations in response variability while also uncovering low-dimensional stimulus selectivity that is shared across a population. Because stimulus selectivity and noise correlations both arise from coupling to the underlying dynamical system, it is particularly well-suited for studying the neural population activity of sensory
cortices, where stimulus inputs received by different neurons are likely to be mediated by local circuitry, giving rise to both shared dynamics and substantial receptive field overlap.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_14},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2014)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Archer E{earcher}; Pillow JW; Macke J{jakob}}
}
@Poster{ TuragaBPDPHM2014,
title = {Predicting noise correlations for non-simultaneously measured neuron pairs},
year = {2014},
month = {3},
volume = {2014},
pages = {84},
abstract = {Simultaneous recordings of the activity of large neural populations are extremely valuable as they can be used to
infer the dynamics and interactions of neurons in a local circuit, shedding light on the computations performed. It
is now possible to measure the activity of hundreds of neurons using in-vivo 2-photon calcium imaging. However,
many computations are thought to involve circuits consisting of thousands of neurons, such as cortical barrels in rodent somatosensory cortex. Here we contribute a statistical method for “stitching” together sequentially imaged sets of neurons into one model by phrasing the problem as fitting a latent dynamical system with missing observations.
This method allows us to substantially expand the population sizes for which population dynamics can be characterized—beyond the number of simultaneously imaged neurons. We describe conditions for successful stitching and use recordings from mouse somatosensory cortex to demonstrate that this method enables accurate predictions of noise correlations between non-simultaneously recorded neuron pairs.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_14},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2014)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Turaga SC; Buesing L; Packer A; Dalgleish H; Pettit N; H{\"a}usser M; Macke J{jakob}}
}
@Poster{ TuragaBPHM2013,
title = {Inferring interactions between cell types from multiple calcium imaging snapshots of the same neural circuit},
year = {2013},
month = {11},
day = {13},
volume = {43},
number = {743.27},
abstract = {Understanding the functional connectivity between different cortical cell types and the resulting population dynamics is a challenging and important problem. Progress with in-vivo 2-photon population calcium imaging has made it possible to densely sample neural activity in superficial layers of a local patch of cortex. In principle, such data can be used to infer the functional (statistical) connectivity between different classes of cortical neurons by fitting models such as generalized linear models or latent dynamical systems (LDS). However, this approach faces 3 major challenges which we address: 1) only small populations of neurons (~200) can currently be simultaneously imaged at any given time; 2) the cell types of individual neurons are often unknown; and 3) it is unclear how to pool data across different animals to derive an average model.
First, while it is not possible to simultaneously image all neurons in a cortical column, it is currently possible to image the activity of ~200 neurons at a time and to repeat this procedure at multiple cortical depths (down to layer 3). We present a computational method ("Stitching LDS") which allows us to "stitch" such non-simultaneously imaged populations of neurons into one large virtual population spanning different depths of cortex. Importantly - and surprisingly - this approach allows us to predict couplings and noise correlations even for pairs of neurons that were never imaged simultaneously.
Second, we automatically cluster neurons based on similarities in their functional connectivity (“Clustering LDS”). Under the assumption that such functionally defined clusters can correspond to cell types, this enables us to infer both the cell types and their functional connectivity.
Third, while connection profiles of individual cells in one class can be variable, we expect the ‘average’ influence of one cell class on another to be fairly consistent across animals. We show how our approach can be used to pool measurements across different animals in a principled manner (“Pooling LDS”). The result is a highly accurate average model of the interactions between different cell classes.
We demonstrate the utility of our computational tools by applying them to model the superficial layers of barrel cortex based on in-vivo 2-photon imaging data in awake mice.},
web_url = {http://www.sfn.org/annual-meeting/neuroscience-2013},
event_name = {43rd Annual Meeting of the Society for Neuroscience (Neuroscience 2013)},
event_place = {San Diego, CA, USA},
state = {published},
author = {Turaga SC; Buesing L; Packer A; H{\"a}usser M; Macke JH{jakob}}
}
@poster{MackeML2013,
  title       = {How biased are maximum entropy models of neural population activity?},
  year        = {2013},
  month       = {3},
  number      = {III-89},
  web_url     = {http://www.cosyne.org/c/index.php?title=Cosyne_13},
  event_name  = {Computational and Systems Neuroscience Meeting (COSYNE 2013)},
  event_place = {Salt Lake City, UT, USA},
  state       = {published},
  author      = {Macke J{jakob}; Murray I{iain}; Latham P}
}
@poster{BuesingMB2013,
  title       = {Robust estimation for neural state-space models},
  year        = {2013},
  month       = {3},
  number      = {II-89},
  web_url     = {http://www.cosyne.org/c/index.php?title=Cosyne_13},
  event_name  = {Computational and Systems Neuroscience Meeting (COSYNE 2013)},
  event_place = {Salt Lake City, UT, USA},
  state       = {published},
  author      = {Buesing L; Macke J{jakob}; Sahani M}
}
@Poster{ MackeBCYSS2012_2,
title = {Empirical models of spiking in neural populations},
year = {2012},
month = {5},
event_name = {Janelia Farm Conference 2012: Machine Learning, Statistical Inference, and Neuroscience},
event_place = {Ashburn, VA, USA},
state = {published},
author = {Macke JH{jakob}; B{\"u}sing L; Cunningham JP; Yu BM; Shenoy KV; Sahani M}
}
@Poster{ HaefnerGMB2011,
title = {Relationship between decoding strategy, choice probabilities and neural correlations in perceptual decision-making task},
year = {2011},
month = {11},
volume = {41},
number = {17.09},
abstract = {When monkeys make a perceptual decision about ambiguous visual stimuli, individual sensory neurons in MT and other areas have been shown to covary with the decision. This observation suggests that the response variability in those very neurons causes the animal to choose one over the other option. However, the fact that sensory neurons are correlated has greatly complicated attempts to link those covariances (and the associated choice probabilities) to a direct involvement of any particular neuron in a decision-making task.
Here we report on an analytical treatment of choice probabilities in a population of correlated sensory neurons read out by a linear decoder. We present a closed-form solution that links choice probabilities, noise correlations and decoding weights for the case of fixed integration time. This allowed us to analytically prove and generalize a prior numerical finding about the choice probabilities being only due to the difference between the correlations within and between decision pools (Nienborg & Cumming 2010) and derive simplified expressions for a range of interesting cases. We investigated the implications for plausible correlation structures like pool-based and limited-range correlations.
We found that the relationship between choice probabilities and decoding weights is in general non-monotonic and highly sensitive to the underlying correlation structure. In fact, given empirical measures of the interneuronal correlations and CPs, our formulas allow us to infer the individual neuronal decoding weights. We confirmed the feasibility of this approach using synthetic data. We then applied our analytical results to a published dataset of empirical noise correlations and choice probabilities (Cohen & Newsome 2008 and 2009) recorded during a classic motion discriminating task (Britten et al 1992). We found that the data are compatible with an optimal read-out scheme in which the responses of neurons with the correct direction preference are summed and those with perpendicular preference, but positively correlated noise, are subtracted. While the correlation data of Cohen & Newsome (being based on individual extracellular electrode recordings) do not give access to the full covariance structure of a neural population, our analytical formulas will make it possible to accurately infer individual read-out weights from simultaneous population recordings.},
web_url = {http://www.sfn.org/am2011/},
event_name = {41st Annual Meeting of the Society for Neuroscience (Neuroscience 2011)},
event_place = {Washington, DC, USA},
state = {published},
author = {Haefner RM{rhaefner}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Bethge M{mbethge}}
}
@Poster{ MackeBCYSS2011,
title = {Modelling low-dimensional dynamics in recorded spiking populations},
year = {2011},
month = {2},
number = {I-34},
abstract = {Neural population activity reflects not only variations in stimulus drive ( captured by many neural encoding models) but also the rich computational dynamics of recurrent neural circuitry. Identifying this dynamical structure, and relating it to external stimuli and behavioural events, is a crucial step towards understanding neural computation. One data-driven approach is to fit hidden low-dimensional dynamical systems models to the high-dimensional spiking observations collected by microelectrode arrays (Yu et al, 2006, 2009). This approach yields low-dimensional representations of population-activity, allowing analysis and visualization of population dynamics with single trial resolution. Here, we compare two models using latent linear dynamics, with the dependence of spiking observations on the dynamical state being either linear with Gaussian observations (GaussLDS), or generalised linear with Poisson observations and an exponential nonlinearity (PoissonLDS) (Kulkarni & Paninski, 2007). Both models were fit by Expectation-Maximisation to multi-electrode recordings from pre-motor cortex in behaving monkeys during the delay-period of a delayed reach task. We evaluated the accuracy of different approximations for the E-step necessary for PoissonLDS using elliptical slice sampling. We quantified model-performance using a cross-prediction approach (Yu et al). Although only the Poisson noise model takes the discrete nature of spiking into account, we found no consistent improvement of the Poisson-model over GaussLDS: PoissonLDS was generally more accurate for low dimensions, but slightly under-performed GaussLDS in higher dimensions (cf. Lawhern et al. 2010). We also examined the ability of such models to capture conventional population metrics such as pairwise correlations and the distribution of synchronous spikes counts. 
We found that both models were able to reproduce these quantities with very low dynamical dimension, although the non-positivity of the Gaussian model introduced a bias. Thus, despite its verisimilitude, the Poisson observation model does not always yield more accurate predictions in real data.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_11_posters},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2011)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Macke JH{jakob}; B\"using L; Cunningham JP; Yu BM; Shenoy KV; Sahani M}
}
@Poster{ MackeOB2011_2,
title = {The effect of common input on higher-order correlations and
entropy in neural populations},
year = {2011},
month = {2},
number = {III-68},
abstract = {Finding models for capturing the statistical structure of multi-neuron firing patterns is a major challenge in sensory neuroscience. Recently, Maximum Entropy (MaxEnt) models have become popular tools for studying neural population recordings [4, 3]. These studies have found that small populations in retinal, but not in local cortical circuits, are well described by models based on pairwise correlations. It has also been found that entropy in small populations grows sublinearly [4], that sparsity in the population code is related to correlations [3], and it has been conjectured that neural populations might be at a `critical point'. While there have been many empirical studies using MaxEnt models, there has arguably been a lack of analytical studies that might explain the diversity of their findings. In particular, theoretical models would be of great importance for investigating their implications for large populations. Here, we study these questions in a simple, tractable population model of neurons receiving Gaussian inputs [1, 2]. Although the Gaussian input has maximal entropy, the spiking-nonlinearities yield non-trivial higher-order correlations (`hocs'). We find that the magnitude of hocs is strongly modulated by pairwise correlations, in a manner which is consistent with neural recordings. In addition, we show that the entropy in this model grows sublinearly for small, but linearly for large populations. We characterize how the magnitude of hocs grows with population size. Finally, we find that the hocs in this model lead to a diverging specific heat, and therefore, that any such model appears to be at a critical point. We conclude that common input might provide a mechanistic explanation for a wide range of recent empirical observations. [1] SI Amari, H Nakahara, S Wu, Y Sakai. Neural Comput, 2003. [2] JH Macke, M Opper, M Bethge. ArXiv, 2010. [3] IE Ohiorhenuan, et al. Nature, 2010. [4] E Schneidman, MJ Berry, R Segev, W Bialek. 
Nature, 2006.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_11_posters3},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2011)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Macke JH{jakob}; Opper M; Bethge M{mbethge}}
}
@Poster{ 7074,
title = {Estimating cortical maps with Gaussian process models},
year = {2010},
month = {11},
volume = {40},
number = {483.18},
abstract = {A striking feature of cortical organization is that the encoding of many stimulus features, such as orientation preference, is arranged into topographic maps. The structure of these maps has been extensively studied using functional imaging methods, for example optical imaging of intrinsic signals, voltage sensitive dye imaging or functional magnetic resonance imaging. As functional imaging measurements are usually noisy, statistical processing of the data is necessary to extract maps from the imaging data. We here present a probabilistic model of functional imaging data based on Gaussian processes. In comparison to conventional approaches, our model yields superior estimates of cortical maps from smaller amounts of data. In addition, we obtain quantitative uncertainty estimates, i.e. error bars on properties of the estimated map. We use our probabilistic model to study the coding properties of the map and the role of noise correlations by decoding the stimulus from single trials of an imaging experiment. In addition, we show how our method can be used to reconstruct maps from sparse measurements, for example multi-electrode recordings. We demonstrate our model both on simulated data and on intrinsic signaling data from ferret visual cortex.},
web_url = {http://www.sfn.org/am2010/index.aspx?pagename=abstracts_main},
event_name = {40th Annual Meeting of the Society for Neuroscience (Neuroscience 2010)},
event_place = {San Diego, CA, USA},
state = {published},
author = {Macke JH{jakob}; Gerwinn S{sgerwinn}; White LE; Kaschube M; Bethge M{mbethge}}
}
@Poster{ HafnerGMB2009,
title = {Neuronal decision-making with realistic spiking models},
journal = {Frontiers in Computational Neuroscience},
year = {2009},
month = {10},
day = {1},
volume = {2009},
number = {Conference Abstract: Bernstein Conference on Computational Neuroscience},
pages = {132-133},
abstract = {The neuronal processes underlying perceptual decision-making have been the focus of numerous studies over the past two decades. In the current standard model [1][2][3] the output of noisy sensory neurons is pooled and integrated by decision neurons. Once the activity of the decision neurons reaches a threshold, the corresponding choice is made. This bottom-up model was recently challenged based on the empirical finding that the time courses of psychophysical kernel (PK) and choice probability (CP) qualitatively differ from each other [4]. It was concluded that the decision-related activity in sensory neurons, at least in part, reflects the decision through a top-down signal, rather than contribute to the decision causally. However, the prediction of the standard bottom-up model about the relationship between the time courses of PKs and CPs crucially depends on the underlying noise model. Our study explores the impact of the time course and correlation structure of neuronal noise on PK and CP for several decision models. For the case of non-leaky integration over the entire stimulus duration, we derive analytical expressions for Gaussian additive noise with arbitrary correlation structure. For comparison, we also investigate biophysically generated responses with a Fano factor that increases with the counting window [5], and alternative decision models (leaky, integration to bound) using numerical simulations.
In the case of non-leaky integration over the entire stimulus duration we find that the amplitude of the PK only depends on the overall level of noise, but not its temporal changes. Consequently the PK remains constant regardless of the temporal evolution or correlation structure in the noise. In conjunction with the observed decrease in the amplitude of the PK (e.g. [4]) this supports the conclusion that decreasing PKs are evidence for an integration to a bound model [1][3]. However, we find that the temporal evolution of the CP depends strongly on both the time course of the noise variance and the temporal correlations within the pool of sensory neurons. For instance, a noise variance that increases over time also leads to an increasing CP. The bottom-up account that appears to agree best with the data in [4] combines an increasing variance of the correlated noise (the noise that cannot be eliminated by averaging over many neurons) with an integration-to-bound decision model. This leads to a decreasing PK, as well as a CP that first increases slowly before leveling off and persisting until the end. We do not find qualitatively different results when using biophysically generated or Poisson distributed responses instead of additive Gaussian noise.
In summary, we advance the analytical framework for a quantitative comparison of choice probabilities and psychophysical kernels and find that recent data that was taken to be evidence of a top-down component in choice probabilities, may alternatively be accounted for by a bottom-up model when allowing for time-varying correlated noise.},
web_url = {http://www.frontiersin.org/10.3389/conf.neuro.10.2009.14.004/event_abstract?sname=Bernstein_Conference_on_Computational_Neuroscience},
event_name = {Bernstein Conference on Computational Neuroscience (BCCN 2009)},
event_place = {Frankfurt a.M., Germany},
state = {published},
DOI = {10.3389/conf.neuro.10.2009.14.004},
author = {H\"afner R{rhaefner}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Bethge M{mbethge}}
}
@Poster{ 6242,
title = {Estimating Critical Stimulus Features from Psychophysical Data: The Decision-Image Technique Applied to Human Faces},
journal = {Journal of Vision},
year = {2009},
month = {8},
volume = {9},
number = {8},
pages = {31},
abstract = {One of the main challenges in the sensory sciences is to identify the stimulus features on which the sensory systems base their computations: they are a pre-requisite for computational models of perception. We describe a technique---decision-images--- for extracting critical stimulus features based on logistic regression. Rather than embedding the stimuli in noise, as is done in classification image analysis, we want to infer the important features directly from physically heterogeneous stimuli. A Decision-image not only defines the critical region-of-interest within a stimulus but is a quantitative template which defines a direction in stimulus space. Decision-images thus enable the development of predictive models, as well as the generation of optimized stimuli for subsequent psychophysical investigations. Here we describe our method and apply it to data from a human face discrimination experiment. We show that decision-images are able to predict human responses not only in terms of overall percent correct but are able to predict, for individual observers, the probabilities with which individual faces are (mis-) classified. We then test the predictions of the models using optimized stimuli. Finally, we discuss possible generalizations of the approach and its relationships with other models.},
web_url = {http://www.journalofvision.org/9/8/31/},
event_name = {9th Annual Meeting of the Vision Sciences Society (VSS 2009)},
event_place = {Naples, FL, USA},
state = {published},
DOI = {10.1167/9.8.31},
author = {Macke JH{jakob}; Wichmann FA{felix}{Department Empirical Inference}}
}
@Poster{ 5845,
title = {Bayesian estimation of orientation preference maps},
journal = {Frontiers in Systems Neuroscience},
year = {2009},
month = {3},
volume = {2009},
number = {Conference Abstracts: Computational and Systems Neuroscience},
abstract = {Neurons in the early visual cortex of mammals exhibit a striking organization with respect to their functional properties. A prominent example is the layout of orientation preferences in primary visual cortex, the orientation preference map (OPM). Functional imaging techniques, such as optical imaging of intrinsic signals have been used extensively for the measurement of OPMs. As the signal-to-noise ratio in individual pixels if often low, the signals are usually spatially smoothed with a fixed linear filter to obtain an estimate of the functional map.
Here, we consider the estimation of the map from noisy measurements as a Bayesian inference problem. By combining prior knowledge about the structure of OPMs with experimental measurements, we want to obtain better estimates of the OPM with smaller trial numbers. In addition, the use of an explicit, probabilistic model for the data provides a principled framework for setting parameters and smoothing.
We model the underlying map as a bivariate Gaussian process (GP, a.k.a. Gaussian random field), with a prior covariance function that reflects known properties of OPMs. The posterior mean of the map can be interpreted as an optimally smoothed map. Hyper-parameters of the model can be chosen by optimization of the marginal likelihood. In addition, the GP also returns a predicted map for any location, and can therefore be used for extending the map to pixel at which no, or only unreliable data was obtained.
We also obtain a posterior distribution over maps, from which we can estimate the posterior uncertainty of statistical properties of the maps, such as the pinwheel density. Finally, our probabilistic model of both the signal and the noise can be used for decoding, and for estimating the informational content of the map.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_09},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2009)},
event_place = {Salt Lake City, UT, USA},
state = {published},
DOI = {10.3389/conf.neuro.06.2009.03.310},
author = {Macke J{jakob}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; White L; Kaschube M; Bethge M{mbethge}}
}
@Poster{ 5843,
title = {Bayesian Population Decoding of Spiking Neurons},
journal = {Frontiers in Systems Neuroscience},
year = {2009},
month = {3},
volume = {2009},
number = {Conference Abstracts: Computational and Systems Neuroscience},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_09},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2009)},
event_place = {Salt Lake City, UT, USA},
state = {published},
DOI = {10.3389/conf.neuro.06.2009.03.026},
author = {Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Macke J{jakob}; Bethge M{mbethge}}
}
@Poster{ 5844,
title = {Sensory input statistics and network mechanisms in primate primary visual cortex},
journal = {Frontiers in Systems Neuroscience},
year = {2009},
month = {3},
volume = {2009},
number = {Conference Abstracts: Computational and Systems Neuroscience},
abstract = {Understanding the structure of multi-neuronal firing patterns in ensembles of cortical neurons is a major challenge for systems neuroscience. The dependence of network properties on the statistics of the sensory input can provide important insights into the computations performed by neural ensembles. Here, we study the functional properties of neural populations in the primary visual cortex of awake, behaving macaques by varying visual input statistics in a controlled way. Using arrays of chronically implanted tetrodes, we record simultaneously from up to thirty well-isolated neurons while presenting sets of images with three different correlation structures: spatially uncorrelated white noise (whn), images matching the second-order correlations of natural images (phs) and natural images including higher-order correlations (nat).
We find that groups of six nearby cortical neurons show little redundancy in their firing patterns (represented as binary vectors, 10ms bins) but rather act almost independently (mean multi-information 0.85 bits/s, range 0.16 - 1.90 bits/s, mean fraction of marginal entropy 0.34 %, N=46). Although network correlations are weak, they are statistically significant. While relatively few groups showed significant redundancies under stimulation with white noise (67.4 ± 3.2%; mean fraction of groups ± S.E.M.), many more did so in the other two conditions (phs: 95.7 ± 0.6%; nat: 89.1 ± 1.4%). Additional higher-order correlations in natural images compared to phase scrambled images did not increase but rather decrease the redundancy in the cortical representation: Network correlations are significantly higher in phs than in nat, as is the number of significantly correlated groups.
Multi-information measures the reduction in entropy due to any form of correlation. By using second order maximum entropy modeling, we find that a large fraction of multi-information is accounted for by pairwise correlations (whn: 75.0 ± 3.3%; phs: 82.8 ± 2.1%; nat: 80.8 ± 2.4%; groups with significant redundancy). Importantly, stimulation with natural images containing higher-order correlations only lead to a slight increase in the fraction of redundancy due to higher-order correlations in the cortical representation (mean difference 2.26 %, p=0.054, Sign test).
While our results suggest that population activity in V1 may be modeled well using pairwise correlations only, they leave roughly 20-25 % of the multi-information unexplained. Therefore, choosing a particular form of higher-order interactions may improve model quality. Thus, in addition to the independent model, we evaluated the quality of three different models: (a) The second-order maximum entropy model, which minimizes higher-order correlations, (b) a model which assumes that correlations are a product of common inputs (Dichotomized Gaussian) and (c) a mixture model in which correlations are induced by a discrete number of latent states. We find that an independent model is sufficient for the white noise condition but neither for phs or nat. In contrast, all of the correlation models (a-c) perform similarly well for the conditions with correlated stimuli.
Our results suggest that under natural stimulation redundancies in cortical neurons are relatively weak. Higher-order correlations in natural images do not increase but rather decrease the redundancies in the cortical representation.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_09},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2009)},
event_place = {Salt Lake City, UT, USA},
state = {published},
DOI = {10.3389/conf.neuro.06.2009.03.298},
author = {Berens P{berens}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Ecker AS{aecker}; Cotton RJ; Bethge M{mbethge}; Tolias AS{atolias}}
}
@Poster{ MackeOB2008_2,
title = {How pairwise correlations affect the redundancy in large populations of neurons},
journal = {Frontiers in Computational Neuroscience},
year = {2008},
month = {10},
volume = {2008},
number = {Conference Abstract: Bernstein Symposium 2008},
abstract = {Simultaneously recorded neurons often exhibit correlations in their spiking activity. These correlations shape the statistical structure of the population activity, and can lead to substantial redundancy across neurons. Knowing the amount of redundancy in neural responses is critical for our understanding of the neural code. Here, we study the effect of pairwise correlations on the statistical structure of population activity. We model correlated activity as arising from common Gaussian inputs into simple threshold neurons. In population models with exchangeable correlation structure, one can analytically calculate the distribution of synchronous events across the whole population, and the joint entropy (and thus the redundancy) of the neural responses. We investigate the scaling of the redundancy as the population size is increased, and characterize its phase transitions for increasing correlation strengths. We compare the asymptotic redundancy in our models to the corresponding maximum- and minimum entropy models. Although this model must exhibit more redundancy than the maximum entropy model, we find that its joint entropy increases linearly with population size.},
web_url = {http://www.frontiersin.org/10.3389/conf.neuro.10.2008.01.086/event_abstract?sname=Bernstein_Symposium_2008},
event_name = {Bernstein Symposium 2008},
event_place = {München, Germany},
state = {published},
DOI = {10.3389/conf.neuro.10.2008.01.086},
author = {Macke J{jakob}; Opper M; Bethge M{mbethge}}
}
@Poster{ MackeBEOTB2008,
title = {Modeling populations of spiking neurons with the Dichotomized Gaussian distribution},
year = {2008},
month = {7},
web_url = {http://www.theswartzfoundation.org/summer-meeting-2008.asp},
event_name = {Annual Meeting 2008 of Sloan-Swartz Centers for Theoretical Neurobiology},
event_place = {Princeton, NJ, USA},
state = {published},
author = {Macke JH{jakob}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Opper M; Tolias AS{atolias}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}}
}
@Poster{ 5857,
title = {Analysis of Pattern Recognition Methods in Classifying Bold Signals in Monkeys at 7-Tesla},
year = {2008},
month = {6},
pages = {67},
abstract = {Pattern recognition methods have shown that fMRI data can reveal significant information
about brain activity. For example, in the debate of how object-categories are represented in
the brain, multivariate analysis has been used to provide evidence of distributed encoding
schemes. Many follow-up studies have employed different methods to analyze human fMRI
data with varying degrees of success. In this study we compare four popular pattern recognition
methods: correlation analysis, support-vector machines (SVM), linear discriminant analysis
and Gaussian naïve Bayes (GNB), using data collected at high field (7T) with higher resolution
than usual fMRI studies. We investigate prediction performance on single trials and for averages
across varying numbers of stimulus presentations. The performance of the various algorithms
depends on the nature of the brain activity being categorized: for several tasks,
many of the methods work well, whereas for others, no methods perform above chance level.
An important factor in overall classification performance is careful preprocessing of the data,
including dimensionality reduction, voxel selection, and outlier elimination.},
web_url = {http://www.areadne.org/2008/home.html},
event_name = {AREADNE 2008: Research in Encoding and Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Ku S-P{shihpi}{Department Physiology of Cognitive Processes}; Gretton A{arthur}{Department Empirical Inference}; Macke J{jakob}; Tolias AT{atolias}{Department Physiology of Cognitive Processes}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}}
}
@Poster{ 5101,
title = {Flexible Models for Population Spike Trains},
year = {2008},
month = {6},
pages = {48},
abstract = {In order to understand how neural systems perform computations and process sensory
information, we need to understand the structure of firing patterns in large populations of
neurons. Spike trains recorded from populations of neurons can exhibit substantial pair wise
correlations between neurons and rich temporal structure. Thus, efficient methods for
generating artificial spike trains with specified correlation structure are essential for the
realistic simulation and analysis of neural systems.
Here we show how correlated binary spike trains can be modeled by means of a latent
multivariate Gaussian model. Sampling from our model is computationally very efficient, and
in particular, feasible even for large populations of neurons. We show empirically that the
spike trains generated with this method have entropy close to the theoretical maximum. They
are therefore consistent with specified pair-wise correlations without exhibiting systematic
higher-order correlations. We compare our model to alternative approaches and discuss its
limitations and advantages. In addition, we demonstrate its use for modeling temporal
correlations in a neuron recorded in macaque primary visual cortex.
Neural activity is often summarized by discarding the exact timing of spikes, and only
counting the total number of spikes that a neuron (or population) fires in a given time window.
In modeling studies, these spike counts have often been assumed to be Poisson distributed
and neurons to be independent. However, correlations between spike counts have been
reported in various visual areas. We show how both temporal and inter-neuron correlations
shape the structure of spike counts, and how our model can be used to generate spike counts
with arbitrary marginal distributions and correlation structure. We demonstrate its capabilities
by modeling a population of simultaneously recorded neurons from the primary visual cortex
of a macaque, and we show how a model with correlations accounts for the data far better
than a model that assumes independence.},
web_url = {http://www.areadne.org/2008/home.html},
event_name = {AREADNE 2008: Research in Encoding and Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Bethge M{mbethge}; Macke JH{jakob}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}; Tolias AS{atolias}}
}
@Poster{ 5100,
title = {Pairwise Correlations and Multineuronal Firing Patterns in the Primary Visual Cortex of the Awake, Behaving Macaque},
year = {2008},
month = {6},
pages = {46},
abstract = {Understanding the structure of multi-neuronal firing patterns has been a central quest and major challenge for systems neuroscience. In particular, how do pairwise interactions between neurons shape the firing patterns of neuronal ensembles in the cortex? To study this question, we recorded simultaneously from multiple single neurons in the primary visual cortex of an awake, behaving macaque using an array of chronically implanted tetrodes1. High
contrast flashed and moving bars were used for stimulation, while the monkey was required to maintain fixation. In a similar vein to recent studies of in vitro preparations 2,3,5, we applied maximum entropy analysis for the first time to the binary spiking patterns of populations of cortical neurons recorded in vivo from the awake macaque. We employed the Dichotomized Gaussian distribution, which can be seen as a close approximation to the pairwise maximum-entropy model for binary data4. Surprisingly, we find that even pairs of neurons with nearby receptive
fields (receptive field center distance < 0.15°) have only weak correlations between their binary responses computed in bins of 10 ms (median absolute correlation coefficient: 0.014, 0.010-0.019, 95% confidence intervals, N=95 pairs; positive correlations: 0.015, N=59; negative correlations: -0.013, N=36). Accordingly, the distribution of spiking patterns of groups of 10 neurons is described well with a model that assumes independence between individual neurons (Jensen-Shannon-Divergence: $1.06\times 10^{-2}$ independent model, $0.96\times 10^{-2}$ approximate second-order maximum-entropy model4; $H/H_1=0.992$). These results suggest that the distribution of firing patterns of small cortical networks in the awake animal is predominantly determined by the mean activity of the participating cells, not by their interactions.
Meaningful computations, however, are performed by neuronal populations much larger than 10 neurons. Therefore, we investigated how weak pairwise correlations affect the firing patterns of artificial populations4 of up to 1000 cells with the same correlation structure as experimentally measured. We find that in neuronal ensembles of this size firing patterns with many active or silent neurons occur considerably more often than expected from a fully
independent population (e.g. 130 or more out of 1000 neurons are active simultaneously roughly every 300 ms in the correlated model and only once every 3-4 seconds in the
independent model). These results suggest that the firing patterns of cortical networks comparable in size to several minicolumns exhibit a rich structure, even if most pairs appear relatively independent when studying small subgroups thereof.},
web_url = {http://www.areadne.org/2008/home.html},
event_name = {AREADNE 2008: Research in Encoding and Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Berens P{berens}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}; Subramaniyan M; Macke JH{jakob}; Hauck P; Bethge M{mbethge}; Tolias AS{atolias}}
}
@Poster{ MackeSB2008,
title = {The role of stimulus correlations for population decoding in
the retina},
year = {2008},
month = {6},
pages = {73},
web_url = {http://www.areadne.org/2008/home.html},
event_name = {AREADNE 2008: Research in Encoding and Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Macke JH{jakob}; Schwartz G; Berry M}
}
@Poster{ 4952,
title = {The role of stimulus correlations for population decoding in the retina},
year = {2008},
month = {3},
volume = {5},
pages = {172},
abstract = {Given the responses of a large number of retinal ganglion cells, one should be able to construct a decoding algorithm to discriminate different visual stimuli. Despite the inherent noise in the response of the ganglion cell population, everyday visual experience is highly deterministic. We have designed an experiment to study the nature of the population code of the retina in the “low error” regime.
We presented 36 different black and white shapes, each with the same number of black pixels, to the retina of a tiger salamander while recording retinal ganglion cell responses using a multi-electrode array.
Each shape was presented over 100 trials for 0.5 s each and trials were randomly interleaved. Spike trains were recorded from 162 ganglion cells in 13 experiments. We removed noise correlations by shuffling trials, as we wanted to focus on the role of correlations induced by the stimulus (signal correlations).
We designed decoding algorithms for this population response in order to detect each target shape against
the distracter set of the 35 other shapes. Binary response vectors were constructed using a 100 ms bin following the presentation of each shape. First, we used a simple decoder that assumes that all neurons are independent. This decoder is a linear classifier. A second decoder, which takes into account correlations between neurons, was constructed by fitting Ising models1 to the population response using up to 162 neurons for each model.
We also constructed the statistically optimal decoder based on a mixture model, which accounts for signal correlations.
Using populations of many neurons, the optimal and Ising
decoders performed considerably better than the “independent” decoder. For certain shapes, the optimal decoder had 100 times fewer false positives than the independent decoder at 99% hit rate, and, in the median across shapes, the performance enhancement was 8-fold. While the decoder using an Ising model fit to the pairwise correlations did not achieve optimality, it was up to 50 times more accurate than the independent decoder, and 3 times more accurate in the median across shapes.
Some shape discriminations were performed at zero error out of 3500 trials using the optimal and Ising decoders on only a subset of the recorded cells while none reached this “low error” level using the independent decoder even on all 162 cells (see figure).
We find that discrimination with very low error using large populations requires a decoder that models signal correlations. Linear classifiers were unable to reach the “low error” regime. The Ising model of the population response is successfully applied to groups of up to 162 cells and offers a biologically feasible mechanism by which downstream neurons could account for correlations in their inputs.},
file_url = {/fileadmin/user_upload/files/publications/COSYNE2008-Schwartz_4952[0].pdf},
web_url = {http://cosyne.org/c/index.php?title=Cosyne_08},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2008)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Schwartz G; Macke J{jakob}; Berry M}
}
@Poster{ 4347,
title = {3D Reconstruction of Neural Circuits from Serial EM Images},
journal = {Neuroforum},
year = {2007},
month = {4},
volume = {13},
number = {Supplement},
pages = {1195},
abstract = {The neural processing of visual motion is of essential importance for course control. A basic model suggesting
a possible mechanism of how such a computation could be implemented in the fly visual system is the so
called "correlation-type motion detector" proposed by Reichardt and Hassenstein in the 1950s. The basic
requirement to reconstruct the neural circuit underlying this computation is the availability of electron
microscopic 3D data sets of whole ensembles of neurons constituting the fly visual ganglia. We apply a new
technique, "Serial Block Face Scanning Electron Microscopy" (SBFSEM), that allows for an automatic
sectioning and imaging of biological tissue with a scanning electron microscope [Denk, Horstman (2004)
Serial block face scanning electron microscopy to reconstruct three-dimensional tissue nanostructure. PLOS
Biology 2: 1900-1909]. Image Stacks generated with this technology have a resolution sufficient to
distinguish different cellular compartments, especially synaptic structures. Consequently detailed anatomical
knowledge of complete neuronal circuits can be obtained. Such an image stack contains several thousands of
images and is recorded with a minimal voxel size of 25nm in x and y and 30nm in z direction. Consequently a
tissue block of 1mm³ (volume of the Calliphora vicina brain) produces several hundred terabytes of data.
Therefore new concepts for managing large data sets and for automated 3D reconstruction algorithms need to
be developed. We developed an automated image segmentation and 3D reconstruction software, which allows
a precise contour tracing of cell membranes and simultaneously displays the resulting 3D structure. In detail,
the software contains two stand-alone packages: Neuron2D and Neuron3D, both offer an easy-to-operate
Graphical-User-Interface.
Neuron2D software provides the following image processing functions:
• Image Viewer: Display image stacks in single or movie mode and optional calculates intensity distribution
of each image.
• Image Preprocessing: Filter process of image stacks. Implemented filters are a Gaussian 2D and a
Non-Linear-Diffusion Filter. The filter step enhances the contrast between contour lines and image
background, leading to an enhanced signal to noise ratio which further improves detection of membrane
structures.
• Image Segmentation: The implemented algorithm extracts contour lines from the preceding image and
automatically traces the contour lines in the following images (z-direction), taking into account the previous
image segmentation. In addition, a manual interaction is possible.
To visualize 3D structures of neuronal circuits the additional software Neuron3D was developed. The
reconstruction of neuronal surfaces from contour lines, obtained in Neuron2D, is implemented as a graph
theory approach. The reconstructed anatomical data can further provide a subset for computational models of
neuronal circuits in the fly visual system.},
web_url = {http://nwg.glia.mdc-berlin.de/media/pdf/conference/Proceedings-Goettingen2007.pdf},
event_name = {7th Meeting of the German Neuroscience Society, 31st Göttingen Neurobiology Conference},
event_place = {Göttingen, Germany},
state = {published},
author = {Maack N; Kapfer C; Macke JH{jakob}; Sch\"olkopf B{bs}{Department Empirical Inference}; Denk W; Borst A}
}
@Poster{ 4345,
title = {Identifying temporal population codes in the retina using canonical correlation analysis},
journal = {Neuroforum},
year = {2007},
month = {4},
volume = {13},
number = {Supplement},
pages = {359},
abstract = {Right from the first synapse in the retina, the visual information gets distributed across several parallel
channels with different temporal filtering properties (Wässle, 2004). Yet, the prevalent system identification
tool for characterizing neural responses, the spike-triggered average, only allows one to investigate the
individual neural responses independently of each other. Here, we present a novel data analysis tool for the
identification of temporal population codes based on canonical correlation analysis (Hotelling, 1936).
Canonical correlation analysis allows one to find `population receptive fields' (PRF) which are maximally
correlated with the temporal response of the entire neural population. The method is a convex optimization
technique which essentially solves an eigenvalue problem and is not prone to local minima.
We apply the method to simultaneous recordings from rabbit retinal ganglion cells in a whole mount
preparation (Zeck et al, 2005). The cells respond to a 16 by 16 pixel m-sequence stimulus presented at a frame
rate of 1/(20 msec). The response of 27 ganglion cells is correlated with each input frame in an interval
between zero and 200 msec relative to the stimulus. The 200 msec response period is binned into 14
equal-sized bins. As shown in the figure, we obtain six predictive population receptive fields (left column),
each of which gives rise to a different population response (right column). The x-axis of the color-coded
images used to describe the population response kernels (right column) corresponds to the index of the 27
different neurons, while the y-axis indicates time relative to the stimulus from 0 (top) to 200 msec (bottom).
The six population receptive fields do not only provide a more concise description of the population response
but can also be estimated much more reliably than the receptive fields of individual neurons.
In conclusion, we suggest to characterize retinal ganglion cell responses in terms of population receptive
fields, rather than discussing stimulus-neuron and neuron-neuron dependencies separately.},
file_url = {/fileadmin/user_upload/files/publications/TS24-2C_4345[0].pdf},
web_url = {http://nwg.glia.mdc-berlin.de/media/pdf/conference/Proceedings-Goettingen2007.pdf},
event_name = {7th Meeting of the German Neuroscience Society, 31st Göttingen Neurobiology Conference},
event_place = {Göttingen, Germany},
state = {published},
author = {Bethge M{mbethge}; Macke JH{jakob}; Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Zeck G{gzeck}}
}
@Poster{ 4265,
title = {Implicit Wiener Series for Estimating Nonlinear Receptive Fields},
journal = {Neuroforum},
year = {2007},
month = {4},
volume = {13},
number = {Supplement},
pages = {1199},
abstract = {The representation of the nonlinear response properties of a neuron by a Wiener series expansion has enjoyed
a certain popularity in the past, but its application has been limited to rather low-dimensional and weakly
nonlinear systems due to the exponential growth of the number of terms that have to be estimated. A recently
developed estimation method [1] utilizes the kernel techniques widely used in the machine learning
community to implicitly represent the Wiener series as an element of an abstract dot product space. In contrast
to the classical estimation methods for the Wiener series, the estimation complexity of the implicit
representation is linear in the input dimensionality and independent of the degree of nonlinearity.
From the neural system identification point of view, the proposed estimation method has several advantages:
1. Due to the linear dependence of the estimation complexity on input dimensionality, system identification
can be also done for systems acting on high-dimensional inputs such as images or video sequences.
2. Compared to classical cross-correlation techniques (such as spike-triggered average or covariance
estimates), similar accuracies can be achieved with a considerably smaller amount of data.
3. The new technique does not need white noise as input, but works for arbitrary classes of input signals such
as, e.g., natural image patches.
4. Regularisation concepts from machine learning to identify systems with noise-contaminated output signals.
We present an application of the implicit Wiener series to find the low-dimensional stimulus subspace which
accounts for most of the neuron's activity. We approximate the second-order term of a full Wiener series
model with a set of parallel cascades consisting of a linear receptive field and a static nonlinearity. This type
of approximation is known as reduced set technique in machine learning. We compare our results on
simulated and physiological datasets to existing identification techniques in terms of prediction performance
and accuracy of the obtained subspaces.},
web_url = {http://nwg.glia.mdc-berlin.de/media/pdf/conference/Proceedings-Goettingen2007.pdf},
event_name = {7th Meeting of the German Neuroscience Society, 31st Göttingen Neurobiology Conference},
event_place = {Göttingen, Germany},
state = {published},
author = {Franz MO{mof}{Department Empirical Inference}; Macke JH{jakob}; Saleem A; Schultz SR}
}
@Poster{ 4668,
title = {Estimating Population Receptive Fields in Space and Time},
year = {2007},
month = {2},
pages = {44},
abstract = {Right from the first synapse in the retina, visual information gets distributed
across several parallel channels with different temporal filtering properties.
Yet, commonly used system identification tools for characterizing
neural responses, such as the spike-triggered average, only allow one to
investigate the individual neural responses independently of each other.
Conversely, many population coding models of neurons and correlations
between neurons concentrate on the encoding of a single-variate stimulus.
We seek to identify the features of the visual stimulus that are encoded in
the temporal response of an ensemble of neurons, and the corresponding
spike-patterns that indicate the presence of these features.
We present a novel data analysis tool for the identification of such temporal
population codes based on canonical correlation analysis (Hotelling,
1936). The “population receptive fields” (PRFs) are defined to be those
dimensions of the stimulus-space that are maximally correlated with the
temporal response of the entire neural population, irrespective of whether
the stimulus features are encoded by the responses of single neurons or by
patterns of spikes across neurons or time. These dimensions are identified
by canonical correlation analysis, a convex optimization technique which essentially solves an eigenvalue
problem and is not prone to local minima.
Each receptive field can be represented by the weighted sum of a small number of functions that are separable
in space-time. Therefore, non-separable receptive fields can be estimated more efficiently than with spiketriggered
techniques, which makes our method advantageous even for the estimation of single-cell receptive
fields.
The method is demonstrated by applying it to data from multi-electrode recordings from rabbit retinal ganglion
cells in a whole mount preparation (Zeck et al, 2005). The figure displays the first 6 PRFs of a population
of 27 cells from one such experiment. The recovered stimulus-features look qualitatively different
to the receptive fields of single retinal ganglion cells. In addition, we show how the model can be extended
to capture nonlinear stimulus-response relationships and to test different coding-mechanisms by the
use of kernel-canonical correlation analysis. In conclusion, we suggest to characterize responses of ensembles
of neurons in terms of PRFs, rather than discussing stimulus-neuron and neuron-neuron dependencies
separately.},
file_url = {/fileadmin/user_upload/files/publications/Cosyne-2007-I-37_[0].pdf},
web_url = {http://www.cosyne.org/wiki/Cosyne_07_Program},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2007)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Macke JH{jakob}; Zeck G{gzeck}; Bethge M{mbethge}}
}
@Poster{ 4358,
title = {Nonlinear Receptive Field Analysis: Making Kernel Methods Interpretable},
year = {2007},
month = {2},
pages = {16},
abstract = {Identification of stimulus-response functions is a central problem in systems neuroscience and related areas.
Prominent examples are the estimation of receptive fields and classification images [1]. In most cases, the
relationship between a high-dimensional input and the system output is modeled by a linear (first-order) or
quadratic (second-order) model. Models with third or higher order dependencies are seldom used, since
both parameter estimation and model interpretation can become very difficult.
Recently, Wu and Gallant [3] proposed the use of kernel methods, which have become a standard tool in
machine learning during the past decade [2]. Kernel methods can capture relationships of any order, while
solving the parameter estimation problem efficiently. In short, the stimuli are mapped into a high-dimensional
feature space, where a standard linear method, such as linear regression or Fisher discriminant, is applied.
The kernel function allows for doing this implicitly, with all computations carried out in stimulus space.
As a consequence, the resulting model is nonlinear, but many desirable properties of linear methods are
retained. For example, the estimation problem has no local minima, which is in contrast to other nonlinear
approaches, such as neural networks [4].
Unfortunately, although kernel methods excel at modeling complex functions, the question of how to interpret
the resulting models remains. In particular, it is not clear how receptive fields should be defined in
this context, or how they can be visualized. To remedy this, we propose the following definition: noting
that the model is linear in feature space, we define a nonlinear receptive field as a stimulus whose image in
feature space maximizes the dot-product with the learned model. This can be seen as a generalization of the
receptive field of a linear filter: if the feature map is the identity, the kernel method becomes linear, and our
receptive field definition coincides with that of a linear filter. If it is nonlinear, we numerically invert the
feature space mapping to recover the receptive field in stimulus space.
Experimental results show that receptive fields of simulated visual neurons, using natural stimuli, are correctly
identified. Moreover, we use this technique to compute nonlinear receptive fields of the human fixation
mechanism during free-viewing of natural images.},
file_url = {/fileadmin/user_upload/files/publications/Cosyne-2007-I-9_4358[0].pdf},
web_url = {http://www.cosyne.org/wiki/Cosyne_07_Program},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2007)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Kienzle W{kienzle}{Department Empirical Inference}; Macke JH{jakob}; Wichmann FA{felix}{Department Empirical Inference}; Sch\"olkopf B{bs}{Department Empirical Inference}; Franz MO{mof}{Department Empirical Inference}}
}
@Conference{ GoncalvesLBNM2017_2,
title = {Flexible Bayesian inference for complex models of single neurons},
journal = {BMC Neuroscience},
year = {2017},
month = {8},
volume = {18},
number = {Supplement 1},
pages = {58},
abstract = {Characterizing the input-output transformations of single neurons is critical for understanding neural computation.
Single-neuron models have been extensively studied, ranging from simple phenomenological models to complex
multi-compartment neurons. However, linking mechanistic models of single-neurons to empirical observations of
neural activity has been challenging. Statistical inference is only possible for a few neuron models (e.g. GLMs),
and no generally applicable, effective statistical inference algorithms are available: As a consequence, comparisons
between models and data are either qualitative or rely on manual parameter tweaking, parameter-fitting using heuristics or brute-force search [1]. Furthermore, parameter-fitting approaches typically return a single
best-fitting estimate, but do not characterize the entire space of models that would be consistent with data (the
posterior distribution). We overcome this limitation by presenting a general method to infer the posterior distribution over model parameters given observed data on complex single-neuron models. Our approach can be applied in a ‘black box’ manner to a wide range of single-neuron models without requiring model-specific modifications. In particular, it extends to models without explicit likelihoods (e.g. most single-neuron models). We achieve this goal by building on recent advances in likelihood-free Bayesian inference [2]: the key idea is to simulate multiple data-sets from different parameters, and then to train a probabilistic neural network which approximates the mapping from data to posterior distribution. We illustrate this approach using single- and multi-compartment models of single neurons: On simulated data, estimated posterior distributions recover ground-truth parameters, and reveal the manifold of parameters for which the model exhibits the same behaviour. On in-vitro recordings of membrane voltages, we recover multivariate posteriors over biophysical parameters, and voltage traces accurately match empirical data. Our approach will enable neuroscientists to perform Bayesian inference on complex neuron models without having to design model-specific algorithms, closing the gap between biophysical and statistical approaches to single-neuron modelling.},
web_url = {https://bmcneurosci.biomedcentral.com/track/pdf/10.1186/s12868-017-0370-3},
event_name = {Twenty-Sixth Annual Computational Neuroscience Meeting (CNS*2017)},
event_place = {Antwerpen, Belgium},
state = {published},
DOI = {10.1186/s12868-017-0370-3},
author = {Goncalves P; Lueckmann J-M; Bassetto G; Nonnenmacher M; Macke J{jakob}}
}
@Conference{ Macke2017,
title = {Correlations and signatures of criticality in neural population models},
year = {2017},
month = {1},
day = {23},
abstract = {Large-scale recording methods make it possible to measure the statistics of neural population activity and to gain insights into the principles that govern the collective activity of neural ensembles. One hypothesis that has emerged from this approach is that neural populations are poised at a thermodynamic critical point. Support for this notion has come from a recent series of studies which identified signatures of criticality in the statistics of neural activity recorded from populations of retinal ganglion cells, and hypothesized that the retina might be optimised to be operating at this critical point.
What mechanisms can explain these observations? Do they require the neural system to be fine-tuned to be poised at the critical point, or do they robustly emerge in generic circuits? We here show that these effects arise in a simple, canonical models of retinal population activity. They robustly appear across a range of parameters, and can be understood analytically in a simple model. These observations pose the question of whether signatures of criticality are indicative of an optimised coding strategy, or whether alternative theories are more promising candidates for understanding sensory coding.},
web_url = {http://www.bccn-tuebingen.de/events/double-feature-event-bernsteincrc-kick-off-symposium.html},
event_name = {Double Feature Workshop: Bernstein Symposium & Kick-Off Symposium},
event_place = {Tübingen, Germany},
state = {published},
author = {Macke J{jakob}}
}
@Conference{ Macke2015_2,
title = {Correlations and signatures of criticality in neural population models},
year = {2015},
month = {12},
day = {11},
abstract = {Large-scale recording methods make it possible to measure the statistics of neural population activity, and thereby to gain insights into the principles that govern the collective activity of neural ensembles. One hypothesis that has emerged from this approach is that neural populations are poised at a ‘thermo-dynamic critical point’, and that this has important functional consequences
(Tkacik et al 2014). Support for this hypothesis has come from studies that computed the specific heat, a measure of global population statistics, for groups of neurons subsampled from population recordings. These studies have found two effects which in physical systems indicate a critical point: First, specific heat diverges with population size N. Second, when manipulating population
statistics by introducing a ’temperature’ in analogy to statistical mechanics, the maximum heat moves towards unit
temperature for large populations.
What mechanisms can explain these observations? We show that both effects arise in a simple simulation of retinal population activity. They robustly appear across a range of parameters including biologically implausible ones, and can be understood analytically in simple models. The specific heat grows with N whenever the (average) correlation is independent of N, which is always true when uniformly subsampling a large, correlated population. For weakly
correlated populations, the rate of divergence of the specific heat is proportional to the correlation strength. Thus, if retinal population codes were optimized to
maximize specific heat, then this would predict that they seek to increase correlations. This is incongruent with theories of efficient coding that make the opposite prediction. We find criticality in a simple and parsimonious model of retinal processing, and without the need for fine-tuning or adaptation. This suggests that signatures of criticality might not require an optimized coding strategy, but rather arise as consequence of subsampling a stimulus-driven neural population (Aitchison et al 2014).},
web_url = {https://netadis.wordpress.com/nips-workshop-2015/},
event_name = {NIPS 2015 Workshop: Modelling and Inference for Dynamics on Complex Interaction Networks: Joining Up Machine and Statistical Physics},
event_place = {Montréal, Canada},
state = {published},
author = {Macke J{jakob}}
}
@Conference{ Macke2015_4,
title = {Correlations and signatures of criticality in neural population models},
year = {2015},
month = {10},
day = {29},
abstract = {Large-scale recording methods make it possible to measure the statistics of neural population activity and to gain insights into the principles that govern the collective activity of neural ensembles. One hypothesis that has emerged from this approach is that neural populations are poised at a thermodynamic critical point. Support for this notion has come from a recent series of studies which identified signatures of criticality (such as a divergence of the specific heat with population size) in the statistics of neural activity recorded from populations of retinal ganglion cells, and hypothesized that the retina might be optimised to be operating at this critical point.
What mechanisms can explain these observations? Do they require the neural system to be fine-tuned to be poised at the critical point, or do they robustly emerge in generic circuits? How are signatures of criticality related to the structure of correlations within the neural population? We here show that these effects arise in a simple simulation of retinal population activity. They robustly appear across a range of parameters including biologically implausible ones, and can be understood analytically in a simple model. The specific heat diverges linearly with population size n whenever the (average) correlation is independent of n— in particular, this is generally true when subsampling a large, correlated population. These observations pose the question of whether signatures of criticality are indicative of an optimised coding strategy, or whether they arise as byproduct of sub-sampling a neural population with correlations.},
web_url = {http://www.iec-lnc.ens.fr/group-for-neural-theory/events/gnt-iec-new-ideas-in-theoretical/past-events/},
event_name = {Institut d'Etudes de la Cognition (IEC) at the Ecole Normale Supérieure: Group for Neural Theory},
event_place = {Paris, France},
state = {published},
author = {Macke J{jakob}}
}
@Conference{ NonnenmacherBBBM2015,
title = {Correlations and signatures of criticality in neural population models},
year = {2015},
month = {9},
day = {16},
pages = {27-28},
abstract = {Large-scale recording methods make it possible to measure the statistics of neural population activity, and thereby to gain insights into the principles that govern the
collective activity of neural ensembles. One hypothesis that has emerged from this approach is that neural populations are poised at a thermodynamic critical point [1], and that this may have important functional consequences. Support for this hypothesis has come from studies [2,3] that identified signatures of criticality (such as a divergence of the specific heat with population size) in the statistics of neural activity recorded from populations of retinal ganglion cells. What mechanisms can explain these observations? Do they require the neural system to be fine-tuned to be poised at the critical point, or do they robustly emerge in generic circuits [4,5,6]?
We show that indicators for thermodynamic criticality arise in a simple simulation of retinal population activity, and without the need for fine-tuning or adaptation. Using simple statistical models [7], we demonstrate that peak specific heat grows with population size whenever the (average) correlation is independent of the number of
neurons. The latter is always true when uniformly subsampling a large, correlated population. For weakly correlated populations, the rate of divergence of the specific heat is proportional to the correlation strength. This predicts that neural populations would be strongly correlated if they were optimized to maximize specific heat, which is in contrast with theories of efficient coding that make the opposite prediction. Our findings suggest that indicators for thermodynamic criticality might not require an optimized coding strategy, but rather arise as consequence of subsampling a stimulus-driven neural population.},
web_url = {http://www.nncn.de/de/bernstein-conference/2015/program},
event_name = {Bernstein Conference 2015},
event_place = {Heidelberg, Germany},
state = {published},
DOI = {10.12751/nncn.bc2015.0013},
author = {Nonnenmacher M{mnonnenmacher}; Behrens C; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}; Macke J{jakob}}
}
@Conference{ Macke2016,
title = {Estimating state and parameters in Gaussian state-space models with point-process observations},
year = {2015},
month = {9},
day = {14},
web_url = {http://www.nncn.de/de/bernstein-conference/2015/satellite-workshops/estimating-parameters-and-unobserved-state-variables-from-neural-data},
event_name = {Bernstein Conference 2015 Satellite Workshop "Estimating parameters and unobserved state variables from neural data"},
event_place = {Heidelberg, Germany},
state = {published},
author = {Macke J{jakob}}
}
@Conference{ Macke2015_3,
title = {Correlations, criticality and common input},
year = {2015},
month = {4},
day = {2},
abstract = {Large-scale recording methods make it possible to measure the statistics of neural population activity, and to describe their joint statistics by fitting statistical models to population spike train data. What can the
statistical structure of neural population data tell us about the underlying mechanisms, as well as about the
principles that govern the collective activity and coding properties of neural ensembles? One intriguing hypothesis that has emerged from this approach is that the statistics of neural populations resemble those of physical systems
which are poised at a thermo-dynamic critical point. Support for this hypothesis has come from studies that computed the `specific heat’ (a measure of global population statistics which is effectively the normalized variance of log-probabilities of spike-patterns). These studies have found two effects which—in physical systems indicate a critical point: First, specific heat diverges
with population size N. Second, when manipulating population statistics by introducing a ’temperature’ in
analogy to statistical mechanics, the maximum heat moves towards unit-temperature for large populations. What mechanisms can explain these observations? Do they require the neural system to be fine-tuned to be poised at the critical point, or do they robustly emerge in generic circuits? How are signatures of criticality related to the structure of correlations within the neural population? In this talk, I will address these questions, give some answers, and pose more questions.},
web_url = {http://lcn1.epfl.ch/files/content/sites/lcn/files/2015%20Seminars/SCNS%2002%2004%2015%20-%20Macke.pdf},
event_name = {University of Zurich: Swiss Computational Neuroscience Seminars},
event_place = {Zürich, Switzerland},
state = {published},
author = {Macke J{jakob}}
}
@Conference{ Macke2015,
title = {Dissecting choice-probabilities in V2 neurons using serial dependence},
year = {2015},
month = {3},
day = {9},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne2015_Program},
event_name = {COSYNE 2015 Workshops},
event_place = {Snowbird, UT, USA},
state = {published},
author = {Macke J{jakob}}
}
@Conference{ Macke2014_2,
title = {Statistical methods for characterizing cortical population activity},
year = {2014},
month = {5},
web_url = {http://2014.occam-os.de/videos.html},
event_name = {Osnabrück Computational Cognition Alliance Meeting on "The Brain as a Probabilistic Inference Engine" (OCCAM 2014)},
event_place = {Osnabrück, Germany},
state = {published},
author = {Macke JH{jakob}}
}
@Conference{ Macke2013_3,
title = {Inferring neural population dynamics from multiple partial measurements of the same circuit},
year = {2013},
month = {10},
day = {25},
web_url = {http://itp.uni-frankfurt.de/~gros/Seminar/groupSeminar.html},
event_name = {Group Seminar C. Gros "Complex and Cognitive Systems": Max-Planck-Institute for Brain Research},
event_place = {Frankfurt a.M., Germany},
state = {published},
author = {Macke J{jakob}}
}
@Conference{ Macke2013_2,
title = {Characterizing the dynamics of large neural populations},
year = {2013},
month = {9},
day = {9},
web_url = {https://www.helmholtz-muenchen.de/icb/institute/icb-seminar/past-seminars/index.html},
event_name = {ICB Institute of Computational Biology: Helmholtz Zentrum},
event_place = {München, Germany},
state = {published},
author = {Macke J{jakob}}
}
@Conference{ Macke2013,
title = {B8: Statistical Modelling of Psychophysical Data},
journal = {Perception},
year = {2013},
month = {8},
day = {25},
volume = {42},
number = {ECVP Abstract Supplement},
pages = {4},
abstract = {In this tutorial, we will discuss some statistical techniques that one can use in order to obtain a more accurate statistical model of the relationship between experimental variables and psychophysical performance. We will use models which include the effect of additional, non-stimulus determinants of behaviour, and which therefore give us additional flexibility in analysing psychophysical data. For example, these models will allow us to estimate the effect of experimental history on the responses on an observer, and to automatically correct for errors which can be attributed to such history-effects. By reanalysing a large data-set of low-level psychophysical data, we will show that the resulting models have vastly superior statistical goodness of fit, give more accurate estimates of psychophysical functions and allow us to detect and capture interesting temporal structure in psychophysical data. In summary, the approach presented in this tutorial does not only yield more accurate models of the data, but also has the potential to reveal unexpected structure in the kind of data that every visual scientist has plentiful-- classical psychophysical data with binary responses.},
web_url = {http://pec.sagepub.com/content/42/1_suppl.toc},
event_name = {36th European Conference on Visual Perception (ECVP 2013)},
event_place = {Bremen, Germany},
state = {published},
DOI = {10.1177/03010066130420S101},
author = {Macke J{jakob}}
}
@Conference{ GerwinnMB2010,
title = {Toolbox for inference in generalized linear models of spiking neurons},
journal = {Frontiers in Computational Neuroscience},
year = {2010},
month = {10},
volume = {2010},
number = {Conference Abstract: Bernstein Conference on Computational Neuroscience},
abstract = {Generalized linear models are increasingly used for analyzing neural data, and to characterize the stimulus dependence and functional connectivity of both single neurons and neural populations. One possibility to extend the computational complexity of these models is to expand the stimulus, and possibly the representation of the spiking history into high dimensional feature spaces.
When the dimension of the parameter space is large, strong regularization has to be used in order to fit GLMs to datasets of realistic size without overfitting. By imposing properly chosen priors over parameters, Bayesian inference provides an effective and principled approach for achieving regularization.
In this work, we present a MATLAB toolbox which provides efficient inference methods for parameter fitting. This includes standard maximum a posteriori estimation for Gaussian and Laplacian prior, which is also sometimes referred to as L1- and L2-regularization. Furthermore, it implements approximate inference techniques for both prior distributions based on the expectation propagation algorithm [1].
In order to model the refractory property and functional couplings between neurons, the spiking history within a population is often represented as responses to a set of predefined basis functions. Most of the basis function sets used so far, are non-orthogonal. Commonly priors are specified without taking the properties of the basis functions into account (uncorrelated Gauss, independent Laplace). However, if basis functions overlap, the coefficients are correlated. As an example application of this toolbox, we analyze the effect of independent prior distributions, if the set of basis functions are non-orthogonal and compare the performance to the orthogonal setting.},
web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2010.51.00091/event_abstract?sname=Bernstein_Conference_on_Computational_Neuroscience_1},
event_name = {Bernstein Conference on Computational Neuroscience (BCCN 2010)},
event_place = {Berlin, Germany},
state = {published},
DOI = {10.3389/conf.fncom.2010.51.00091},
author = {Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Bethge M{mbethge}}
}
% COSYNE 2010 poster abstract (Frontiers in Neuroscience abstract series).
% NOTE(review): author surname appears as "Haefner" in the author field but the
% citation key says "Hafner"; the key must stay unchanged (existing \cite uses),
% so this is annotation only. Nonstandard fields (web_url, event_name,
% event_place, state, bracketed author tags) are institutional-repository
% extras; standard BibTeX styles ignore unknown field names.
@Conference{ HafnerGMB2010,
title = {Implications of correlated neuronal noise in decision making circuits for physiology and behavior},
journal = {Frontiers in Neuroscience},
year = {2010},
month = {2},
volume = {Conference Abstract: Computational and Systems Neuroscience 2010},
abstract = {Understanding how the activity of sensory neurons contribute to perceptual decision making is one of the major questions in neuroscience. In the current standard model, the output of opposing pools of noisy, correlated sensory neurons is integrated by downstream neurons whose activity elicits a decision-dependent behavior [1][2]. The predictions of the standard model for empirical measurements like choice probability (CP), psychophysical kernel (PK) and reaction time distribution crucially depend on the spatial and temporal correlations within the pools of sensory neurons. This dependency has so far only been investigated numerically and for time-invariant correlations and variances. However, it has recently been shown that the noise variance undergoes significant changes over the course of the stimulus presentation [3]. The same is true for inter-neuronal correlations that have been shown to change with task and attentional state [4][5]. In the first part of our work we compute analytically the time course of CPs and PKs in the presence of arbitrary noise correlations and variances for the case of non-leaky integration and Gaussian noise. This allows general insights and is especially needed in the light of the experimental transition from single-cell to multi-cell recordings. Then we simulate the implications of realistic noise in several variants of the standard model (leaky and non-leaky integration, integration over the entire stimulus presentation or until a bound, with and without urgency signal) and compare them to physiological data. We find that in the case of non-leaky integration over the entire stimulus duration, the PK only depends on the overall level of noise variance, not its time course. That means that the PK remains constant regardless of the temporal changes in the noise. 
This finding supports an earlier conclusion that an observed decreasing PK suggests that the brain is not integrating over the entire stimulus duration but only until it has accumulated sufficient evidence, even in the case of no urgency [6]. The time course of the CP, on the other hand, strongly depends on the time course of the noise variances and on the temporal and interneuronal correlations. If noise variance or interneuronal correlation increases, CPs increase as well. This dissociation of PK and CP allows an alternative solution to the puzzle recently posed by [7] in a bottom-up framework by combining integration to a bound with an increase in noise variance/correlation. In addition, we derive how the distribution of reaction times depends on noise variance and correlation, further constraining the model using empirical observations.},
web_url = {http://www.frontiersin.org/10.3389/conf.fnins.2010.03.00023/event_abstract},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2010)},
event_place = {Salt Lake City, UT, USA},
state = {published},
DOI = {10.3389/conf.fnins.2010.03.00023},
author = {Haefner R{rhaefner}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Macke J{jakob}; Bethge M{mbethge}}
}
% Invited talk at the Computational Neuroscience Meeting 2009 (Tübingen).
% Talk entry with no journal/abstract — only the fields visible here; the
% web_url points to the (now historical) CIN event listing.
@Conference{ Macke2009,
title = {Modelling correlated populations: Redundancies, spike counts and the effect of common input},
year = {2009},
month = {7},
web_url = {http://www.cin.uni-tuebingen.de/news-events/browse-all-events/detail/view/338/page/3/conference-computational-neuroscience-meeting-2009.html},
event_name = {Computational Neuroscience Meeting 2009},
event_place = {Tübingen, Germany},
state = {published},
author = {Macke J{jakob}}
}
@Conference{ MackeOB2008,
title = {How pairwise correlations affect the redundancy in large populations of neurons},
journal = {Frontiers in Computational Neuroscience},
year = {2008},
month = {10},
volume = {2008},
number = {Conference Abstract: Bernstein Symposium 2008},
web_url = {http://www.frontiersin.org/community/AbstractDetails.aspx?ABS_DOI=10.3389/conf.neuro.10.2008.01.086&eid=108&sname=Bernstein_Symposium_2008},
event_name = {Bernstein Symposium 2008},
event_place = {München, Germany},
state = {published},
DOI = {10.3389/conf.neuro.10.2008.01.086},
author = {Macke JH{jakob}; Opper M; Bethge M{mbethge}}
}
@Conference{ KuGML2008,
title = {Pattern recognition methods in classifying fMRI data},
year = {2008},
month = {10},
pages = {11},
abstract = {Pattern recognition methods have shown that fMRI data can reveal significant information about brain activity. For example, in the debate of how object categories are represented in the brain, multivariate analysis has been used to provide evidence of a distributed encoding scheme. Many follow-up studies have employed different methods to analyze human fMRI data with varying degrees of success. In this presentation I would like to discuss and compare four popular pattern recognition methods: correlation analysis,
support-vector machines (SVM), linear discriminant analysis and Gaussian naive Bayes (GNB), using data collected at high field (7T) with higher resolution than usual fMRI
studies. We investigate prediction performance on single trials and for averages across varying numbers of stimulus presentations. The performance of the various algorithms
depends on the nature of the brain activity being categorized: for several tasks, many of the methods work well, whereas for others, no methods perform above chance level. An important factor in overall classification performance is careful preprocessing of the data,
including dimensionality reduction, voxel selection, and outlier elimination.},
event_name = {9th Conference of the Junior Neuroscientists of Tübingen (NeNa 2008)},
event_place = {Ellwangen, Germany},
state = {published},
author = {Ku S-P{shipi}; Gretton A{arthur}{Department Empirical Inference}; Macke J{jakob}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}}
}
% SfN 2007 poster (program number 768.1 in volume 37 of the meeting planner).
% NOTE(review): the key "5408" breaks the file's AuthorYear key scheme
% (cf. MackeOB2008, GerwinnMB2010) — presumably a raw repository record id.
% Do not rename: existing \cite{5408} references would break.
@Conference{ 5408,
title = {Estimating receptive fields without spike-triggering},
year = {2007},
month = {11},
volume = {37},
number = {768.1},
abstract = {The prevalent means of characterizing stimulus selectivity in sensory neurons is to estimate their receptive field properties such as orientation selectivity. Receptive fields are usually derived from the mean (or covariance) of the spike-triggered stimulus ensemble.
This approach treats each spike as an independent message but ignores the possibility that information might be conveyed through patterns of neural activity that are distributed across space or time.
In the retina for example, visual stimuli are analyzed by several parallel channels with different spatiotemporal filtering properties. How can we define the receptive field of a whole population of neurons, not just a single neuron?
Imaging methods (such as voltage-sensitive dye imaging) yield measurements of neural activity that do not contain spiking events at all. How can receptive fields be derived from this kind of data?
Even for single neurons, there is evidence that multiple features of the neural response, for example spike patterns or latencies, can carry information. How can these features be taken into account in the estimation process?
Here, we address the question of how receptive fields can be calculated from such distributed representations. We seek to identify those stimulus features and the corresponding patterns of neural activity that are most reliably coupled, as measured by the mutual information between the two signals. As an efficient implementation of this strategy, we use an extension of reverse-correlation methods based on canonical correlation analysis [1]. We evaluate our approach using both simulated data and multi-electrode recordings from rabbit retinal ganglion cells [2]. In addition, we show how the model can be extended to capture nonlinear stimulus-response relationships and to test different coding mechanisms using kernel canonical correlation analysis [3].},
web_url = {http://www.sfn.org/am2007/},
event_name = {37th Annual Meeting of the Society for Neuroscience (Neuroscience 2007)},
event_place = {San Diego, CA, USA},
state = {published},
author = {Macke JH{jakob}; Zeck G{gzeck}; Bethge M{mbethge}}
}
% NeNa 2006 conference talk.
% NOTE(review): the abstract is truncated at the start — it opens mid-sentence
% ("neurons- during a visual task is an important pre-requisite..."); the
% missing opening text should be recovered from the original NeNa 2006
% program/record rather than reconstructed by guesswork.
@Conference{ Macke2006,
title = {Decision-Images: A tool for identifying critical stimulus features},
year = {2006},
month = {11},
volume = {7},
pages = {10},
abstract = {neurons- during a visual task is an important pre-requisite for computational models of visual cognition. We describe a technique for estimating high-dimensional decision-images, and apply the method to a psychophysical gender discrimination task. The use of regularization makes it possible to map out decision-images using a relatively small number of stimuli.
Statistical analysis of the result shows a remarkable fit to the datasets collected—remarkable, as gender discrimination is a rather high-level visual task, and thus believed to be complex, but our model is conceptually rather simple. We demonstrate that the decision-images are sensitive to subtle changes in lighting, texture, and pose, and to individual differences in gender discrimination exhibited by our subjects.
We show how decision-images can be used to create new stimuli, and how the approach can be generalized to non-linear and multi-scale decision-images. In addition, connections to reverse correlation techniques for receptive field estimation are described.},
event_name = {7th Conference of the Junior Neuroscientists of Tübingen (NeNa 2006)},
event_place = {Oberjoch, Germany},
state = {published},
author = {Macke J{jakob}}
}