@Article{ HosseiniSTB2016,
title = {Inference and mixture modeling with the Elliptical Gamma Distribution},
journal = {Computational Statistics \& Data Analysis},
year = {2016},
month = {9},
volume = {101},
pages = {29--43},
abstract = {The authors study modeling and inference with the Elliptical Gamma Distribution (EGD). In particular, Maximum likelihood (ML) estimation for EGD scatter matrices is considered, a task for which the authors present new fixed-point algorithms. The algorithms are shown to be efficient and convergent to global optima despite non-convexity. Moreover, they turn out to be much faster than both a well-known iterative algorithm of Kent \& Tyler and sophisticated manifold optimization algorithms. Subsequently, the ML algorithms are invoked as subroutines for estimating parameters of a mixture of EGDs. The performance of the methods is illustrated on the task of modeling natural image statistics---the proposed EGD mixture model yields the most parsimonious model among several competing approaches.},
web_url = {http://www.sciencedirect.com/science/article/pii/S0167947316300251},
state = {published},
DOI = {10.1016/j.csda.2016.02.009},
author = {Hosseini R{hosseini}; Sra S{suvrit}; Theis L{lucas}; Bethge M{mbethge}}
}
@Article{ TheisBFRRBETB2016,
title = {Benchmarking Spike Rate Inference in Population Calcium Imaging},
journal = {Neuron},
year = {2016},
month = {5},
volume = {90},
number = {3},
pages = {471--482},
abstract = {A fundamental challenge in calcium imaging has been to infer spike rates of neurons from the measured noisy fluorescence traces. We systematically evaluate different spike inference algorithms on a large benchmark dataset (>100,000 spikes) recorded from varying neural tissue (V1 and retina) using different calcium indicators (OGB-1 and GCaMP6). In addition, we introduce a new algorithm based on supervised learning in flexible probabilistic models and find that it performs better than other published techniques. Importantly, it outperforms other algorithms even when applied to entirely new datasets for which no simultaneously recorded data is available. Future data acquired in new experimental conditions can be used to further improve the spike prediction accuracy and generalization performance of the model. Finally, we show that comparing algorithms on artificial data is not informative about performance on real data, suggesting that benchmarking different methods with real-world datasets may greatly facilitate future algorithmic developments in neuroscience.},
web_url = {http://www.sciencedirect.com/science/article/pii/S0896627316300733},
state = {published},
DOI = {10.1016/j.neuron.2016.04.014},
author = {Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Froudarakis E; Reimer J; Rom{\'a}n Ros{\'o}n M; Baden T; Euler T; Tolias AS{atolias}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ WallisBW2016_2,
title = {Testing models of peripheral encoding using metamerism in an oddity paradigm},
journal = {Journal of Vision},
year = {2016},
month = {3},
volume = {16},
number = {2:4},
pages = {1--30},
abstract = {Most of the visual field is peripheral, and the periphery encodes visual input with less fidelity compared to the fovea. What information is encoded, and what is lost in the visual periphery? A systematic way to answer this question is to determine how sensitive the visual system is to different kinds of lossy image changes compared to the unmodified natural scene. If modified images are indiscriminable from the original scene, then the information discarded by the modification is not important for perception under the experimental conditions used. We measured the detectability of modifications of natural image structure using a temporal three-alternative oddity task, in which observers compared modified images to original natural scenes. We consider two lossy image transformations, Gaussian blur and Portilla and Simoncelli texture synthesis. Although our paradigm demonstrates metamerism (physically different images that appear the same) under some conditions, in general we find that humans can be capable of impressive sensitivity to deviations from natural appearance. The representations we examine here do not preserve all the information necessary to match the appearance of natural scenes in the periphery.},
web_url = {http://jov.arvojournals.org/article.aspx?articleid=2503433},
state = {published},
DOI = {10.1167/16.2.4},
author = {Wallis T; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Wichmann FA{felix}}
}
@Article{ CadwellPJBDYRSBTST2015,
title = {Electrophysiological, transcriptomic and morphologic profiling of single neurons using Patch-seq},
journal = {Nature Biotechnology},
year = {2016},
month = {2},
volume = {34},
number = {2},
pages = {199--203},
abstract = {Despite the importance of the mammalian neocortex for complex cognitive processes, we still lack a comprehensive description of its cellular components. To improve the classification of neuronal cell types and the functional characterization of single neurons, we present Patch-seq, a method that combines whole-cell electrophysiological patch-clamp recordings, single-cell RNA-sequencing and morphological characterization. Following electrophysiological characterization, cell contents are aspirated through the patch-clamp pipette and prepared for RNA-sequencing. Using this approach, we generate electrophysiological and molecular profiles of 58 neocortical cells and show that gene expression patterns can be used to infer the morphological and physiological properties such as axonal arborization and action potential amplitude of individual neurons. Our results shed light on the molecular underpinnings of neuronal diversity and suggest that Patch-seq can facilitate the classification of cell types in the nervous system.},
web_url = {http://www.nature.com/nbt/journal/v34/n2/pdf/nbt.3445.pdf},
state = {published},
DOI = {10.1038/nbt.3445},
author = {Cadwell CR; Palasantza A; Jiang X; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Deng Q; Yilmaz M; Reimer J; Shen S; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Tolias KF; Sandberg R; Tolias AS{atolias}}
}
@Article{ EckerDBT2016,
title = {On the Structure of Neuronal Population Activity under Fluctuations in Attentional State},
journal = {Journal of Neuroscience},
year = {2016},
month = {2},
volume = {36},
number = {5},
pages = {1775--1789},
abstract = {Attention is commonly thought to improve behavioral performance by increasing response gain and suppressing shared variability in neuronal populations. However, both the focus and the strength of attention are likely to vary from one experimental trial to the next, thereby inducing response variability unknown to the experimenter. Here we study analytically how fluctuations in attentional state affect the structure of population responses in a simple model of spatial and feature attention. In our model, attention acts on the neural response exclusively by modulating each neuron's gain. Neurons are conditionally independent given the stimulus and the attentional gain, and correlated activity arises only from trial-to-trial fluctuations of the attentional state, which are unknown to the experimenter. We find that this simple model can readily explain many aspects of neural response modulation under attention, such as increased response gain, reduced individual and shared variability, increased correlations with firing rates, limited range correlations, and differential correlations. We therefore suggest that attention may act primarily by increasing response gain of individual neurons without affecting their correlation structure. The experimentally observed reduction in correlations may instead result from reduced variability of the attentional gain when a stimulus is attended. Moreover, we show that attentional gain fluctuations, even if unknown to a downstream readout, do not impair the readout accuracy despite inducing limited-range correlations, whereas fluctuations of the attended feature can in principle limit behavioral performance.},
web_url = {http://www.jneurosci.org/content/36/5/1775.full.pdf+html},
state = {published},
DOI = {10.1523/JNEUROSCI.2044-15.2016},
author = {Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Denfield GH; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}}
}
@Article{ BadenBFRBE2016,
title = {The functional diversity of retinal ganglion cells in the mouse},
journal = {Nature},
year = {2016},
month = {1},
volume = {529},
number = {7586},
pages = {345--350},
abstract = {In the vertebrate visual system, all output of the retina is carried by retinal ganglion cells. Each type encodes distinct visual features in parallel for transmission to the brain. How many such `output channels' exist and what each encodes are areas of intense debate. In the mouse, anatomical estimates range from 15 to 20 channels, and only a handful are functionally understood. By combining two-photon calcium imaging to obtain dense retinal recordings and unsupervised clustering of the resulting sample of more than 11,000 cells, here we show that the mouse retina harbours substantially more than 30 functional output channels. These include all known and several new ganglion cell types, as verified by genetic and anatomical criteria. Therefore, information channels from the mouse eye to the mouse brain are considerably more diverse than shown thus far by anatomical studies, suggesting an encoding strategy resembling that used in state-of-the-art artificial vision systems.},
web_url = {http://www.nature.com/nature/journal/v529/n7586/pdf/nature16468.pdf},
state = {published},
DOI = {10.1038/nature16468},
author = {Baden T; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Franke K; Rom{\'a}n Ros{\'o}n M; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Euler T}
}
@Article{ KummererWB2015,
title = {Information-theoretic model comparison unifies saliency metrics},
journal = {Proceedings of the National Academy of Sciences of the United States of America},
year = {2015},
month = {12},
volume = {112},
number = {52},
pages = {16054--16059},
abstract = {Learning the properties of an image associated with human gaze placement is important both for understanding how biological systems explore the environment and for computer vision applications. There is a large literature on quantitative eye movement models that seeks to predict fixations from images (sometimes termed "saliency" prediction). A major problem known to the field is that existing model comparison metrics give inconsistent results, causing confusion. We argue that the primary reason for these inconsistencies is because different metrics and models use different definitions of what a "saliency map" entails. For example, some metrics expect a model to account for image-independent central fixation bias whereas others will penalize a model that does. Here we bring saliency evaluation into the domain of information by framing fixation prediction models probabilistically and calculating information gain. We jointly optimize the scale, the center bias, and spatial blurring of all models within this framework. Evaluating existing metrics on these rephrased models produces almost perfect agreement in model rankings across the metrics. Model performance is separated from center bias and spatial blurring, avoiding the confounding of these factors in model comparison. We additionally provide a method to show where and how models fail to capture information in the fixations on the pixel level. These methods are readily extended to spatiotemporal models of fixation scanpaths, and we provide a software package to facilitate their use.},
web_url = {http://www.pnas.org/content/112/52/16054.full.pdf},
state = {published},
DOI = {10.1073/pnas.1510393112},
author = {K\"ummerer M{mkuemmerer}{Research Group Computational Vision and Neuroscience}; Wallis TS; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ GatysEB2015_3,
title = {A Neural Algorithm of Artistic Style},
journal = {Nature Communications},
year = {2015},
month = {10},
abstract = {In fine art, especially painting, humans have mastered the skill to create unique visual experiences through composing a complex interplay between the content and style of an image. Thus far the algorithmic basis of this process is unknown and there exists no artificial system with similar capabilities. However, in other key areas of visual perception such as object and face recognition near-human performance was recently demonstrated by a class of biologically inspired vision models called Deep Neural Networks. Here we introduce an artificial system based on a Deep Neural Network that creates artistic images of high perceptual quality. The system uses neural representations to separate and recombine content and style of arbitrary images, providing a neural algorithm for the creation of artistic images. Moreover, in light of the striking similarities between performance-optimised artificial neural networks and biological vision, our work offers a path forward to an algorithmic understanding of how humans create and perceive artistic imagery.},
web_url = {http://arxiv.org/abs/1508.06576},
state = {submitted},
author = {Gatys LA; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ GatysETB2015_2,
title = {Synaptic unreliability facilitates information transmission in balanced cortical populations},
journal = {Physical Review E},
year = {2015},
month = {6},
volume = {91},
number = {062707},
pages = {1--7},
abstract = {Synaptic unreliability is one of the major sources of biophysical noise in the brain. In the context of neural information processing, it is a central question how neural systems can afford this unreliability. Here we examine how synaptic noise affects signal transmission in cortical circuits, where excitation and inhibition are thought to be tightly balanced. Surprisingly, we find that in this balanced state synaptic response variability actually facilitates information transmission, rather than impairing it. In particular, the transmission of fast-varying signals benefits from synaptic noise, as it instantaneously increases the amount of information shared between presynaptic signal and postsynaptic current. Furthermore we show that the beneficial effect of noise is based on a very general mechanism which contrary to stochastic resonance does not reach an optimum at a finite noise level.},
web_url = {http://journals.aps.org/pre/pdf/10.1103/PhysRevE.91.062707},
state = {published},
DOI = {10.1103/PhysRevE.91.062707},
author = {Gatys LA; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Tchumatchenko T; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ LudtkeDTB2015,
title = {A Generative Model of Natural Texture Surrogates},
journal = {-},
year = {2015},
month = {5},
abstract = {Natural images can be viewed as patchworks of different textures, where the local image statistics is roughly stationary within a small neighborhood but otherwise varies from region to region. In order to model this variability, we first applied the parametric texture algorithm of Portilla and Simoncelli to image patches of 64X64 pixels in a large database of natural images such that each image patch is then described by 655 texture parameters which specify certain statistics, such as variances and covariances of wavelet coefficients or coefficient magnitudes within that patch. To model the statistics of these texture parameters, we then developed suitable nonlinear transformations of the parameters that allowed us to fit their joint statistics with a multivariate Gaussian distribution. We find that the first 200 principal components contain more than 99% of the variance and are sufficient to generate textures that are perceptually extremely close to those generated with all 655 components. We demonstrate the usefulness of the model in several ways: (1) We sample ensembles of texture patches that can be directly compared to samples of patches from the natural image database and can to a high degree reproduce their perceptual appearance. (2) We further developed an image compression algorithm which generates surprisingly accurate images at bit rates as low as 0.14 bits/pixel. Finally, (3) We demonstrate how our approach can be used for an efficient and objective evaluation of samples generated with probabilistic models of natural images.},
web_url = {http://arxiv.org/abs/1505.07672},
state = {submitted},
author = {L\"udtke N; Das D; Theis L; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ SinzLGB2014,
title = {Natter: A Python Natural Image Statistics Toolbox},
journal = {Journal of Statistical Software},
year = {2014},
month = {11},
volume = {61},
number = {5},
pages = {1--34},
abstract = {The statistical analysis and modeling of natural images is an important branch of statistics with applications in image signaling, image compression, computer vision, and human perception. Because the space of all possible images is too large to be sampled exhaustively, natural image models must inevitably make assumptions in order to stay tractable. Subsequent model comparison can then filter out those models that best capture the statistical regularities in natural images. Proper model comparison, however, often requires that the models and the preprocessing of the data match down to the implementation details. Here we present the Natter, a statistical software toolbox for natural images models, that can provide such consistency. The Natter includes powerful but tractable baseline model as well as standardized data preprocessing steps. It has an extensive test suite to ensure correctness of its algorithms, it interfaces to the modular toolkit for data processing toolbox MDP, and provides simple ways to log the results of numerical experiments. Most importantly, its modular structure can be extended by new models with minimal coding effort, thereby providing a platform for the development and comparison of probabilistic models for natural image data.},
web_url = {http://www.jstatsoft.org/v61/i05},
state = {published},
author = {Sinz FH{fabee}{Research Group Computational Vision and Neuroscience}{Research Group Computational Vision and Neuroscience}; Lies J-P; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ HosseiniSTB2014,
title = {Statistical inference with the Elliptical Gamma Distribution},
journal = {Machine Learning},
year = {2014},
month = {10},
abstract = {This paper studies mixture modeling using the Elliptical Gamma distribution (EGD)---a distribution that has parametrized tail and peak behaviors and offers richer modeling power than the multivariate Gaussian. First, we study maximum likelihood (ML) parameter estimation for a single EGD, a task that involves nontrivial conic optimization problems. We solve these problems by developing globally convergent fixed-point methods for them. Next, we consider fitting mixtures of EGDs, for which we first derive a closed-form expression for the KL-divergence between two EGDs and then use it in a "split-and-merge" expectation maximization algorithm. We demonstrate the ability of our proposed mixture modelling in modelling natural image patches.},
web_url = {http://arxiv.org/abs/1410.4812},
state = {submitted},
author = {Hosseini R{hosseini}{Research Group Computational Vision and Neuroscience}; Sra S{suvrit}; Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ FroudarakisBECSYSBT2014,
title = {Population code in mouse V1 facilitates readout of natural scenes through increased sparseness},
journal = {Nature Neuroscience},
year = {2014},
month = {6},
volume = {17},
number = {6},
pages = {851--857},
abstract = {Neural codes are believed to have adapted to the statistical properties of the natural environment. However, the principles that govern the organization of ensemble activity in the visual cortex during natural visual input are unknown. We recorded populations of up to 500 neurons in the mouse primary visual cortex and characterized the structure of their activity, comparing responses to natural movies with those to control stimuli. We found that higher order correlations in natural scenes induced a sparser code, in which information is encoded by reliable activation of a smaller set of neurons and can be read out more easily. This computationally advantageous encoding for natural scenes was state-dependent and apparent only in anesthetized and active awake animals, but not during quiet wakefulness. Our results argue for a functional benefit of sparsification that could be a general principle governing the structure of the population activity throughout cortical microcircuits.},
web_url = {http://www.nature.com/neuro/journal/v17/n6/pdf/nn.3707.pdf},
state = {published},
DOI = {10.1038/nn.3707},
author = {Froudarakis E; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Cotton RJ; Sinz FH{fabee}{Research Group Computational Vision and Neuroscience}{Research Group Computational Vision and Neuroscience}; Yatsenko D; Saggau P; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}}
}
@Article{ EckerBCSDCSBT2014,
title = {State Dependence of Noise Correlations in Macaque Primary Visual Cortex},
journal = {Neuron},
year = {2014},
month = {4},
volume = {82},
number = {1},
pages = {235--248},
abstract = {Shared, trial-to-trial variability in neuronal populations has a strong impact on the accuracy of information processing in the brain. Estimates of the level of such noise correlations are diverse, ranging from 0.01 to 0.4, with little consensus on which factors account for these differences. Here we addressed one important factor that varied across studies, asking how anesthesia affects the population activity structure in macaque primary visual cortex. We found that under opioid anesthesia, activity was dominated by strong coordinated fluctuations on a timescale of 1--2 Hz, which were mostly absent in awake, fixating monkeys. Accounting for these global fluctuations markedly reduced correlations under anesthesia, matching those observed during wakefulness and reconciling earlier studies conducted under anesthesia and in awake animals. Our results show that internal signals, such as brain state transitions under anesthesia, can induce noise correlations but can also be estimated and accounted for based on neuronal population activity.},
web_url = {http://www.sciencedirect.com/science/article/pii/S0896627314001044},
state = {published},
DOI = {10.1016/j.neuron.2014.02.006},
author = {Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Cotton RJ; Subramaniyan M; Denfield GH; Cadwell CR; Smirnakis SM{ssmirnakis}{Department Physiology of Cognitive Processes}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}}
}
@Article{ LiesHB2014,
title = {Slowness and sparseness have diverging effects on complex cell learning},
journal = {PLoS Computational Biology},
year = {2014},
month = {3},
volume = {10},
number = {3},
pages = {1--11},
abstract = {Following earlier studies which showed that a sparse coding principle may explain the receptive field properties of complex cells in primary visual cortex, it has been concluded that the same properties may be equally derived from a slowness principle. In contrast to this claim, we here show that slowness and sparsity drive the representations towards substantially different receptive field properties. To do so, we present complete sets of basis functions learned with slow subspace analysis (SSA) in case of natural movies as well as translations, rotations, and scalings of natural images. SSA directly parallels independent subspace analysis (ISA) with the only difference that SSA maximizes slowness instead of sparsity. We find a large discrepancy between the filter shapes learned with SSA and ISA. We argue that SSA can be understood as a generalization of the Fourier transform where the power spectrum corresponds to the maximally slow subspace energies in SSA. Finally, we investigate the trade-off between slowness and sparseness when combined in one objective function.},
web_url = {http://www.ploscompbiol.org/article/fetchObject.action;jsessionid=C70AA42BAD1A88382D67F8CAD3571B65?uri=info%3Adoi%2F10.1371%2Fjournal.pcbi.1003468&representation=PDF},
state = {published},
DOI = {10.1371/journal.pcbi.1003468},
EPUB = {e1003468},
author = {Lies J-P; H\"afner RM{rhaefner}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ GerhardB2014,
title = {Towards Rigorous Study of Artistic Style: A New Psychophysical Paradigm},
journal = {Art \& Perception},
year = {2014},
month = {2},
volume = {2},
number = {1-2},
pages = {23--44},
abstract = {What makes one artist's style so different from another's? How do we perceive these differences? Studying the perception of artistic style has proven difficult. Observers typically view several artworks and must group them or rate similarities between pairs. Responses are often driven by semantic variables, such as scene type or the presence/absence of particular subject matter, which leaves little room for studying how viewers distinguish a Degas ballerina from a Toulouse-Lautrec ballerina, for example. In the current paper, we introduce a new psychophysical paradigm for studying artistic style that focuses on visual qualities and avoids semantic categorization issues by presenting only very local views of a piece, thereby precluding object recognition. The task recasts stylistic judgment in a psychophysical texture discrimination framework, where visual judgments can be rigorously measured for trained and untrained observers alike. Stimuli were a dataset of drawings by Pieter Bruegel the Elder and his imitators studied by the computer science community, which showed that statistical analyses of the drawings' local content can distinguish an authentic Bruegel from an imitation. Our non-expert observers also successfully discriminated the authentic and inauthentic drawings and furthermore discriminated stylistic variations within the categories, demonstrating the new paradigm's feasibility for studying artistic style perception. At the same time, however, we discovered several issues in the Bruegel dataset that bear on conclusions drawn by the computer vision studies of artistic style.},
web_url = {http://booksandjournals.brillonline.com/content/journals/10.1163/22134913-00002010},
state = {published},
DOI = {10.1163/22134913-00002010},
author = {Gerhard HE{hgerhard}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ ChagasTSSBS2013,
title = {Functional analysis of ultra high information rates conveyed by rat vibrissal primary afferents},
journal = {Frontiers in Neural Circuits},
year = {2013},
month = {12},
volume = {7},
number = {190},
pages = {1--17},
abstract = {Sensory receptors determine the type and the quantity of information available for perception. Here, we quantified and characterized the information transferred by primary afferents in the rat whisker system using neural system identification. Quantification of ``how much'' information is conveyed by primary afferents, using the direct method (DM), a classical information theoretic tool, revealed that primary afferents transfer huge amounts of information (up to 529 bits/s). Information theoretic analysis of instantaneous spike-triggered kinematic stimulus features was used to gain functional insight on ``what'' is coded by primary afferents. Amongst the kinematic variables tested---position, velocity, and acceleration---primary afferent spikes encoded velocity best. The other two variables contributed to information transfer, but only if combined with velocity. We further revealed three additional characteristics that play a role in information transfer by primary afferents. Firstly, primary afferent spikes show preference for well separated multiple stimuli (i.e., well separated sets of combinations of the three instantaneous kinematic variables). Secondly, neurons are sensitive to short strips of the stimulus trajectory (up to 10 ms pre-spike time), and thirdly, they show spike patterns (precise doublet and triplet spiking). In order to deal with these complexities, we used a flexible probabilistic neuron model fitting mixtures of Gaussians to the spike triggered stimulus distributions, which quantitatively captured the contribution of the mentioned features and allowed us to achieve a full functional analysis of the total information rate indicated by the DM. We found that instantaneous position, velocity, and acceleration explained about 50% of the total information rate. Adding a 10 ms pre-spike interval of stimulus trajectory achieved 80--90%. The final 10--20% were found to be due to non-linear coding by spike bursts.},
web_url = {http://www.frontiersin.org/Journal/DownloadFile.ashx?pdf=1&FileId=125135&articleId=56643&Version=1&ContentTypeId=21&FileName=fncir-07-00190.pdf},
state = {published},
DOI = {10.3389/fncir.2013.00190},
author = {Chagas AM; Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Sengupta B{sengupta}{Department Empirical Inference}{Department Physiology of Cognitive Processes}{Research Group Computational Vision and Neuroscience}; St\"uttgen MC; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Schwarz C}
}
@Article{ TheisCASB2013,
title = {Beyond GLMs: A Generative Mixture Modeling Approach to Neural System Identification},
journal = {PLoS Computational Biology},
year = {2013},
month = {11},
volume = {9},
number = {11},
pages = {1--9},
abstract = {Generalized linear models (GLMs) represent a popular choice for the probabilistic characterization of neural spike responses. While GLMs are attractive for their computational tractability, they also impose strong assumptions and thus only allow for a limited range of stimulus-response relationships to be discovered. Alternative approaches exist that make only very weak assumptions but scale poorly to high-dimensional stimulus spaces. Here we seek an approach which can gracefully interpolate between the two extremes. We extend two frequently used special cases of the GLM---a linear and a quadratic model---by assuming that the spike-triggered and non-spike-triggered distributions can be adequately represented using Gaussian mixtures. Because we derive the model from a generative perspective, its components are easy to interpret as they correspond to, for example, the spike-triggered distribution and the interspike interval distribution. The model is able to capture complex dependencies on high-dimensional stimuli with far fewer parameters than other approaches such as histogram-based methods. The added flexibility comes at the cost of a non-concave log-likelihood. We show that in practice this does not have to be an issue and the mixture-based model is able to outperform generalized linear and quadratic models.},
web_url = {http://www.ploscompbiol.org/article/fetchObject.action;jsessionid=1950DEF2F763DABCACA00DF2E02CCD34?uri=info%3Adoi%2F10.1371%2Fjournal.pcbi.1003356&representation=PDF},
state = {published},
DOI = {10.1371/journal.pcbi.1003356},
EPUB = {e1003356},
author = {Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Chagas AM; Arnstein D{darnstein}{Research Group Computational Vision and Neuroscience}; Schwarz C; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ SinzB2013,
title = {What Is the Limit of Redundancy Reduction with Divisive Normalization?},
journal = {Neural Computation},
year = {2013},
month = {11},
volume = {25},
number = {11},
pages = {2809--2814},
abstract = {Divisive normalization has been proposed as a nonlinear redundancy reduction mechanism capturing contrast correlations. Its basic function is a radial rescaling of the population response. Because of the saturation of divisive normalization, however, it is impossible to achieve a fully independent representation. In this letter, we derive an analytical upper bound on the inevitable residual redundancy of any saturating radial rescaling mechanism.},
web_url = {http://www.mitpressjournals.org/doi/pdf/10.1162/NECO_a_00505},
state = {published},
DOI = {10.1162/NECO_a_00505},
author = {Sinz F{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ HaefnerGMB2013,
title = {Inferring decoding strategies from choice probabilities in the presence of correlated variability},
journal = {Nature Neuroscience},
year = {2013},
month = {2},
volume = {16},
number = {2},
pages = {235--242},
abstract = {The activity of cortical neurons in sensory areas covaries with perceptual decisions, a relationship that is often quantified by choice probabilities. Although choice probabilities have been measured extensively, their interpretation has remained fraught with difficulty. We derive the mathematical relationship between choice probabilities, read-out weights and correlated variability in the standard neural decision-making model. Our solution allowed us to prove and generalize earlier observations on the basis of numerical simulations and to derive new predictions. Notably, our results indicate how the read-out weight profile, or decoding strategy, can be inferred from experimentally measurable quantities. Furthermore, we developed a test to decide whether the decoding weights of individual neurons are optimal for the task, even without knowing the underlying correlations. We confirmed the practicality of our approach using simulated data from a realistic population model. Thus, our findings provide a theoretical foundation for a growing body of experimental results on choice probabilities and correlations.},
web_url = {http://www.nature.com/neuro/journal/v16/n2/pdf/nn.3309.pdf},
state = {published},
DOI = {10.1038/nn.3309},
author = {Haefner RM{rhaefner}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ GerhardWB2013,
title = {How sensitive is the human visual system to the local statistics of natural images?},
journal = {PLoS Computational Biology},
year = {2013},
month = {1},
volume = {9},
number = {1},
pages = {1--15},
abstract = {A key hypothesis in sensory system neuroscience is that sensory representations are adapted to the statistical regularities in sensory signals and thereby incorporate knowledge about the outside world. Supporting this hypothesis, several probabilistic models of local natural image regularities have been proposed that reproduce neural response properties. Although many such physiological links have been made, these models have not been linked directly to visual sensitivity. Previous psychophysical studies of sensitivity to natural image regularities focus on global perception of large images, but much less is known about sensitivity to local natural image regularities. We present a new paradigm for controlled psychophysical studies of local natural image regularities and compare how well such models capture perceptually relevant image content. To produce stimuli with precise statistics, we start with a set of patches cut from natural images and alter their content to generate a matched set whose joint statistics are equally likely under a probabilistic natural image model. The task is forced choice to discriminate natural patches from model patches. The results show that human observers can learn to discriminate the higher-order regularities in natural images from those of model samples after very few exposures and that no current model is perfect for patches as small as 5 by 5 pixels or larger. Discrimination performance was accurately predicted by model likelihood, an information theoretic measure of model efficacy, indicating that the visual system possesses a surprisingly detailed knowledge of natural image higher-order correlations, much more so than current image models. We also perform three cue identification experiments to interpret how model features correspond to perceptually relevant image features.},
web_url = {http://www.ploscompbiol.org/article/fetchObjectAttachment.action;jsessionid=3383816275C3FD37ED05174B95B4B9D2?uri=info%3Adoi%2F10.1371%2Fjournal.pcbi.1002873&representation=PDF},
state = {published},
DOI = {10.1371/journal.pcbi.1002873},
EPUB = {e1002873},
author = {Gerhard HE{hgerhard}{Research Group Computational Vision and Neuroscience}; Wichmann FA{felix}{Department Empirical Inference}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ BadenBBE2013,
title = {Spikes in Mammalian Bipolar Cells Support Temporal Layering of the Inner Retina},
journal = {Current Biology},
year = {2013},
month = {1},
volume = {23},
number = {1},
pages = {48--52},
abstract = {In the mammalian retina, 10–12 different cone bipolar cell (BC) types decompose the photoreceptor signal into parallel channels [1, 2, 3, 4, 5, 6, 7 and 8], providing the basis for the functional diversity of retinal ganglion cells (RGCs) [9]. BCs differing in their temporal properties appear to project to different strata of the retina’s inner synaptic layer [10 and 11], based on somatic recordings of BCs [1, 2, 4, 12, 13 and 14] and excitatory synaptic currents measured in RGCs [10]. However, postsynaptic currents in RGCs are influenced by dendritic morphology [15 and 16] and receptor types [17], and the BC signal can be transformed at the axon terminals both through interactions with amacrine cells [18 and 19] and through the generation of all-or-nothing spikes [20, 21, 22, 23 and 24]. Therefore, the temporal properties of the BC output have not been analyzed systematically across different types of mammalian BCs. We recorded calcium signals directly within axon terminals using two-photon imaging [25 and 26] and show that BCs can be divided into ≥eight functional clusters. The temporal properties of the BC output were directly reflected in their anatomical organization within the retina’s inner synaptic layer: faster cells stratified closer to the border between ON and OFF sublamina. Moreover, ≥three fastest groups generated clear all-or-nothing spikes. Therefore, the systematic projection pattern of BCs provides distinct temporal “building blocks” for the feature extracting circuits of the inner retina.},
web_url = {http://www.sciencedirect.com/science/article/pii/S0960982212013152},
state = {published},
DOI = {10.1016/j.cub.2012.11.006},
author = {Baden T; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Euler T}
}
@Article{ SinzB2013_2,
title = {Temporal Adaptation Enhances Efficient Contrast Gain Control on Natural Images},
journal = {PLoS Computational Biology},
year = {2013},
month = {1},
volume = {9},
number = {1},
pages = {1--13},
abstract = {Divisive normalization in primary visual cortex has been linked to adaptation to natural image statistics in accordance to Barlow's redundancy reduction hypothesis. Using recent advances in natural image modeling, we show that the previously studied static model of divisive normalization is rather inefficient in reducing local contrast correlations, but that a simple temporal contrast adaptation mechanism of the half-saturation constant can substantially increase its efficiency. Our findings reveal the experimentally observed temporal dynamics of divisive normalization to be critical for redundancy reduction.},
web_url = {http://www.ploscompbiol.org/article/fetchObject.action?uri=info%3Adoi%2F10.1371%2Fjournal.pcbi.1002889&representation=PDF},
state = {published},
DOI = {10.1371/journal.pcbi.1002889},
EPUB = {e1002889},
author = {Sinz F{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ BerensECMBT2012,
title = {A Fast and Simple Population Code for Orientation in Primate V1},
journal = {Journal of Neuroscience},
year = {2012},
month = {8},
volume = {32},
number = {31},
pages = {10618--10626},
abstract = {Orientation tuning has been a classic model for understanding single-neuron computation in the neocortex. However, little is known about how orientation can be read out from the activity of neural populations, in particular in alert animals. Our study is a first step toward that goal. We recorded from up to 20 well isolated single neurons in the primary visual cortex of alert macaques simultaneously and applied a simple, neurally plausible decoder to read out the population code. We focus on two questions: First, what are the time course and the timescale at which orientation can be read out from the population response? Second, how complex does the decoding mechanism in a downstream neuron have to be to reliably discriminate between visual stimuli with different orientations? We show that the neural ensembles in primary visual cortex of awake macaques represent orientation in a way that facilitates a fast and simple readout mechanism: With an average latency of 30–80 ms, the population code can be read out instantaneously with a short integration time of only tens of milliseconds, and neither stimulus contrast nor correlations need to be taken into account to compute the optimal synaptic weight pattern. Our study shows that—similar to the case of single-neuron computation—the representation of orientation in the spike patterns of neural populations can serve as an exemplary case for understanding the computations performed by neural ensembles underlying visual processing during behavior.},
web_url = {http://www.jneurosci.org/content/32/31/10618.full.pdf+html},
state = {published},
DOI = {10.1523/JNEUROSCI.1335-12.2012},
author = {Berens P; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Cotton RJ; Ma WJ; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}}
}
@Article{ TheisHB2012_2,
title = {Mixtures of Conditional Gaussian Scale Mixtures Applied to Multiscale Image Representations},
journal = {PLoS ONE},
year = {2012},
month = {7},
volume = {7},
number = {7},
pages = {1--8},
abstract = {We present a probabilistic model for natural images that is based on mixtures of Gaussian scale mixtures and a simple multiscale representation. We show that it is able to generate images with interesting higher-order correlations when trained on natural images or samples from an occlusion-based model. More importantly, our multiscale model allows for a principled evaluation. While it is easy to generate visually appealing images, we demonstrate that our model also yields the best performance reported to date when evaluated with respect to the cross-entropy rate, a measure tightly linked to the average log-likelihood. The ability to quantitatively evaluate our model differentiates it from other multiscale models, for which evaluation of these kinds of measures is usually intractable.},
web_url = {http://www.plosone.org/article/fetchObject.action?uri=info%3Adoi%2F10.1371%2Fjournal.pone.0039857&representation=PDF},
state = {published},
DOI = {10.1371/journal.pone.0039857},
EPUB = {e39857},
author = {Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Hosseini R{hosseini}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ PutzeysBWWG2012,
title = {A New Perceptual Bias Reveals Suboptimal Population Decoding of Sensory Responses},
journal = {PLoS Computational Biology},
year = {2012},
month = {4},
volume = {8},
number = {4},
pages = {1--13},
abstract = {Several studies have reported optimal population decoding of sensory responses in two-alternative visual discrimination tasks. Such decoding involves integrating noisy neural responses into a more reliable representation of the likelihood that the stimuli under consideration evoked the observed responses. Importantly, an ideal observer must be able to evaluate likelihood with high precision and only consider the likelihood of the two relevant stimuli involved in the discrimination task. We report a new perceptual bias suggesting that observers read out the likelihood representation with remarkably low precision when discriminating grating spatial frequencies. Using spectrally filtered noise, we induced an asymmetry in the likelihood function of spatial frequency. This manipulation mainly affects the likelihood of spatial frequencies that are irrelevant to the task at hand. Nevertheless, we find a significant shift in perceived grating frequency, indicating that observers evaluate likelihoods of a broad range of irrelevant frequencies and discard prior knowledge of stimulus alternatives when performing two-alternative discrimination.},
web_url = {http://www.ploscompbiol.org/article/fetchObjectAttachment.action;jsessionid=A1B286EF07386D526FFB894EF5E7644C?uri=info%3Adoi%2F10.1371%2Fjournal.pcbi.1002453&representation=PDF},
state = {published},
DOI = {10.1371/journal.pcbi.1002453},
EPUB = {e1002453},
author = {Putzeys T; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Wichmann F{felix}{Department Empirical Inference}; Wagemans J; Goris R}
}
@Article{ TheisGSB2011,
title = {In All Likelihood, Deep Belief Is Not Enough},
journal = {Journal of Machine Learning Research},
year = {2011},
month = {11},
volume = {12},
pages = {3071--3096},
abstract = {Statistical models of natural images provide an important tool for researchers in the fields of machine learning and computational neuroscience. The canonical measure to quantitatively assess and compare the performance of statistical models is given by the likelihood. One class of statistical models which has recently gained increasing popularity and has been applied to a variety of complex data is formed by deep belief networks. Analyses of these models, however, have often been limited to qualitative analyses based on samples due to the computationally intractable nature of their likelihood. Motivated by these circumstances, the present article introduces a consistent estimator for the likelihood of deep belief networks which is computationally tractable and simple to apply in practice. Using this estimator, we quantitatively investigate a deep belief network for natural image patches and compare its performance to the performance of other models for natural image patches. We find that the deep belief network is outperformed with respect to the likelihood even by very simple mixture models.},
web_url = {http://jmlr.csail.mit.edu/papers/v12/theis11a.html},
state = {published},
author = {Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Sinz F{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ EckerBTB2011,
title = {The effect of noise correlations in populations of diversely tuned neurons},
journal = {Journal of Neuroscience},
year = {2011},
month = {10},
volume = {31},
number = {40},
pages = {14272--14283},
abstract = {The amount of information encoded by networks of neurons critically depends on the correlation structure of their activity. Neurons with similar stimulus preferences tend to have higher noise correlations than others. In homogeneous populations of neurons, this limited range correlation structure is highly detrimental to the accuracy of a population code. Therefore, reduced spike count correlations under attention, after adaptation, or after learning have been interpreted as evidence for a more efficient population code. Here, we analyze the role of limited range correlations in more realistic, heterogeneous population models. We use Fisher information and maximum-likelihood decoding to show that reduced correlations do not necessarily improve encoding accuracy. In fact, in populations with more than a few hundred neurons, increasing the level of limited range correlations can substantially improve encoding accuracy. We found that this improvement results from a decrease in noise entropy that is associated with increasing correlations if the marginal distributions are unchanged. Surprisingly, for constant noise entropy and in the limit of large populations, the encoding accuracy is independent of both structure and magnitude of noise correlations.},
web_url = {http://www.jneurosci.org/content/31/40/14272.full.pdf+html},
state = {published},
DOI = {10.1523/JNEUROSCI.2539-11.2011},
author = {Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ KitchingAGHHMRSVBBBBCGHHHKKKMMNPRRSSSTVvWW2011,
title = {Gravitational Lensing Accuracy Testing 2010 (GREAT10) Challenge Handbook},
journal = {Annals of Applied Statistics},
year = {2011},
month = {9},
volume = {5},
number = {3},
pages = {2231--2263},
abstract = {GRavitational lEnsing Accuracy Testing 2010 (GREAT10) is a public image analysis challenge aimed at the development of algorithms to analyze astronomical images. Specifically, the challenge is to measure varying image distortions in the presence of a variable convolution kernel, pixelization and noise. This is the second in a series of challenges set to the astronomy, computer science and statistics communities, providing a structured environment in which methods can be improved and tested in preparation for planned astronomical surveys. GREAT10 extends upon previous work by introducing variable fields into the challenge. The “Galaxy Challenge” involves the precise measurement of galaxy shape distortions, quantified locally by two parameters called shear, in the presence of a known convolution kernel. Crucially, the convolution kernel and the simulated gravitational lensing shape distortion both now vary as a function of position within the images, as is the case for real data. In addition, we introduce the “Star Challenge” that concerns the reconstruction of a variable convolution kernel, similar to that in a typical astronomical observation. This document details the GREAT10 Challenge for potential participants. Continually updated information is also available from www.greatchallenges.info.},
file_url = {fileadmin/user_upload/files/publications/2011/GREAT10.pdf},
web_url = {http://projecteuclid.org/euclid.aoas/1318514302},
state = {published},
DOI = {10.1214/11-AOAS484},
author = {Kitching T; Amara A; Gill M; Harmeling S{harmeling}{Department Empirical Inference}; Heymans C; Massey R; Rowe B; Schrabback T; Voigt L; Balan S; Bernstein G; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Bridle S; Courbin F; Gentile M; Heavens A; Hirsch M{mhirsch}{Department Empirical Inference}; Hosseini R{hosseini}{Research Group Computational Vision and Neuroscience}; Kiessling A; Kirk D; Kuijken K; Mandelbaum R; Moghaddam B; Nurbaeva G; Paulin-Henriksson S; Rassat A; Rhodes J; Sch{\"o}lkopf B{bs}{Department Empirical Inference}; Shawe-Taylor J; Shmakova M; Taylor A; Velander M; van Waerbeke L; Witherick D; Wittman D}
}
@Article{ MackeBb2011,
title = {Statistical analysis of multi-cell recordings: linking population coding models to experimental data},
journal = {Frontiers in Computational Neuroscience},
year = {2011},
month = {7},
volume = {5},
number = {35},
pages = {1--2},
abstract = {Modern recording techniques such as multi-electrode arrays and two-photon imaging methods are capable of simultaneously monitoring the activity of large neuronal ensembles at single cell resolution. These methods finally give us the means to address some of the most crucial questions in systems neuroscience: what are the dynamics of neural population activity? How do populations of neurons perform computations? What is the functional organization of neural ensembles?
While the wealth of new experimental data generated by these techniques provides exciting opportunities to test ideas about how neural ensembles operate, it also provides major challenges: multi-cell recordings necessarily yield data which is high-dimensional in nature. Understanding this kind of data requires powerful statistical techniques for capturing the structure of the neural population responses, as well as their relationship with external stimuli or behavioral observations. Furthermore, linking recorded neural population activity to the predictions of theoretical models of population coding has turned out not to be straightforward.
These challenges motivated us to organize a workshop at the 2009 Computational Neuroscience Meeting in Berlin to discuss these issues. In order to collect some of the recent progress in this field, and to foster discussion on the most important directions and most pressing questions, we issued a call for papers for this Research Topic. We asked authors to address the following four questions:
1. What classes of statistical methods are most useful for modeling population activity?
2. What are the main limitations of current approaches, and what can be done to overcome them?
3. How can statistical methods be used to empirically test existing models of (probabilistic) population coding?
4. What role can statistical methods play in formulating novel hypotheses about the principles of information processing in neural populations?
A total of 15 papers addressing questions related to these themes are now collected in this Research Topic. Three of these articles have resulted in “Focused reviews” in Frontiers in Neuroscience (Crumiller et al., 2011; Rosenbaum et al., 2011; Tchumatchenko et al., 2011), illustrating the great interest in the topic. Many of the articles are devoted to a better understanding of how correlations arise in neural circuits, and how they can be detected, modeled, and interpreted. For example, by modeling how pairwise correlations are transformed by spiking non-linearities in simple neural circuits, Tchumatchenko et al. (2010) show that pairwise correlation coefficients have to be interpreted with care, since their magnitude can depend strongly on the temporal statistics of their input-correlations. In a similar spirit, Rosenbaum et al. (2010) study how correlations can arise and accumulate in feed-forward circuits as a result of pooling of correlated inputs.
Lyamzin et al. (2010) and Krumin et al. (2010) present methods for simulating correlated population activity and extend previous work to more general settings. The method of Lyamzin et al. (2010) allows one to generate synthetic spike trains which match commonly reported statistical properties, such as time varying firing rates as well signal and noise correlations. The Hawkes framework presented by Krumin et al. (2010) allows one to fit models of recurrent population activity to the correlation-structure of experimental data. Louis et al. (2010) present a novel method for generating surrogate spike trains which can be useful when trying to assess the significance and time-scale of correlations in neural spike trains. Finally, Pipa and Munk (2011) study spike synchronization in prefrontal cortex during working memory.
A number of studies are also devoted to advancing our methodological toolkit for analyzing various aspects of population activity (Gerwinn et al., 2010; Machens, 2010; Staude et al., 2010; Yu et al., 2010). For example, Gerwinn et al. (2010) explain how full probabilistic inference can be performed in the popular model class of generalized linear models (GLMs), and study the effect of using prior distributions on the parameters of the stimulus and coupling filters. Staude et al. (2010) extend a method for detecting higher-order correlations between neurons via population spike counts to non-stationary settings. Yu et al. (2010) describe a new technique for estimating the information rate of a population of neurons using frequency-domain methods. Machens (2010) introduces a novel extension of principal component analysis for separating the variability of a neural response into different sources.
Focusing less on the spike responses of neural populations but on aggregate signals of population activity, Boatman-Reich et al. (2010) and Hoerzer et al. (2010) describe methods for a quantitative analysis of field potential recordings. While Boatman-Reich et al. (2010) discuss a number of existing techniques in a unified framework and highlight the potential pitfalls associated with such approaches, Hoerzer et al. (2010) demonstrate how multivariate autoregressive models and the concept of Granger causality can be used to infer local functional connectivity in area V4 of behaving macaques.
A final group of studies is devoted to understanding experimental data in light of computational models (Galán et al., 2010; Pandarinath et al., 2010; Shteingart et al., 2010). Pandarinath et al. (2010) present a novel mechanism that may explain how neural networks in the retina switch from one state to another by a change in gap junction coupling, and conjecture that this mechanism might also be found in other neural circuits. Galán et al. (2010) present a model of how hypoxia may change the network structure in the respiratory networks in the brainstem, and analyze neural correlations in multi-electrode recordings in light of this model. Finally, Shteingart et al. (2010) show that the spontaneous activation sequences they find in cultured networks cannot be explained by Zipf’s law, but rather require a wrestling model.
The papers of this Research Topic thus span a wide range of topics in the statistical modeling of multi-cell recordings. Together with other recent advances, they provide us with a useful toolkit to tackle the challenges presented by the vast amount of data collected with modern recording techniques. The impact of novel statistical methods on the field and their potential to generate scientific progress, however, depends critically on how readily they can be adopted and applied by laboratories and researchers working with experimental data. An important step toward this goal is to also publish computer code along with the articles (Barnes, 2010) as a successful implementation of advanced methods also relies on many details which are hard to communicate in the article itself. In this way it becomes much more likely that other researchers can actually use the methods, and unnecessary re-implementations can be avoided. Some of the papers in this Research Topic already follow this goal (Gerwinn et al., 2010; Louis et al., 2010; Lyamzin et al., 2010). We hope that this practice becomes more and more common in the future and encourage authors and editors of Research Topics to make as much code available as possible, ideally in a format that can be easily integrated with existing software sharing initiatives (Herz et al., 2008; Goldberg et al., 2009).},
web_url = {http://www.frontiersin.org/Computational_Neuroscience/10.3389/fncom.2011.00035/full},
state = {published},
DOI = {10.3389/fncom.2011.00035},
author = {Macke J{jakob}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ MackeOb2011,
title = {Common Input Explains Higher-Order Correlations and Entropy in a Simple Model of Neural Population Activity},
journal = {Physical Review Letters},
year = {2011},
month = {5},
volume = {106},
number = {20},
pages = {1--4},
abstract = {Simultaneously recorded neurons exhibit correlations whose underlying causes are not known. Here, we use a population of threshold neurons receiving correlated inputs to model neural population recordings. We show analytically that small changes in second-order correlations can lead to large changes in higher-order redundancies, and that the resulting interactions have a strong impact on the entropy, sparsity, and statistical heat capacity of the population. Our findings for this simple model may explain some surprising effects recently observed in neural population recordings.},
web_url = {http://prl.aps.org/pdf/PRL/v106/i20/e208102},
state = {published},
DOI = {10.1103/PhysRevLett.106.208102},
EPUB = {208102},
author = {Macke JH{jakob}; Opper M; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ 6516,
title = {Gaussian process methods for estimating cortical maps},
journal = {NeuroImage},
year = {2011},
month = {5},
volume = {56},
number = {2},
pages = {570--581},
abstract = {A striking feature of cortical organization is that the encoding of many stimulus features, for example orientation or direction selectivity, is arranged into topographic maps. Functional imaging methods such as optical imaging of intrinsic signals, voltage sensitive dye imaging or functional magnetic resonance imaging are important tools for studying the structure of cortical maps. As functional imaging measurements are usually noisy, statistical processing of the data is necessary to extract maps from the imaging data. We here present a probabilistic model of functional imaging data based on Gaussian processes. In comparison to conventional approaches, our model yields superior estimates of cortical maps from smaller amounts of data. In addition, we obtain quantitative uncertainty estimates, i.e. error bars on properties of the estimated map. We use our probabilistic model to study the coding properties of the map and the role of noise-correlations by decoding the stimulus from single trials of an imaging experiment.},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6WNP-5032NNX-1-3&_cdi=6968&_user=29041&_pii=S1053811910007007&_origin=&_coverDate=05%2F15%2F2011&_sk=999439997&view=c&wchp=dGLbVlz-zSkWl&md5=17cff103ca4f9e756eee9e6711fca3e4&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/j.neuroimage.2010.04.272},
author = {Macke JH{jakob}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; White LW; Kaschube M; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ BerensEGTB2011,
title = {Reassessing optimal neural population codes with neurometric functions},
journal = {Proceedings of the National Academy of Sciences of the United States of America},
year = {2011},
month = {3},
volume = {108},
number = {11},
pages = {4423--4428},
abstract = {Cortical circuits perform the computations underlying rapid perceptual decisions within a few dozen milliseconds with each neuron emitting only a few spikes. Under these conditions, the theoretical analysis of neural population codes is challenging, as the most commonly used theoretical tool—Fisher information—can lead to erroneous conclusions about the optimality of different coding schemes. Here we revisit the effect of tuning function width and correlation structure on neural population codes based on ideal observer analysis in both a discrimination and a reconstruction task. We show that the optimal tuning function width and the optimal correlation structure in both paradigms strongly depend on the available decoding time in a very similar way. In contrast, population codes optimized for Fisher information do not depend on decoding time and are severely suboptimal when only few spikes are available. In addition, we use the neurometric functions of the ideal observer in the classification task to investigate the differential coding properties of these Fisher-optimal codes for fine and coarse discrimination. We find that the discrimination error for these codes does not decrease to zero with increasing population size, even in simple coarse discrimination tasks. Our results suggest that quite different population codes may be optimal for rapid decoding in cortical computations than those inferred from the optimization of Fisher information.},
web_url = {http://www.pnas.org/content/108/11/4423.full.pdf+html},
state = {published},
DOI = {10.1073/pnas.1015904108},
author = {Berens P{berens}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ 7040,
title = {Reconstructing stimuli from the spike-times of leaky integrate and fire neurons},
journal = {Frontiers in Neuroscience},
year = {2011},
month = {2},
volume = {5},
number = {1},
pages = {1--16},
abstract = {Reconstructing stimuli from the spike trains of neurons is an important approach for understanding the neural code. One of the difficulties associated with this task is that signals which are varying continuously in time are encoded into sequences of discrete events or spikes. An important problem is to determine how much information about the continuously varying stimulus can be extracted from the time-points at which spikes were observed, especially if these time-points are subject to some sort of randomness. For the special case of spike trains generated by leaky integrate and fire neurons, noise can be introduced by allowing variations in the threshold every time a spike is released. A simple decoding algorithm previously derived for the noiseless case can be extended to the stochastic case, but turns out to be biased. Here, we review a solution to this problem, by presenting a simple yet efficient algorithm which greatly reduces the bias, and therefore leads to better decoding performance in the stochastic case.},
web_url = {http://www.frontiersin.org/Neuroscience/10.3389/fnins.2011.00001/abstract},
state = {published},
DOI = {10.3389/fnins.2011.00001},
EPUB = {1-9},
author = {Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ 6823,
title = {Lp-Nested Symmetric Distributions},
journal = {Journal of Machine Learning Research},
year = {2010},
month = {12},
volume = {11},
pages = {3409--3451},
file_url = {/fileadmin/user_upload/files/publications/SinzBethge2010aArchivX_[0].pdf},
web_url = {http://jmlr.csail.mit.edu/papers/volume11/sinz10a/sinz10a.pdf},
state = {published},
author = {Sinz F{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ 6687,
title = {Lower bounds on the redundancy of natural images},
journal = {Vision Research},
year = {2010},
month = {10},
volume = {50},
number = {22},
pages = {2213--2222},
abstract = {The light intensities of natural images exhibit a high degree of redundancy. Knowing the exact amount of their statistical dependencies is important for biological vision as well as compression and coding applications but estimating the total amount of redundancy, the multi-information, is intrinsically hard. The common approach is to estimate the multi-information for patches of increasing sizes and divide by the number of pixels. Here, we show that the limiting value of this sequence---the multi-information rate---can be better estimated by using another limiting process based on measuring the mutual information between a pixel and a causal neighborhood of increasing size around it. Although in principle this method has been known for decades, its superiority for estimating the multi-information rate of natural images has not been fully exploited yet. Either method provides a lower bound on the multi-information rate, but the mutual information based sequence converges much faster to the multi-information rate than the conventional method does. Using this fact, we provide improved estimates of the multi-information rate of natural images and a better understanding of its underlying spatial structure.},
file_url = {/fileadmin/user_upload/files/publications/HosseiniEtAl2009_[0].pdf},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6T0W-50RP1WF-1-4D&_cdi=4873&_user=29041&_pii=S004269891000372X&_origin=search&_coverDate=10%2F28%2F2010&_sk=999499977&view=c&w},
state = {published},
DOI = {10.1016/j.visres.2010.07.025},
author = {Hosseini R{hosseini}{Research Group Computational Vision and Neuroscience}; Sinz FH{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ 6340,
title = {Results of the GREAT08 Challenge: An image analysis competition for cosmological lensing},
journal = {Monthly Notices of the Royal Astronomical Society},
year = {2010},
month = {7},
volume = {405},
number = {3},
pages = {2044--2061},
abstract = {We present the results of the GREAT08 Challenge, a blind analysis challenge to infer weak gravitational lensing shear distortions from images. The primary goal was to stimulate new ideas by presenting the problem to researchers outside the shear measurement community. Six GREAT08 Team methods were presented at the launch of the Challenge and five additional groups submitted results during the 6 month competition. Participants analyzed 30 million simulated galaxies with a range in signal to noise ratio, point-spread function ellipticity, galaxy size, and galaxy type. The large quantity of simulations allowed shear measurement methods to be assessed at a level of accuracy suitable for currently planned future cosmic shear observations for the first time. Different methods perform well in different parts of simulation parameter space and come close to the target level of accuracy in several of these. A number of fresh ideas have emerged as a result of the Challenge including a re-examination of the process of combining information from different galaxies, which reduces the dependence on realistic galaxy modelling. The image simulations will become increasingly sophisticated in future GREAT challenges, meanwhile the GREAT08 simulations remain as a benchmark for additional developments in shear measurement algorithms.},
web_url = {http://www3.interscience.wiley.com/cgi-bin/fulltext/123456253/PDFSTART},
state = {published},
DOI = {10.1111/j.1365-2966.2010.16598.x},
author = {Bridle S; Balan ST; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Gentile M; Harmeling S{harmeling}{Department Empirical Inference}; Heymans C; Hirsch M{mhirsch}{Department Empirical Inference}; Hosseini R{hosseini}{Research Group Computational Vision and Neuroscience}; Jarvis M; Kirk D; Kitching T; Kuijken K; Lewis A; Paulin-Henriksson S; Sch\"olkopf B{bs}{Department Empirical Inference}; Velander M; Voigt L; Witherick D; Amara A; Bernstein G; Courbin F; Gill M; Heavens A; Mandelbaum R; Massey R; Moghaddam B; Rassat A; Refregier A; Rhodes J; Schrabback T; Shawe-Taylor J; Shmakova M; van Waerbeke L; Wittman D}
}
@Article{ 6502,
title = {Bayesian inference for generalized linear models for spiking neurons},
journal = {Frontiers in Computational Neuroscience},
year = {2010},
month = {4},
volume = {4},
number = {12},
pages = {1--17},
abstract = {Generalized Linear Models (GLMs) are commonly used statistical methods for modelling the relationship between neural population activity and presented stimuli. When the dimension of the parameter space is large, strong regularization has to be used in order to fit GLMs to datasets of realistic size without overfitting. By imposing properly chosen priors over parameters, Bayesian inference provides an effective and principled approach for achieving regularization. Here we show how the posterior distribution over model parameters of GLMs can be approximated by a Gaussian using the Expectation Propagation algorithm. In this way, we obtain an estimate of the posterior mean and posterior covariance, allowing us to calculate Bayesian confidence intervals that characterize the uncertainty about the optimal solution. From the posterior we also obtain a different point estimate, namely the posterior mean as opposed to the commonly used maximum a posteriori estimate. We systematically compare the different inference techniques on simulated as well as on multi-electrode recordings of retinal ganglion cells, and explore the effects of the chosen prior and the performance measure used. We find that good performance can be achieved by choosing an Laplace prior together with the posterior mean estimate.},
web_url = {http://frontiersin.org/neuroscience/computationalneuroscience/paper/10.3389/fncom.2010.00012/pdf/},
state = {published},
DOI = {10.3389/fncom.2010.00012},
author = {Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Macke J{jakob}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ 6257,
title = {Decorrelated Neuronal Firing in Cortical Microcircuits},
journal = {Science},
year = {2010},
month = {1},
volume = {327},
number = {5965},
pages = {584--587},
abstract = {Correlated trial-to-trial variability in the activity of cortical neurons is thought to reflect the functional connectivity of the circuit. Many cortical areas are organized into functional columns, in which neurons are believed to be densely connected and to share common input. Numerous studies report a high degree of correlated variability between nearby cells. We developed chronically implanted multitetrode arrays offering unprecedented recording quality to reexamine this question in the primary visual cortex of awake macaques. We found that even nearby neurons with similar orientation tuning show virtually no correlated variability. Our findings suggest a refinement of current models of cortical microcircuit architecture and function: Either adjacent neurons share only a few percent of their inputs or, alternatively, their activity is actively decorrelated.},
web_url = {http://www.sciencemag.org/cgi/reprint/327/5965/584.pdf},
state = {published},
DOI = {10.1126/science.1179867},
author = {Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Keliris GA{george}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}}
}
@Article{ 6102,
title = {Bayesian population decoding of spiking neurons},
journal = {Frontiers in Computational Neuroscience},
year = {2009},
month = {10},
volume = {3},
number = {21},
pages = {1--14},
abstract = {The timing of action potentials in spiking neurons depends on the temporal dynamics of their inputs and contains information about temporal fluctuations in the stimulus. Leaky integrate-and-fire neurons constitute a popular class of encoding models, in which spike times depend directly on the temporal structure of the inputs. However, optimal decoding rules for these models have only been studied explicitly in the noiseless case. Here, we study decoding rules for probabilistic inference of a continuous stimulus from the spike times of a population of leaky integrate-and-fire neurons with threshold noise. We derive three algorithms for approximating the posterior distribution over stimuli as a function of the observed spike trains. In addition to a reconstruction of the stimulus we thus obtain an estimate of the uncertainty as well. Furthermore, we derive a ‘spike-by-spike’ online decoding scheme that recursively updates the posterior with the arrival of each new spike. We use these decoding rules to reconstruct time-varying stimuli represented by a Gaussian process from spike trains of single neurons as well as neural populations.},
web_url = {http://www.frontiersin.org/computationalneuroscience/paper/10.3389/neuro.10/021.2009/pdf/},
state = {published},
DOI = {10.3389/neuro.10.021.2009},
author = {Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ 5276,
title = {Characterization of the p-Generalized Normal Distribution},
journal = {Journal of Multivariate Analysis},
year = {2009},
month = {5},
volume = {100},
number = {5},
pages = {817--820},
abstract = {It is a well known fact that invariance under the orthogonal group
and marginal independence uniquely characterizes the isotropic
normal distribution. Here, a similar characterization is provided
for the more general class of differentiable bounded
$L_{p}$-spherically symmetric distributions: Every factorial
distribution in this class is necessarily $p$-generalized normal.},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6WK9-4T2S8N4-1-3&_cdi=6901&_user=29041&_orig=search&_coverDate=05%2F31%2F2009&_sk=998999994&view=c&wchp=dGLbVzW-zSkzV&md5=41621c26d1a605c98eaeefa4fb327946&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/j.jmva.2008.07.006},
author = {Sinz FH{fabee}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ 5588,
title = {Natural Image Coding in V1: How Much Use is Orientation Selectivity?},
journal = {PLoS Computational Biology},
year = {2009},
month = {4},
volume = {5},
number = {4},
pages = {1--16},
abstract = {Orientation selectivity is the most striking feature of simple cell coding in V1 that has been shown to emerge from the reduction of higher-order correlations in natural images in a large variety of statistical image models. The most parsimonious one among these models is linear Independent Component Analysis (ICA), whereas second-order decorrelation transformations such as Principal Component Analysis (PCA) do not yield oriented filters. Because of this finding, it has been suggested that the emergence of orientation selectivity may be explained by higher-order redundancy reduction. To assess the tenability of this hypothesis, it is an important empirical question how much more redundancy can be removed with ICA in comparison to PCA or other second-order decorrelation methods. Although some previous studies have concluded that the amount of higher-order correlation in natural images is generally insignificant, other studies reported an extra gain for ICA of more than 100%. A consistent conclusion about the role of higher-order correlations in natural images can be reached only by the development of reliable quantitative evaluation methods. Here, we present a very careful and comprehensive analysis using three evaluation criteria related to redundancy reduction: In addition to the multi-information and the average log-loss, we compute complete rate-distortion curves for ICA in comparison with PCA. Without exception, we find that the advantage of the ICA filters is small. At the same time, we show that a simple spherically symmetric distribution with only two parameters can fit the data significantly better than the probabilistic model underlying ICA. This finding suggests that, although the amount of higher-order correlation in natural images can in fact be significant, the feature of orientation selectivity does not yield a large contribution to redundancy reduction within the linear filter bank models of V1 simple cells.},
web_url = {http://journals.plos.org/ploscompbiol/article/asset?id=10.1371%2Fjournal.pcbi.1000336.PDF},
state = {published},
DOI = {10.1371/journal.pcbi.1000336},
EPUB = {e1000336},
author = {Eichhorn J{je}{Research Group Computational Vision and Neuroscience}; Sinz FH{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ 5157,
title = {Generating Spike Trains with Specified Correlation Coefficients},
journal = {Neural Computation},
year = {2009},
month = {2},
volume = {21},
number = {2},
pages = {397--423},
abstract = {Spike trains recorded from populations of neurons can exhibit substantial pairwise correlations between neurons and rich temporal structure. Thus, for the realistic simulation and analysis of neural systems, it is essential to have efficient methods for generating artificial spike trains with specified correlation structure. Here we show how correlated binary spike trains can be simulated by means of a latent multivariate gaussian model. Sampling from the model is computationally very efficient and, in particular, feasible even for large populations of neurons. The entropy of the model is close to the theoretical maximum for a wide range of parameters. In addition, this framework naturally extends to correlations over time and offers an elegant way to model correlated neural spike counts with arbitrary marginal distributions.},
file_url = {/fileadmin/user_upload/files/publications/macke2009_5157[0].pdf},
web_url = {http://www.mitpressjournals.org/doi/pdf/10.1162/neco.2008.02-08-713},
state = {published},
DOI = {10.1162/neco.2008.02-08-713},
author = {Macke JH{jakob}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}; Tolias AS{atolias}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Article{ 3731,
title = {Factorial coding of natural images: how effective are linear models in removing higher-order dependencies?},
journal = {Journal of the Optical Society of America A},
year = {2006},
month = {6},
volume = {23},
number = {6},
pages = {1253--1268},
abstract = {The performance of unsupervised learning models for natural images is evaluated quantitatively by means of information theory. We estimate the gain in statistical independence (the multi-information reduction) achieved with independent component analysis (ICA), principal component analysis (PCA), zero-phase whitening, and predictive coding. Predictive coding is translated into the transform coding framework, where it can be characterized by the constraint of a triangular filter matrix. A randomly sampled whitening basis and the Haar wavelet are included in the comparison as well. The comparison of all these methods is carried out for different patch sizes, ranging from 2×2to16×16 pixels . In spite of large differences in the shape of the basis functions, we find only small differences in the multi-information between all decorrelation transforms (5% or less) for all patch sizes. Among the second-order methods, PCA is optimal for small patch sizes and predictive coding performs best for large patch sizes. The extra gain achieved with ICA is always less than 2%. In conclusion, the edge filters found with ICA lead to only a surprisingly small improvement in terms of its actual objective.},
file_url = {/fileadmin/user_upload/files/publications/Bethge_2006_3731[0].pdf},
web_url = {http://www.opticsinfobase.org/josaa/abstract.cfm?uri=josaa-23-6-1253},
state = {published},
DOI = {10.1364/JOSAA.23.001253},
author = {Bethge M{mbethge}}
}
@Article{ 5182,
title = {Dynamics of Population Rate Codes in Ensembles of Neocortical Neurons},
journal = {Journal of Neurophysiology},
year = {2004},
month = {2},
volume = {91},
number = {2},
pages = {704--709},
abstract = {Information processing in neocortex can be very fast, indicating that neuronal ensembles faithfully transmit rapidly changing signals to each other. Apart from signal-to-noise issues, population codes are fundamentally constrained by the neuronal dynamics. In particular, the biophysical properties of individual neurons and collective phenomena may substantially limit the speed at which a graded signal can be represented by the activity of an ensemble. These implications of the neuronal dynamics are rarely studied experimentally. Here, we combine theoretical analysis and whole cell recordings to show that encoding signals in the variance of uncorrelated synaptic inputs to a neocortical ensemble enables faithful transmission of graded signals with high temporal resolution. In contrast, the encoding of signals in the mean current is subject to low-pass filtering.},
web_url = {http://jn.physiology.org/cgi/reprint/91/2/704},
state = {published},
DOI = {10.1152/jn.00415.2003},
author = {Silberberg G; Bethge M{mbethge}; Markram H; Pawelzik K; Tsodyks M}
}
@Article{ 5875,
title = {Optimal neural rate coding leads to bimodal firing rate distributions},
journal = {Network},
year = {2003},
month = {5},
volume = {14},
number = {2},
pages = {303--319},
abstract = {Many experimental studies concerning the neuronal code are based on graded responses of neurons, given by the emitted number of spikes measured in a certain time window. Correspondingly, a large body of neural network theory deals with analogue neuron models and discusses their potential use for computation or function approximation. All physical signals, however, are of limited precision, and neuronal firing rates in cortex are relatively low. Here, we investigate the relevance of analogue signal processing with spikes in terms of optimal stimulus reconstruction and information theory. In particular, we derive optimal tuning functions taking the biological constraint of limited firing rates into account. It turns out that depending on the available decoding time T, optimal encoding undergoes a phase transition from discrete binary coding for small T towards analogue or quasi-analogue encoding for large T. The corresponding firing rate distributions are bimodal for all relevant T, in particular in the case of
population coding.},
web_url = {http://www.tandfonline.com/doi/pdf/10.1088/0954-898X_14_2_307},
state = {published},
DOI = {10.1088/0954-898X/14/2/307},
author = {Bethge M{mbethge}; Rotermund D; Pawelzik K}
}
@Article{ 5183,
title = {Second Order Phase Transition in Neural Rate Coding: Binary Encoding is Optimal for Rapid Signal Transmission},
journal = {Physical Review Letters},
year = {2003},
month = {2},
volume = {90},
number = {8:088104},
pages = {1--4},
abstract = {Here, we derive optimal tuning functions for minimum mean square reconstruction from neural rate responses subjected to Poisson noise. The shape of these tuning functions strongly depends on the length T of the time window within which action potentials (spikes) are counted in order to estimate the underlying firing rate. A phase transition towards pure binary encoding occurs if the maximum mean spike count becomes smaller than approximately three. For a particular function class, we prove the existence of a second-order phase transition. The analytically derived critical decoding time window length is in precise agreement with numerical results. Our analysis reveals that binary rate encoding should dominate in the brain wherever time is the critical constraint.},
web_url = {http://prola.aps.org/pdf/PRL/v90/i8/e088104},
state = {published},
DOI = {10.1103/PhysRevLett.90.088104},
author = {Bethge M{mbethge}; Rotermund D; Pawelzik K}
}
@Article{ 5186,
title = {Optimal Short-Term Population Coding: When Fisher Information Fails},
journal = {Neural Computation},
year = {2002},
month = {10},
volume = {14},
number = {10},
pages = {2317--2351},
abstract = {Efficient coding has been proposed as a first principle explaining neuronal response properties in the central nervous system. The shape of optimal codes, however, strongly depends on the natural limitations of the particular physical system. Here we investigate how optimal neuronal encoding strategies are influenced by the finite number of neurons N (place constraint), the limited decoding time window length T (time constraint), the maximum neuronal firing rate f(max) (power constraint), and the maximal average rate (f)(max) (energy constraint). While Fisher information provides a general lower bound for the mean squared error of unbiased signal reconstruction, its use to characterize the coding precision is limited. Analyzing simple examples, we illustrate some typical pitfalls and thereby show that Fisher information provides a valid measure for the precision of a code only if the dynamic range (f(min)T, f(max)T) is sufficiently large. In particular, we demonstrate that the optimal width of gaussian tuning curves depends on the available decoding time T. Within the broader class of unimodal tuning functions, it turns out that the shape of a Fisher-optimal coding scheme is not unique. We solve this ambiguity by taking the minimum mean square error into account, which leads to flat tuning curves. The tuning width, however, remains to be determined by energy constraints rather than by the principle of efficient coding.},
web_url = {http://www.mitpressjournals.org/doi/pdf/10.1162/08997660260293247},
state = {published},
DOI = {10.1162/08997660260293247},
author = {Bethge M{mbethge}; Rotermund D; Pawelzik K}
}
@Article{ 5187,
title = {Population coding with unreliable spikes},
journal = {Neurocomputing},
year = {2002},
month = {6},
volume = {44-46},
pages = {323--328},
abstract = {The need for a neuronal coding scheme that is robust against the corruption of action potentials seems to support the idea of population rate coding, where the relevance of a single spike decreases proportional to the increase of population size. In order to test this intuition, we here investigate the efficiency and robustness of a population rate coding scheme in comparison to a place coding scheme using identical noise model. It turns out that the efficiency of population rate coding is substantially worse than that of place coding even if the generation or propagation of spikes are highly unreliable processes.},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6V10-45F90MH-8-28&_cdi=5660&_user=29041&_orig=browse&_coverDate=06%2F30%2F2002&_sk=999539999&view=c&wchp=dGLzVzz-zSkzk&md5=bb3ca21239b5d857eaea71c0b2146426&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/S0925-2312(02)00359-4},
author = {Bethge M{mbethge}; Pawelzik K}
}
@Article{ 5188,
title = {Spike-frequency adaptation: Phenomenological model and experimental tests},
journal = {Neurocomputing},
year = {2001},
month = {6},
volume = {38-40},
pages = {105--110},
abstract = {Spike-frequency adaptation is a common feature of neural dynamics. Here we present a low-dimensional phenomenological model whose parameters can be easily determined from experimental data. We test the model on intracellular recordings from auditory receptor neurons of locusts and demonstrate that the temporal variation of discharge rate is predicted with high accuracy. We relate the model to biophysical descriptions of adaptation in conductance-based models and analyze its implications for neural computation.},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6V10-435KK2K-J-20&_cdi=5660&_user=29041&_orig=browse&_coverDate=06%2F30%2F2001&_sk=999599999&view=c&wchp=dGLbVzW-zSkzk&md5=f3fbbfeb0b39a93be92c28d7bd19bcce&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/S0925-2312(01)00545-8},
author = {Benda J; Bethge M{mbethge}; Henning M; Pawelzik K; Herz AVM}
}
@Article{ 5189,
title = {Synchronous inhibition as a mechanism for unbiased selective gain control},
journal = {Neurocomputing},
year = {2001},
month = {6},
volume = {38-40},
pages = {483--488},
abstract = {While there are many experiments providing evidence for synchronized neuronal activity, there is little agreement about its functional role. Since many proposals rely on the assumption that neuronal activity can be modulated by top-down or feedback signals in a multiplicative way, it is a critical question how the dynamics of neurons may account for a selective control of their gain. In this paper we present a novel gain control mechanism based on the interplay of synaptic depression and synchronous inhibition. From simulations of a two-layered model of populations of integrate-and-fire neurons connected by stochastic depressing synapses, we conclude that synchronous inhibition can act as a selective gain control signal, which may be relevant, in particular when sensory processing reflects an ongoing process of hypotheses testing.},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6V10-435KK2K-29-1J&_cdi=5660&_user=29041&_orig=browse&_coverDate=06%2F30%2F2001&_sk=999599999&view=c&wchp=dGLbVtz-zSkzS&md5=b07dfd1bdf5d9f044dad0029de1bc0f2&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/S0925-2312(01)00373-3},
author = {Bethge M{mbethge}; Pawelzik K}
}
@Article{ 5190,
title = {Brief pauses as signals for depressing synapses},
journal = {Neurocomputing},
year = {1999},
month = {6},
volume = {26-27},
pages = {1--7},
abstract = {Activity-dependent synaptic depression is a striking feature of synaptic transmission between neocortical pyramidal neurons. It has been shown that this kind of synaptic dynamics permits the transmission of rate changes rather than the DC part of presynaptic activities. In this paper, we show that activity-dependent depression makes synapses sensitive to reductions of presynaptic activity which are brief compared to the recovery time scale of the synapse. This surprising finding suggests that the synchronous lack of activity is potentially relevant for neuronal information processing. We present a mathematical analysis and an intuitive explanation of this paradoxical phenomenon.},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6V10-40D0KHS-1-1&_cdi=5660&_user=29041&_orig=browse&_coverDate=06%2F30%2F1999&_sk=999739999&view=c&wchp=dGLzVlz-zSkWz&md5=77812d75035c3e0ca73dd10014257782&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/S0925-2312(99)00082-X},
author = {Bethge M{mbethge}; Pawelzik K; Geisel T}
}
@Inproceedings{ GatysEB2016_2,
title = {Image Style Transfer Using Convolutional Neural Networks},
year = {2016},
month = {6},
pages = {2414--2423},
abstract = {Rendering the semantic content of an image in different styles is a difficult image processing task. Arguably, a major limiting factor for previous approaches has been the lack of image representations that explicitly represent semantic information and, thus, allow to separate image content from style. Here we use image representations derived from Convolutional Neural Networks optimised for object recognition, which make high level image information explicit. We introduce A Neural Algorithm of Artistic Style that can separate and recombine the image content and style of natural images. The algorithm allows us to produce new images of high perceptual quality that combine the content of an arbitrary photograph with the appearance of numerous well-known artworks. Our results provide new insights into the deep image representations learned by Convolutional Neural Networks and demonstrate their potential for high level image synthesis and manipulation.},
web_url = {http://www.cv-foundation.org/openaccess/content_cvpr_2016/html/Gatys_Image_Style_Transfer_CVPR_2016_paper.html},
event_name = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2016)},
event_place = {Las Vegas, NV, USA},
state = {published},
author = {Gatys LA; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ TheisB2015,
title = {Generative Image Modeling Using Spatial LSTMs},
year = {2016},
pages = {1918--1926},
abstract = {Modeling the distribution of natural images is challenging, partly because of strong statistical dependencies which can extend over hundreds of pixels. Recurrent neural networks have been successful in capturing long-range dependencies in a number of problems but only recently have found their way into generative image models. We here introduce a recurrent image model based on multi-dimensional long short-term memory units which are particularly suited for image modeling due to their spatial structure. Our model scales to images of arbitrary size and its likelihood is computationally tractable. We find that it outperforms the state of the art in quantitative comparisons on several image datasets and produces promising results when used for texture synthesis and inpainting.},
web_url = {http://papers.nips.cc/paper/5637-generative-image-modeling-using-spatial-lstms},
editor = {Cortes, C. , N.D. Lawrence, D.D. Lee, M. Sugiyama, R. Garnett},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in Neural Information Processing Systems 28},
event_name = {Twenty-Ninth Annual Conference on Neural Information Processing Systems (NIPS 2015)},
event_place = {Montréal, Canada},
state = {published},
author = {Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ GatysEB2015,
title = {Texture Synthesis Using Convolutional Neural Networks},
year = {2016},
pages = {262--270},
abstract = {Here we introduce a new model of natural textures based on the feature spaces of convolutional neural networks optimised for object recognition. Samples from the model are of high perceptual quality demonstrating the generative power of neural networks trained in a purely discriminative fashion. Within the model, textures are represented by the correlations between feature maps in several layers of the network. We show that across layers the texture representations increasingly capture the statistical properties of natural images while making object information more and more explicit. The model provides a new tool to generate stimuli for neuroscience and might offer insights into the deep representations learned by convolutional neural networks.},
web_url = {http://papers.nips.cc/paper/5633-texture-synthesis-using-convolutional-neural-networks},
editor = {Cortes, C. , N.D. Lawrence, D.D. Lee, M. Sugiyama, R. Garnett},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in Neural Information Processing Systems 28},
event_name = {Twenty-Ninth Annual Conference on Neural Information Processing Systems (NIPS 2015)},
event_place = {Montréal, Canada},
state = {published},
author = {Gatys LA; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ SraHTB2015,
title = {Data modeling with the elliptical gamma distribution},
year = {2015},
month = {5},
pages = {903--911},
abstract = {We study mixture modeling using the elliptical gamma (EG) distribution, a non-Gaussian distribution that allows heavy and light tail and peak behaviors. We first consider maximum likelihood parameter estimation, a task that turns out to be very challenging: we must handle positive definiteness constraints, and more crucially, we must handle possibly nonconcave log-likelihoods, which makes maximization hard. We overcome these difficulties by developing algorithms based on fixed-point theory; our methods respect the psd constraint, while also efficiently solving the (possibly) nonconcave maximization to global optimality. Subsequently, we focus on mixture modeling using EG distributions: we present a closed-form expression of the KL-divergence between two EG distributions, which we then combine with our ML estimation methods to obtain an efficient split-and-merge expectation maximization algorithm. We illustrate the use of our model and algorithms on a dataset of natural image patches.},
web_url = {http://jmlr.org/proceedings/papers/v38/sra15.html},
editor = {Lebanon, G. , S.V.N. Vishwanathan},
publisher = {International Machine Learning Society},
address = {Madison, WI, USA},
series = {JMLR Workshop and Conference Proceedings ; 38},
event_name = {18th International Conference on Artificial Intelligence and Statistics (AISTATS 2015)},
event_place = {San Diego, CA, USA},
state = {published},
author = {Sra S{suvrit}; Hosseini R{hosseini}{Research Group Computational Vision and Neuroscience}; Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ TheisSB2013,
title = {Training sparse natural image models with a fast Gibbs sampler of an extended state space},
year = {2013},
month = {4},
pages = {1133--1141},
abstract = {We present a new learning strategy based on an efficient blocked Gibbs sampler for sparse overcomplete linear models. Particular emphasis is placed on statistical
image modeling, where overcomplete models have played an important role in discovering sparse representations. Our Gibbs sampler is faster than general purpose sampling schemes while also requiring no tuning as it is free of parameters. Using the Gibbs sampler and a persistent
variant of expectation maximization, we are able to extract highly sparse distributions over latent sources from data. When applied to natural images, our algorithm learns source distributions which resemble spike-and-slab distributions. We evaluate the likelihood and quantitatively compare the performance of the overcomplete linear model to its complete counterpart as well as a product of experts model, which represents another overcomplete generalization of the complete linear model. In contrast to previous claims, we find that overcomplete representations lead to significant improvements, but that the overcomplete linear model still underperforms other models.},
web_url = {http://nips.cc/Conferences/2012/},
editor = {Bartlett, P. , F.C.N. Pereira, L. Bottou, C.J.C. Burges, K.Q. Weinberger},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in Neural Information Processing Systems 25},
event_name = {Twenty-Sixth Annual Conference on Neural Information Processing Systems (NIPS 2012)},
event_place = {Lake Tahoe, NV, USA},
state = {published},
ISBN = {978-1-627-48003-1},
author = {Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Sohl-Dickstein J; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ HaefnerB2011,
title = {Evaluating neuronal codes for inference using Fisher information},
year = {2011},
month = {6},
pages = {1993--2001},
abstract = {Many studies have explored the impact of response variability on the quality of sensory codes. The source of this variability is almost always assumed to be intrinsic to the brain. However, when inferring a particular stimulus property, variability associated with other stimulus attributes also effectively act as noise. Here we study the impact of such stimulus-induced response variability for the case of binocular disparity inference. We characterize the response distribution for the binocular energy model in response to random dot stereograms and find it to be very different from the Poisson-like noise usually assumed. We then compute the Fisher information with respect to binocular disparity, present in the monocular inputs to the standard model of early binocular processing, and thereby obtain an upper bound on how much information a model could theoretically extract from them. Then we analyze the information loss incurred by the different ways of combining those inputs to produce a scalar single-neuron response. We find that in the case of depth inference, monocular stimulus variability places a greater limit on the extractable information than intrinsic neuronal noise for typical spike counts. Furthermore, the largest loss of information is incurred by the standard model for position disparity neurons (tuned-excitatory), that are the most ubiquitous in monkey primary visual cortex, while more information from the inputs is preserved in phase-disparity neurons (tuned-near or tuned-far) primarily found in higher cortical regions.},
web_url = {http://nips.cc/Conferences/2010/},
editor = {Lafferty, J. , C. K.I. Williams, J. Shawe-Taylor, R. S. Zemel, A. Culotta},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in Neural Information Processing Systems 23},
event_name = {Twenty-Fourth Annual Conference on Neural Information Processing Systems (NIPS 2010)},
event_place = {Vancouver, BC, Canada},
state = {published},
ISBN = {978-1-617-82380-0},
author = {Haefner RM{rhaefner}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ 6075,
title = {A joint maximum-entropy model for binary neural population patterns and continuous signals},
year = {2010},
month = {4},
pages = {620--628},
abstract = {Second-order maximum-entropy models have recently gained much interest for describing the statistics of binary spike trains. Here, we extend this approach to take continuous stimuli into account as well. By constraining on the joint second-order statistics, we obtain a joint Gaussian-Boltzmann distribution of continuous stimuli and binary neural firing patterns, for which we also compute marginal and conditional distributions. This model has the same computational complexity as pure binary models and fitting it to data is a convex problem. We show that the model can be seen as an extension to the classical spike-triggered average and can be used as a non-linear method for extracting features which a neural population is sensitive to. Further, by calculating the posterior distribution of stimuli given an observed neural response, the model can be used to decode stimuli and yields a natural spike-train metric. Therefore, extending the framework of maximum-entropy
models to continuous variables allows us to gain novel insights into the relationship between the firing patterns of neural ensembles and the stimuli they are processing.},
file_url = {/fileadmin/user_upload/files/publications/gerwinn2009_6075[0].pdf},
web_url = {http://nips.cc/Conferences/2009/},
editor = {Bengio, Y. , D. Schuurmans, J. Lafferty, C. Williams, A. Culotta},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in Neural Information Processing Systems 22},
event_name = {23rd Annual Conference on Neural Information Processing Systems (NIPS 2009)},
event_place = {Vancouver, BC, Canada},
state = {published},
ISBN = {978-1-615-67911-9},
author = {Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ 6121,
title = {Bayesian estimation of orientation preference maps},
year = {2010},
month = {4},
pages = {1195--1203},
abstract = {Imaging techniques such as optical imaging of intrinsic signals, 2-photon calcium imaging and voltage sensitive dye imaging can be used to measure the functional organization of visual cortex across different spatial and temporal scales. Here, we present Bayesian methods based on Gaussian processes for extracting topographic maps from functional imaging data. In particular, we focus on the estimation of
orientation preference maps (OPMs) from intrinsic signal imaging data. We model the underlying map as a bivariate Gaussian process, with a prior covariance function that reflects known properties of OPMs, and a noise covariance adjusted to the data. The posterior mean can be interpreted as an optimally smoothed estimate of the map, and can be used for model based interpolations of the map from sparse measurements. By sampling from the posterior distribution, we can get error bars on statistical properties such as preferred orientations, pinwheel locations or pinwheel counts. Finally, the use of an explicit probabilistic model facilitates interpretation of parameters and quantitative model comparisons. We demonstrate our model both on simulated data and on intrinsic signaling data from ferret visual cortex.},
file_url = {/fileadmin/user_upload/files/publications/NIPS2009-Macke_6121[0].pdf},
web_url = {http://nips.cc/Conferences/2009/},
editor = {Bengio, Y. , D. Schuurmans, J. Lafferty, C. Williams, A. Culotta},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in Neural Information Processing Systems 22},
event_name = {23rd Annual Conference on Neural Information Processing Systems (NIPS 2009)},
event_place = {Vancouver, BC, Canada},
state = {published},
ISBN = {978-1-615-67911-9},
author = {Macke JH{jakob}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Kaschube M; White LE; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ 6047,
title = {Hierarchical Modeling of Local Image Features through Lp-Nested Symmetric Distributions},
year = {2010},
month = {4},
pages = {1696--1704},
abstract = {We introduce a new family of distributions, called Lp-nested symmetric distributions, whose densities are expressed in terms of a hierarchical cascade of
Lp-norms. This class generalizes the family of spherically and Lp-spherically symmetric distributions which have recently been successfully used for natural image modeling. Similar to those distributions it allows for a nonlinear mechanism
to reduce the dependencies between its variables. With suitable choices of the parameters and norms, this family includes the Independent Subspace Analysis (ISA) model as a special case, which has been proposed as a means of deriving
filters that mimic complex cells found in mammalian primary visual cortex. Lp-nested distributions are relatively easy to estimate and allow us to explore the variety of models between ISA and the Lp-spherically symmetric models. By fitting the generalized Lp-nested model to 8 by 8 image patches, we show that the subspaces obtained from ISA are in fact more dependent than the individual filter
coefficients within a subspace. When first applying contrast gain control as preprocessing, however, there are no dependencies left that could be exploited by ISA. This suggests that complex cell modeling can only be useful for redundancy reduction in larger image patches.},
file_url = {/fileadmin/user_upload/files/publications/219_paper_6047[0].pdf},
file_url2 = {/fileadmin/user_upload/files/publications/219_supplement_6047[1].pdf},
web_url = {http://nips.cc/Conferences/2009/},
editor = {Bengio, Y. , D. Schuurmans, J. Lafferty, C. Williams, A. Culotta},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in Neural Information Processing Systems 22},
event_name = {23rd Annual Conference on Neural Information Processing Systems (NIPS 2009)},
event_place = {Vancouver, BC, Canada},
state = {published},
ISBN = {978-1-615-67911-9},
author = {Sinz F{fabee}{Research Group Computational Vision and Neuroscience}; Simoncelli EP; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ 6076,
title = {Neurometric function analysis of population codes},
year = {2010},
month = {4},
pages = {90--98},
abstract = {The relative merits of different population coding schemes have mostly been analyzed in the framework of stimulus reconstruction using Fisher Information. Here, we consider the case of stimulus discrimination in a two alternative forced choice paradigm and compute neurometric functions in terms of the minimal discrimination error and the Jensen-Shannon information to study neural population codes.
We first explore the relationship between minimum discrimination error, Jensen-Shannon Information and Fisher Information and show that the discrimination framework is more informative about the coding accuracy than Fisher Information as it defines an error for any pair of possible stimuli. In particular, it includes Fisher Information as a special case. Second, we use the framework to study population codes of angular variables. Specifically, we assess the impact of different noise correlations structures on coding accuracy in long versus short decoding
time windows. That is, for long time window we use the common Gaussian noise approximation. To address the case of short time windows we analyze the Ising model with identical noise correlation structure. In this way, we provide a new rigorous framework for assessing the functional consequences of noise correlation structures for the representational accuracy of neural population codes that is in particular applicable to short-time population coding.},
file_url = {/fileadmin/user_upload/files/publications/berens2009b_6076[0].pdf},
web_url = {http://nips.cc/Conferences/2009/},
editor = {Bengio, Y. , D. Schuurmans, J. Lafferty, C. Williams, A. Culotta},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in Neural Information Processing Systems 22},
event_name = {23rd Annual Conference on Neural Information Processing Systems (NIPS 2009)},
event_place = {Vancouver, BC, Canada},
state = {published},
ISBN = {978-1-615-67911-9},
author = {Berens P{berens}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ 5382,
title = {The Conjoint Effect of Divisive Normalization and Orientation Selectivity on Redundancy Reduction},
year = {2009},
month = {6},
pages = {1521--1528},
abstract = {Bandpass filtering, orientation selectivity, and contrast gain control are prominent features of sensory coding at the level of V1 simple cells. While the effect of bandpass filtering and orientation selectivity can be assessed within a linear model, contrast gain control is an inherently nonlinear computation. Here we employ the
class of $L_p$ elliptically contoured distributions to investigate the extent to which the two features---orientation selectivity and contrast gain control---are suited to model the statistics of natural images. Within this framework we find that contrast gain control can
play a significant role for the removal of redundancies in natural images. Orientation selectivity, in contrast, has only a very limited potential for redundancy reduction.},
file_url = {/fileadmin/user_upload/files/publications/SinzBethge2008Extended_5382[0].pdf},
web_url = {http://nips.cc/Conferences/2008/},
editor = {Koller, D. , D. Schuurmans, Y. Bengio, L. Bottou},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in neural information processing systems 21},
event_name = {Twenty-Second Annual Conference on Neural Information Processing Systems (NIPS 2008)},
event_place = {Vancouver, BC, Canada},
state = {published},
ISBN = {978-1-605-60949-2},
author = {Sinz F{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ 4728,
title = {Bayesian Inference for Spiking Neuron Models with a Sparsity Prior},
year = {2008},
month = {9},
pages = {529--536},
abstract = {Generalized linear models are the most commonly used tools to describe the stimulus selectivity of sensory neurons. Here we present a Bayesian treatment of such models. Using the expectation propagation algorithm, we are able to approximate the full posterior distribution over all weights. In addition, we use a Laplacian prior to favor sparse solutions. Therefore, stimulus features that do not critically influence neural activity will be assigned zero weights and thus be effectively excluded by the model. This feature selection mechanism facilitates both the interpretation of the neuron model as well as its predictive abilities. The posterior distribution can be used to obtain confidence intervals which makes it possible to assess the statistical significance of the solution. In neural data analysis, the available amount of experimental measurements is often limited whereas the parameter space is large. In such a situation, both regularization by a sparsity prior and uncertainty estimates for the model parameters are essential.
We apply our method to multi-electrode recordings of retinal ganglion cells and use our uncertainty estimate to test the statistical significance of functional couplings between neurons. Furthermore we used the sparsity of the Laplace prior to select those filters from a spike-triggered covariance analysis that are most informative about the neural response.},
file_url = {/fileadmin/user_upload/files/publications/BayesLNP_4728[0].pdf},
web_url = {http://nips.cc/Conferences/2007/},
editor = {Platt, J. C., D. Koller, Y. Singer, S. Roweis},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in neural information processing systems 20},
event_name = {Twenty-First Annual Conference on Neural Information Processing Systems (NIPS 2007)},
event_place = {Vancouver, BC, Canada},
state = {published},
ISBN = {978-1-605-60352-0},
author = {Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Macke J{jakob}; Seeger M{seeger}{Department Empirical Inference}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ 4729,
title = {Near-Maximum Entropy Models for Binary Neural Representations of Natural Images},
year = {2008},
month = {9},
pages = {97--104},
abstract = {Maximum entropy analysis of binary variables provides an elegant way for studying the role of pairwise correlations in neural populations. Unfortunately, these approaches suffer from their poor scalability to high dimensions. In sensory coding, however, high-dimensional data is ubiquitous. Here, we introduce a new approach using a near-maximum entropy model, that makes this type of analysis feasible for very high-dimensional data - the model parameters can be derived in closed form and sampling is easy. We demonstrate its usefulness by studying a simple neural representation model of natural images. For the first time, we are able to directly compare predictions from a pairwise maximum entropy model not only in small groups of neurons, but also in larger populations of more than thousand units. Our results indicate that in such larger networks interactions exist that are not predicted by pairwise correlations, despite the fact that pairwise correlations explain the lower-dimensional marginal statistics extremely well up to the limit of dimensionality where estimation of the full joint distribution is feasible.},
file_url = {/fileadmin/user_upload/files/publications/NIPS-2007-Bethge_4729[0].pdf},
web_url = {http://nips.cc/Conferences/2007/},
editor = {Platt, J. C., D. Koller, Y. Singer, S. Roweis},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in neural information processing systems 20},
event_name = {Twenty-First Annual Conference on Neural Information Processing Systems (NIPS 2007)},
event_place = {Vancouver, BC, Canada},
state = {published},
ISBN = {978-1-605-60352-0},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Berens P{berens}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ 4738,
title = {Receptive Fields without Spike-Triggering},
year = {2008},
month = {9},
pages = {969--976},
abstract = {Stimulus selectivity of sensory neurons is often characterized by estimating their receptive field properties such as orientation selectivity. Receptive fields are usually derived from the mean (or covariance) of the spike-triggered stimulus ensemble. This approach treats each spike as an independent message but does not take into account that information might be conveyed through patterns of neural activity that are distributed across space or time. Can we find a concise description for the processing of a whole population of neurons analogous to the receptive field for single neurons? Here, we present a generalization of the linear receptive field which is not bound to be triggered on individual spikes but can be meaningfully
linked to distributed response patterns. More precisely, we seek to identify those stimulus features and the corresponding patterns of neural activity that are most
reliably coupled. We use an extension of reverse-correlation methods based on canonical correlation analysis. The resulting population receptive fields span the
subspace of stimuli that is most informative about the population response. We evaluate our approach using both neuronal models and multi-electrode recordings from rabbit retinal ganglion cells. We show how the model can be extended to capture nonlinear stimulus-response relationships using kernel canonical correlation analysis, which makes it possible to test different coding mechanisms. Our technique can also be used to calculate receptive fields from multi-dimensional neural measurements such as those obtained from dynamic imaging methods.},
file_url = {/fileadmin/user_upload/files/publications/NIPS2007-Macke_4738[0].pdf},
web_url = {http://nips.cc/Conferences/2007/},
editor = {Platt, J. C., D. Koller, Y. Singer, S. Roweis},
publisher = {Curran},
address = {Red Hook, NY, USA},
booktitle = {Advances in neural information processing systems 20},
event_name = {Twenty-First Annual Conference on Neural Information Processing Systems (NIPS 2007)},
event_place = {Vancouver, BC, Canada},
state = {published},
ISBN = {978-1-605-60352-0},
author = {Macke JH{jakob}; Zeck G{gzeck}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inproceedings{ 4807,
title = {Bayesian Inference for Sparse Generalized Linear Models},
year = {2007},
month = {9},
pages = {298--309},
abstract = {We present a framework for efficient, accurate approximate Bayesian inference in generalized linear models (GLMs), based on the expectation propagation (EP) technique. The parameters can be endowed with a factorizing prior distribution, encoding properties such as sparsity or non-negativity. The central role of posterior log-concavity in Bayesian GLMs is emphasized and related to stability issues in EP. In particular, we use our technique to infer the parameters of a point process model for neuronal spiking data from multiple electrodes, demonstrating significantly superior predictive performance when a sparsity assumption is enforced via a Laplace prior distribution.},
web_url = {http://www.springerlink.com/content/n65162n4547n29k7/fulltext.pdf},
editor = {Kok, J. N., J. Koronacki, R. Lopez de Mantaras, S. Matwin, D. Mladenic, A. Skowron},
publisher = {Springer},
address = {Berlin, Germany},
series = {Lecture Notes in Computer Science ; 4701},
booktitle = {Machine Learning: ECML 2007},
event_name = {18th European Conference on Machine Learning},
event_place = {Warsaw, Poland},
state = {published},
ISBN = {978-3-540-74957-8},
DOI = {10.1007/978-3-540-74958-5_29},
author = {Seeger M{seeger}{Department Empirical Inference}; Gerwinn S{sgerwinn}{Department Empirical Inference}; Bethge M{mbethge}}
}
@Inproceedings{ 4304,
title = {The Independent Components of Natural Images are Perceptually Dependent},
year = {2007},
month = {2},
pages = {1--12},
abstract = {The independent components of natural images are a set of linear filters which are optimized for statistical independence. With such a set of filters images can be represented without loss of information. Intriguingly, the filter shapes are localized, oriented, and bandpass, resembling important properties of V1 simple cell receptive fields. Here we address the question of whether the independent components of natural images are also perceptually less dependent than other image components. We compared the pixel basis, the ICA basis and the discrete cosine basis by asking subjects to interactively predict missing pixels (for the pixel basis) or to predict the coefficients of ICA and DCT basis functions in patches of natural images. Like Kersten (1987) we find the pixel basis to be perceptually highly redundant but perhaps surprisingly, the ICA basis showed significantly higher perceptual dependencies than the DCT basis. This shows a dissociation between statistical and perceptual dependence measures.},
file_url = {/fileadmin/user_upload/files/publications/EI105-IndependentComponents_4304[0].pdf},
web_url = {http://www.ece.northwestern.edu/~pappas/hvei/past/6806.html},
editor = {Rogowitz, B. E.},
publisher = {SPIE},
address = {Bellingham, WA, USA},
series = {Proceedings of the SPIE ; 6492},
booktitle = {Human Vision and Electronic Imaging XII},
event_name = {SPIE Human Vision and Electronic Imaging Conference 2007},
event_place = {San Jose, CA, USA},
state = {published},
ISBN = {978-0-8194-6605-1},
DOI = {10.1117/12.711133},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Wiecki TV{wiecki}{Research Group Computational Vision and Neuroscience}; Wichmann FA{felix}{Department Empirical Inference}}
}
@Inproceedings{ 4305,
title = {Unsupervised learning of a steerable basis for invariant image representations},
year = {2007},
month = {2},
pages = {1--12},
abstract = {There are two aspects to unsupervised learning of invariant representations of images: First, we can reduce the dimensionality of the representation by finding an optimal trade-off between temporal stability and informativeness. We show that the answer to this optimization problem is generally not unique so that there is still considerable freedom in choosing a suitable basis. Which of the many optimal representations should be selected? Here, we focus on this second aspect, and seek to find representations that are invariant under geometrical transformations occurring in sequences of natural images. We utilize ideas of steerability and Lie groups, which have been developed in the context of filter design. In particular, we show how an anti-symmetric version of canonical correlation analysis can be used to learn a full-rank image basis which is steerable with respect to rotations. We provide a geometric interpretation of this algorithm by showing that it finds the two-dimensional eigensubspaces of the
average bivector. For data which exhibits a variety of transformations, we develop a bivector clustering algorithm, which we use to learn a basis of generalized quadrature pairs (i.e. complex cells) from sequences of natural images.},
file_url = {/fileadmin/user_upload/files/publications/SPIE2007-Bethge_4305[0].pdf},
web_url = {http://www.ece.northwestern.edu/~pappas/hvei/past/6806.html},
editor = {Rogowitz, B. E.},
publisher = {SPIE},
address = {Bellingham, WA, USA},
series = {Proceedings of the SPIE ; 6492},
booktitle = {Human Vision and Electronic Imaging XII},
event_name = {SPIE Human Vision and Electronic Imaging Conference 2007},
event_place = {San Jose, CA, USA},
state = {published},
ISBN = {978-0-8194-6605-1},
DOI = {10.1117/12.711119},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}}
}
@Inproceedings{ 5185,
title = {Binary tuning is optimal for neural rate coding with
high temporal resolution},
year = {2003},
pages = {189--196},
abstract = {Here we derive optimal gain functions for minimum mean square reconstruction from neural rate responses subjected to Poisson noise. The shape of these functions strongly depends on the length T of the time window within which spikes are counted in order to estimate the underlying
firing rate. A phase transition towards pure binary encoding occurs if the maximum mean spike count becomes smaller than approximately three provided the minimum firing rate is zero. For a particular function class, we were able to prove the existence of a second-order phase transition analytically. The critical decoding time window length obtained from the analytical derivation is in precise agreement with the numerical results. We conclude that under most circumstances relevant to information
processing in the brain, rate coding can be better ascribed to a binary (low-entropy) code than to the other extreme of rich analog coding.},
file_url = {/fileadmin/user_upload/files/publications/NIPS-2002-Bethge.pdf},
web_url = {http://books.nips.cc/nips15.html},
editor = {Becker, S. , S. Thrun, K. Obermayer},
publisher = {MIT Press},
address = {Cambridge, MA, USA},
booktitle = {Advances in neural information processing systems 15},
event_name = {Sixteenth Annual Conference on Neural Information Processing Systems (NIPS 2002)},
event_place = {Vancouver, BC, Canada},
state = {published},
ISBN = {0-262-02550-7},
author = {Bethge M{mbethge}; Rotermund D; Pawelzik K}
}
@Inbook{ Bethge2014,
title = {Efficient Population Coding},
year = {2015},
pages = {1063--1070},
abstract = {Natural stimulations caused by objects in the surrounding world do not stimulate single sensory receptors in isolation but lead to the activation of large numbers of neurons simultaneously. Thus, typical stimulus variables of interest are represented only implicitly in activation patterns across large neural populations. These patterns are statistical in nature since repeated presentation of the same stimulus usually leads to highly variable responses. The large dimensionality and randomness of the neural responses make it difficult to assess how well different stimuli can be discriminated. Depending on how effectively neurons share the labor of encoding, the accuracy with which stimuli are represented can change dramatically. Thus, studying the efficiency of population codes is important for our understanding of both which information is encoded in neural populations and how it is encoded.},
web_url = {http://link.springer.com/content/pdf/10.1007%2F978-1-4614-7320-6_578-1.pdf},
editor = {Jaeger, D. , R. Jung},
publisher = {Springer},
address = {New York, NY, USA},
booktitle = {Encyclopedia of Computational Neuroscience},
state = {published},
ISBN = {978-1-4614-6674-1},
DOI = {10.1007/978-1-4614-7320-6_578-1},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Inbook{ GerhardTB2015,
title = {Modeling Natural Image Statistics},
year = {2015},
pages = {53--80},
abstract = {This chapter focuses on models of the spatial structure in natural images, that is, the content of static images as opposed to sequences of images. It introduces some statistical qualities of natural images and discusses why it is interesting to model them. The chapter describes several models including the state of the art. It then discusses examples of how natural image models impact computer vision applications. The chapter further describes experimental examples of how biological systems are adapted to natural images. A wide spectrum of approaches to modeling the density of natural images has been proposed in the last two decades. Many have been designed to examine how biological systems adapt to environmental statistics, where the logic is to compare neural response properties to emergent aspects of the models after fitting to natural images.},
web_url = {http://onlinelibrary.wiley.com/doi/10.1002/9783527680863.ch4/pdf},
editor = {Cristóbal, G. , L. Perrinet, M.S. Keil},
publisher = {Wiley-VCH},
address = {Weinheim, Germany},
booktitle = {Biologically inspired Computer Vision: Fundamentals and Applications},
state = {published},
ISBN = {978-3-527-41264-8},
DOI = {10.1002/9783527680863.ch4},
author = {Gerhard HE{hgerhard}; Theis L{lucas}; Bethge M{mbethge}}
}
@Techreport{ 6114,
title = {Spectral Stacking: Unbiased Shear Estimation for Weak Gravitational Lensing},
year = {2009},
month = {10},
number = {186},
abstract = {We present a new method for the estimation of shear in gravitational lensing from a set of galaxy images with unknown distribution of shapes. Common procedures first compute an estimate of some characteristic feature for each individual galaxy and then average over these. The average can be used to estimate the shear as it becomes independent of the individual galaxy shapes with increasing number of images. A common problem of the previous methods is that the estimators of the features are biased. Here we introduce ``\textit{spectral stacking}'' which uses the power spectrum as a characteristic feature of the individual galaxies. If the galaxy images are contaminated by Poisson noise, an unbiased estimator of the power spectrum exists which is used in the analysis. Furthermore, the power spectrum is independent of the location of the individual galaxy centers provided the smoothed galaxy intensities decay sufficiently fast. No further assumptions are necessary. The
algorithm won the main contest of the Great08 challenge.},
file_url = {/fileadmin/user_upload/files/publications/MPIK-TR-186_6114[0].pdf},
state = {published},
author = {Hosseini R{hosseini}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@techreport{5865,
  title    = {The effect of pairwise neural correlations on global population statistics},
  year     = {2009},
  month    = {3},
  number   = {183},
  abstract = {Simultaneously recorded neurons often exhibit correlations in their spiking activity. These correlations shape the statistical structure of the population activity, and can lead to substantial redundancy across neurons. Here, we study the effect of pairwise correlations on the population spike count statistics and redundancy in populations of threshold-neurons in which response-correlations arise from correlated Gaussian inputs. We investigate the scaling of the redundancy as the population size is increased, and compare the asymptotic redundancy in our models to the corresponding maximum- and minimum entropy models.},
  file_url = {/fileadmin/user_upload/files/publications/MPIK-TR-183_[0].pdf},
  state    = {published},
  author   = {Macke JH{jakob}; Opper M; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Techreport{ 5191,
title = {How Much Can Orientation Selectivity and Contrast Gain Control Reduce the Redundancies in Natural Images},
year = {2008},
month = {3},
number = {169},
abstract = {The two most prominent features of early visual
processing are orientation selective filtering and contrast gain
control. While the effect of orientation selectivity can be assessed
within a linear model, contrast gain control is an inherently
nonlinear computation. Here we employ the class of $L_p$
elliptically contoured distributions to investigate the extent to
which the two features, orientation selectivity and contrast gain
control, are suited to model the statistics of natural images.
Within this model we find that contrast gain control can play a
significant role for the removal of redundancies in natural images.
Orientation selectivity, in contrast, has only a very limited
potential for linear redundancy reduction.},
file_url = {/fileadmin/user_upload/files/publications/MPIK-TR-169_5191[0].pdf},
state = {published},
author = {Sinz FH{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@poster{DenfieldEBT2016,
  title       = {Correlated Variability in Population Activity: Noise or Signature of Internal Computations},
  year        = {2016},
  month       = {6},
  pages       = {63},
  abstract    = {Neuronal responses to repeated presentations of identical visual stimuli are variable. The source of this variability is unknown, but it is commonly treated as noise and seen as an obstacle to understanding neuronal activity. We argue that this variability is not noise but reflects, and is due to, computations internal to the brain. Internal signals such as cortical state or attention interact with sensory information processing in early sensory areas. However, little research has examined the effect of fluctuations in these signals on neuronal responses, leaving a number of uncontrolled parameters that may contribute to neuronal variability. One such variable is attention, which increases neuronal response gain in a spatial and feature selective manner. Both the strength of this modulation and the focus of attention are likely to vary from trial to trial, and we hypothesize that these fluctuations are a major source of neuronal response variability and covariability. We first examine a simple model of a gain-modulating signal acting on a population of neurons and show that fluctuations in attention can increase individual and shared variability and generate a variety of correlation structures relevant to population coding, including limited range and differential correlations. To test our model’s predictions experimentally, we devised a cued-spatial attention, change-detection task to induce varying degrees of fluctuation in the subject’s attentional signal by changing whether the subject must attend to one stimulus location while ignoring another, or attempt to attend to multiple locations simultaneously. We use multi-electrode recordings with laminar probes in primary visual cortex of macaques performing this task. We demonstrate that attention gain-modulates responses of V1 neurons in a manner consistent with results from higher-order areas. Consistent with our model’s predictions, our preliminary results indicate neuronal covariability is elevated in conditions in which attention fluctuates and that neurons are nearly independent when attention is focused. Overall, our results suggest that attentional fluctuations are an important contributor to neuronal variability and open the door to the use of statistical methods for inferring the state of these signals on behaviorally relevant timescales.},
  web_url     = {http://areadne.org/2016/pezaris-hatsopoulos-2016-areadne.pdf},
  event_name  = {AREADNE 2016: Research in Encoding And Decoding of Neural Ensembles},
  event_place = {Santorini, Greece},
  state       = {published},
  author      = {Denfield GH; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}}
}
@poster{GatysEB2016,
  title       = {A Neural Algorithm of Artistic Style},
  year        = {2016},
  month       = {5},
  day         = {14},
  pages       = {85},
  abstract    = {In fine art, especially painting, humans have mastered the skill to create unique visual experiences by composing a complex interplay between the content and style of an image. The algorithmic basis of this process is unknown and there exists no artificial system with similar capabilities. Recently, a class of biologically inspired vision models called Deep Neural Networks have demonstrated near-human performance in complex visual tasks such as object and face recognition. Here we introduce an artificial system based on a Deep Neural Network that creates artistic images of high perceptual quality. The system can separate and recombine the content and style of arbitrary images, providing a neural algorithm for the creation of artistic images. In light of recent studies using fMRI and electrophysiology that have shown striking similarities between performance-optimised artificial neural networks and biological vision, our work offers a path towards an algorithmic understanding of how humans create and perceive artistic imagery. The algorithm introduces a novel class of stimuli that could be used to test specific computational hypotheses about the perceptual processing of artistic style.},
  web_url     = {http://www.humanbrainmapping.org/i4a/pages/index.cfm?pageID=3662},
  event_name  = {16th Annual Meeting of the Vision Sciences Society (VSS 2016)},
  event_place = {St. Pete Beach, FL, USA},
  state       = {published},
  author      = {Gatys LA; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ KummererB2016,
title = {DeepGaze II: A big step towards explaining all information in image-based saliency},
year = {2016},
month = {5},
day = {14},
pages = {86},
abstract = {When free-viewing scenes, the first few fixations of human observers are driven in part by bottom-up attention. Over the last decade various models have been proposed to explain these fixations. We recently standardized model comparison using an information-theoretic framework and were able to show that these models captured not more than 1/3 of the explainable mutual information between image content and the fixation locations, which might be partially due to the limited data available (Kuemmerer et al, PNAS, in press). Subsequently, we have shown that this limitation can be tackled effectively by using a transfer learning strategy. Our model "DeepGaze I" uses a neural network (AlexNet) that was originally trained for object detection on the ImageNet dataset. It achieved a large improvement over the previous state of the art, explaining 56% of the explainable information (Kuemmerer et al, ICLR 2015).
A new generation of object recognition models have since been developed, substantially outperforming AlexNet. The success of "DeepGaze I" and similar models suggests that features that yield good object detection performance can be exploited for better saliency prediction, and that object detection and fixation prediction performances are correlated. Here we test this hypothesis. Our new model "DeepGaze II" uses the VGG network to convert an image into a high dimensional representation, which is then fed through a second, smaller network to yield a density prediction. The second network is pre-trained using maximum-likelihood on the SALICON dataset and fine-tuned on the MIT1003 dataset. Remarkably, DeepGaze II explains 88% of the explainable information on held out data, and has since achieved top performance on the MIT Saliency Benchmark. The problem of predicting where people look under free-viewing conditions could be solved very soon. That fixation prediction performance is closely tied to object detection informs theories of attentional selection in scene viewing.},
web_url = {http://www.humanbrainmapping.org/i4a/pages/index.cfm?pageID=3662},
event_name = {16th Annual Meeting of the Vision Sciences Society (VSS 2016)},
event_place = {St. Pete Beach, FL, USA},
state = {published},
author = {K{\"u}mmerer M{mkuemmerer}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ WallisEGFWB2016,
title = {Seeking summary statistics that match peripheral visual appearance using naturalistic textures generated by Deep Neural Networks},
year = {2016},
month = {5},
day = {14},
pages = {63--64},
abstract = {An important hypothesis that emerged from crowding research is that the perception of image structure in the periphery is texture-like. We investigate this hypothesis by measuring perceptual properties of a family of naturalistic textures generated using Deep Neural Networks (DNNs), a class of algorithms that can identify objects in images with near-human performance. DNNs function by stacking repeated convolutional operations in a layered feedforward hierarchy. Our group has recently shown how to generate shift-invariant textures that reproduce the statistical structure of natural images increasingly well, by matching the DNN representation at an increasing number of layers. Here, observers discriminated original photographic images from DNN-synthesised images in a spatial oddity paradigm. In this paradigm, low psychophysical performance means that the model is good at matching the appearance of the original scenes. For photographs of natural textures (a subset of the MIT VisTex dataset), discrimination performance decreased as the DNN representations were matched to higher convolutional layers. For photographs of natural scenes (containing inhomogeneous structure), discrimination performance was nearly perfect until the highest layers were matched, whereby performance declined (but never to chance). Performance was only weakly related to retinal eccentricity (from 1.5 to 10 degrees) and strongly depended on individual source images (some images were always hard, others always easy). Surprisingly, performance showed little relationship to size: within a layer-matching condition, images further from the fovea were somewhat harder to discriminate but this result was invariant to a three-fold change in image size (changed via up/down sampling). The DNN stimuli we examine here can match texture appearance but are not yet sufficient to match the peripheral appearance of inhomogeneous scenes.
In the future, we can leverage the flexibility of DNN texture synthesis for testing different sets of summary statistics to further refine what information can be discarded without affecting appearance.},
web_url = {http://www.humanbrainmapping.org/i4a/pages/index.cfm?pageID=3662},
event_name = {16th Annual Meeting of the Vision Sciences Society (VSS 2016)},
event_place = {St. Pete Beach, FL, USA},
state = {published},
author = {Wallis TSA; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Gatys LA; Funke CM; Wichmann FA{felix}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ BethgeTBFRRBET2016,
title = {Supervised learning sets benchmark for robust spike rate inference from calcium imaging signals},
year = {2016},
month = {2},
day = {26},
pages = {163},
abstract = {A fundamental challenge in calcium imaging has been to infer spike rates of neurons from the measured noisy
calcium fluorescence traces. We collected a large benchmark dataset (>100,000 spikes, 73 neurons) recorded from varying neural tissue (V1 and retina) using different calcium indicators (OGB-1 and GCaMP6s). We introduce a new algorithm based on supervised learning in flexible probabilistic models and systematically compare it against a range of spike inference algorithms published previously. We show that our new supervised algorithm outperforms all previously published techniques. Importantly, it even performs better than other algorithms when applied to entirely new datasets for which no simultaneously recorded data is available. Future data acquired in new experimental conditions can easily be used to further improve its spike prediction accuracy and generalization performance. Finally, we show that comparing algorithms on artificial data is not informative about performance on real data, suggesting that benchmark datasets such as the one we provide may greatly facilitate future algorithmic
developments.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_16},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2016)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Froudarakis E; Reimer J; Roman-Roson M; Baden T; Euler T; Tolias A{atolias}}
}
@poster{NonnenmacherBBBM2015_2,
  title       = {Correlations and signatures of criticality in neural population models},
  year        = {2015},
  month       = {10},
  day         = {20},
  volume      = {45},
  number      = {543.23},
  web_url     = {http://www.sfn.org/am2015/},
  event_name  = {45th Annual Meeting of the Society for Neuroscience (Neuroscience 2015)},
  event_place = {Chicago, IL, USA},
  state       = {published},
  author      = {Nonnenmacher M{mnonnenmacher}; Behrens C; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}}
}
@poster{CottonEFBBST2015,
  title       = {Scaling of information in large sensory neuronal populations},
  year        = {2015},
  month       = {10},
  day         = {19},
  volume      = {45},
  number      = {331.01},
  abstract    = {Individual neurons are noisy. Therefore, it seems necessary to pool the activity of many neurons to obtain an accurate representation of the environment. However, it is widely believed that shared noise in the activity of nearby neurons renders such pooling ineffective, limiting the accuracy of the population code and, ultimately, behavior. However, these predictions are based on extrapolating models fit to small numbers of neurons and have not been tested experimentally. Using a novel high-speed 3D-microscope we densely recorded from hundreds of neurons in the mouse visual cortex and measured the amount of information encoded. We find that the information in this sensory population increases approximately linearly with population size and does not saturate, even for several hundred neurons. This information growth is facilitated by a correlation structure that is not aligned with the tuning, making it less harmful than would be predicted from pairwise measurements. Accordingly, a decoder that accounts for the correlation structure outperforms one that does not. Our findings suggest that sensory representations may be more accurate than previously thought and therefore that psychophysical limitations may arise from downstream neural processes rather than limitations in the sensory encoding.},
  web_url     = {http://www.sfn.org/am2015/},
  event_name  = {45th Annual Meeting of the Society for Neuroscience (Neuroscience 2015)},
  event_place = {Chicago, IL, USA},
  state       = {published},
  author      = {Cotton JR; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Froudarakis E; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Saggau P; Tolias AS{atolias}{Department Physiology of Cognitive Processes}}
}
@poster{EckerDTB2015,
  title       = {On the structure of population activity under fluctuations in attentional state},
  year        = {2015},
  month       = {9},
  day         = {16},
  pages       = {185},
  abstract    = {Attention is commonly thought to improve behavioral performance by increasing response gain and suppressing shared variability in neuronal populations. However, both the focus and the strength of attention are likely to vary from one experimental trial to the next, thereby inducing response variability unknown to the experimenter. Here we study analytically how fluctuations in attentional state affect the structure of population responses in a simple model of spatial and feature attention. In our model, attention acts on the neural response exclusively by modulating each neuron’s gain. Neurons are conditionally independent given the stimulus and the attentional gain, and correlated activity arises only from trial-to-trial fluctuations of the attentional state, which are unknown to the experimenter. We find that this simple model can readily explain many aspects of neural response modulation under attention, such as increased response gain, reduced individual and shared variability, increased correlations with firing rates, limited range correlations, and differential correlations. We therefore suggest that attention may act primarily by increasing response gain of individual neurons without affecting their correlation structure. The experimentally observed reduction in correlations may instead result from reduced variability of the attentional gain when a stimulus is attended. Moreover, we show that attentional gain fluctuations – even if unknown to a downstream readout – do not impair the readout accuracy despite inducing limited-range correlations.},
  web_url     = {http://www.nncn.de/de/bernstein-conference/2015/program},
  event_name  = {Bernstein Conference 2015},
  event_place = {Heidelberg, Germany},
  state       = {published},
  doi         = {10.12751/nncn.bc2015.0179},
  author      = {Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Denfield GH; Tolias AS{atolias}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@poster{GatysEB2015_2,
  title       = {Texture synthesis and the controlled generation of natural stimuli using convolutional neural networks},
  year        = {2015},
  month       = {9},
  day         = {16},
  pages       = {219},
  abstract    = {It is a long standing question how biological systems transform visual inputs to robustly infer high level visual information. Research in the last decades has established that much of the underlying computations take place in a hierarchical fashion along the ventral visual pathway. However, the exact processing stages along this hierarchy are difficult to characterise. Here we present a method to generate stimuli that will allow a principled description of the processing stages along the ventral stream. We introduce a new parametric texture model based on the powerful feature spaces of convolutional neural networks optimised for object recognition. We show that constraining spatial summary statistic on feature maps suffices to synthesise high quality natural textures. Moreover we establish that our texture representations continuously disentangle high level visual information and demonstrate that the hierarchical parameterisation of the texture model naturally enables us to generate novel types of stimuli for systematically probing mid-level vision.},
  web_url     = {http://www.nncn.de/de/bernstein-conference/2015/program},
  event_name  = {Bernstein Conference 2015},
  event_place = {Heidelberg, Germany},
  state       = {published},
  doi         = {10.12751/nncn.bc2015.0220},
  author      = {Gatys LA; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ KummererTB2014,
title = {Deep Gaze I: Boosting Saliency Prediction with Feature Maps Trained on ImageNet},
year = {2015},
month = {5},
day = {8},
number = {8},
abstract = {Recent results suggest that state-of-the-art saliency models perform far from optimal in predicting fixations. This lack in performance has been attributed to an inability to model the influence of high-level image features such as objects. Recent seminal advances in applying deep neural networks to tasks like object recognition suggests that they are able to capture this kind of structure. However, the enormous amount of training data necessary to train these networks makes them difficult to apply directly to saliency prediction.
We present a novel way of reusing existing neural networks that have been pretrained on the task of object recognition in models of fixation prediction. Using the well-known network of Krizhevsky et al., 2012, we come up with a new saliency model that significantly outperforms all state-of-the-art models on the MIT Saliency Benchmark. We show that the structure of this network allows new insights in the psychophysics of fixation selection and potentially their neural implementation. To train our network, we build on recent work on the modeling of saliency as point processes.},
web_url = {http://arxiv.org/abs/1411.1045},
web_url2 = {http://www.iclr.cc/doku.php?id=iclr2015:main#keynote_talks},
event_name = {International Conference on Learning Representations (ICLR 2015)},
event_place = {San Diego, CA, USA},
state = {submitted},
author = {K{\"u}mmerer M{mkuemmerer}{Research Group Computational Vision and Neuroscience}; Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ BadenFPRKBBSE2015,
title = {Following the visual signal across the entire mouse retina: From cone calcium to ganglion cell spikes},
year = {2015},
month = {3},
day = {19},
pages = {688},
abstract = {The vertebrate retina, with its exquisitely regular organisation and its planar, near transparent structure
offers a powerful playground for the detailed exploration of principles in sensory information processing in general. Moreover, detailed knowledge of the anatomy of the mouse retina is paralleled by few other model systems in neuroscience today. Here, we present a systematic approach to add a physiological dimension to this description, by optically imaging light-evoked activity to visual stimuli that systematically survey key transformations during retinal signal decomposition, including contrast and frequency response functions and responses to Gaussian noise. By following such ``elementary visual responses'' at
key sites within the retinal circuitry, including a clear link to anatomy, that we believe will prove instrumental in exploring a computational description of retinal signal decomposition as whole. We recorded from synapses, dendrites and somata of all excitatory neurons of the mouse cone-pathway. In addition, we recorded from a subset of inhibitory neurons. In the outer retina, we imaged (i) calcium responses from S- and M-cone photoreceptor pedicles in retinal slice of the HR2.1:TN-XL mouse line (Wei et al. 2012, Baden, Schubert et al. 2013). In addition, we monitored (ii) calcium responses in both somata and individual varicosities of horizontal cells using GCaMP3 and GCaMP6 expressed in the Cx57cre/+ line (Ströh et al., 2013) using cross-breeding and AAV, respectively. In the inner retina, we recorded (iii) calcium responses in individual presynaptic terminals of bipolar cells (Baden et al. 2013) and (iv) dendritic tips of retinal ganglion cells labelled with the synthetic calcium indicator OGB-1 or
GCaMP6 introduced using AAV. We also surveyed (v) glutamate release based on iGluSnFR responses (Marvin et al., 2013, Borghuis et al., 2013), expressed either ubiquitously or in specific Cre lines (PV:Cre, Feng et al. 2000; Farrow et al. 2013; Pcp2:Cre, Lewis et al. 2004; Ivanova et al., 2013; ChAT:Cre, Lowell et al., 2006). We also recorded calcium responses in the somata of (vi) all RGCs and (vii) displaced
amacrine cells in the ganglion cell layer after electroporation with OGB-1 (Briggmann and Euler 2011).
These recordings were complemented with (viii) single-unit spike recordings and subsequent intracellular fillings, as well as the use of reporter lines PV:Ai9tdTomato, PcP2:Ai9tdTomato or subsequent immunohistochemistry (GAD67, ChAT) to aid genetic/anatomical classification. Reference to this database will benefit the development of computational models aiming to describe retinal function. In addition, it will form the foundation for a more systematic approach towards understanding the changes in processing during degeneration.},
web_url = {https://www.nwg-goettingen.de/2015/default.asp?id=15},
event_name = {11th Göttingen Meeting of the German Neuroscience Society, 35th Göttingen Neurobiology Conference},
event_place = {Göttingen, Germany},
state = {published},
author = {Baden T; Franke K; Pop S; Roson MR; Kemmler R; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Schubert T; Euler T}
}
@Poster{ NonnenmacherBPBM2015,
title = {Correlations and signatures of criticality in neural population models},
year = {2015},
month = {3},
day = {7},
pages = {207–208},
abstract = {Large-scale recording methods make it possible to measure the statistics of neural population activity, and thereby
to gain insights into the principles that govern the collective activity of neural ensembles. One hypothesis that has emerged from this approach is that neural populations are poised at a ‘thermo-dynamic critical point’, and that this has important functional consequences (Tkacik et al 2014). Support for this hypothesis has come from studies that computed the specific heat, a measure of global population statistics, for groups of neurons subsampled from population recordings. These studies have found two effects which—in physical systems—indicate a critical point: First, specific heat diverges with population size N. Second, when manipulating population statistics by introducing a ’temperature’ in analogy to statistical mechanics, the maximum heat moves towards unit-temperature for large populations. What mechanisms can explain these observations? We show that both effects arise in a simple simulation of retinal population activity. They robustly appear across a range of parameters including biologically implausible ones, and can be understood analytically in simple models. The specific heat grows with N whenever the (average) correlation is independent of N, which is always true when uniformly subsampling a large, correlated population. For weakly correlated populations, the rate of divergence of the specific heat is proportional to the correlation strength. Thus, if retinal population codes were optimized to maximize specific heat, then this would predict that they seek to increase correlations. This is incongruent with theories of efficient coding that make
the opposite prediction. We find criticality in a simple and parsimonious model of retinal processing, and without
the need for fine-tuning or adaptation. This suggests that signatures of criticality might not require an optimized
coding strategy, but rather arise as consequence of sub-sampling a stimulus-driven neural population (Aitchison
et al 2014).},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne2015_Program},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2015)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Nonnenmacher M{mnonnenmacher}; Behrens C; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Macke J{jakob}}
}
@Poster{ BoettcherB2014,
title = {Information theoretic analysis of neural populations},
year = {2014},
month = {9},
day = {3},
pages = {29–30},
abstract = {The information of the stimulus variable S in a population of n observed neurons R0…Rn can be measured using the mutual information I(S:R0…Rn). In order to gain a deeper understanding about the stimulus encoding in the neural population the question arises how to decompose the mutual information. A decomposition may reveal sets of neurons that share the same information about the stimulus, or sets of neurons that encode information synergistically or a single neuron that may encode information about the stimulus that is not present in any of the other observed neurons.
There are several properties that seem obviously necessary to hold for a mutual information decomposition. But these properties alone do not provide enough constraints to result in a unique solution and it is not clear how to resolve for this ambiguity. Several approaches have been suggested recently ([Williams2010], [Harder2013], [Griffith2014]) but we find that all of them suffer from certain caveats.
Here, we introduce a new approach how to decompose the mutual information of two different neural populations with a stimulus into independent, unique, and synergistic components. We demonstrate its strength compared to previously proposed decompositions and present several examples that corroborate its usefulness. In particular, we believe the decomposition can serve as a useful relaxation to the problem of causality estimation.},
web_url = {http://abstracts.g-node.org/abstracts/b7738738-876e-44da-ac2e-6524d99e1d32},
event_name = {Bernstein Conference 2014},
event_place = {Göttingen, Germany},
state = {published},
DOI = {10.12751/nncn.bc2014.0025},
author = {Boettcher A; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ FroudarakisBECSYSBT2014_2,
title = {Population Code in Mouse V1 Facilitates Read-out of Natural Scenes through Increased Sparseness},
year = {2014},
month = {6},
pages = {69},
abstract = {The neural code is believed to have adapted to the statistical properties of the natural environment.
However, the principles that govern the organization of ensemble activity in the visual cortex during natural visual input are unknown. We recorded populations of up to 500 neurons in the mouse primary visual cortex and characterized the structure of their activity, comparing
responses to natural movies with those to control stimuli. We found that higher-order correlations in natural scenes induce a sparser code, in which information is encoded by reliable activation of a smaller set of neurons and can be read-out more easily. This computationally advantageous encoding for natural scenes was state-dependent and apparent only in anesthetized and active, awake animals, but not during quiet wakefulness. Our results argue for a
functional benefit of sparsification that could be a general principle governing the structure of the population activity throughout cortical microcircuits.},
web_url = {http://areadne.org/2014/home.html},
event_name = {AREADNE 2014: Research in Encoding and Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Froudarakis E; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Cotton RJ; Sinz FH{fabee}{Research Group Computational Vision and Neuroscience}; Yatsenko D; Saggau P; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}}
}
@Poster{ EckerBCSDCSBT2014_2,
title = {State Dependence of Noise Correlation in Macaque Primary Visual Cortex},
year = {2014},
month = {6},
pages = {64},
abstract = {Shared, trial-to-trial variability in neuronal populations has a strong impact on the accuracy of information processing in the brain. Estimates of the level of such noise correlations are diverse, ranging from 0.01 to 0.4, with little consensus on which factors account for these
differences. Here we addressed one important factor that varied across studies, asking how anesthesia affects the population activity structure in macaque primary visual cortex. We found that under opioid anesthesia, activity was dominated by strong coordinated fluctuations on a timescale of 1–2 Hz, which were mostly absent in awake, fixating monkeys. Accounting for these global fluctuations markedly reduced correlations under anesthesia, matching those
observed during wakefulness and reconciling earlier studies conducted under anesthesia and in awake animals. Our results show that internal signals, such as brain state transitions under anesthesia, can induce noise correlations, but can also be estimated and accounted for based on neuronal population activity.},
web_url = {http://areadne.org/2014/home.html},
event_name = {AREADNE 2014: Research in Encoding and Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Cotton RJ; Subramaniyan M; Denfield GH; Cadwell CR; Smirnakis SM{ssmirnakis}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}}
}
@Poster{ BerensBFRBE2014,
title = {What the Mouse Eye Tells the Mouse Brain: A Semi-Supervised Clustering Approach for Fingerprinting the Retinal Ganglion Cell Types of the Mouse Retina},
year = {2014},
month = {6},
pages = {53},
abstract = {In the retina, the stream of incoming visual information is split into multiple parallel channels, formed by different kinds of photoreceptors (PRs), bipolar cells (BCs) and ganglion cells (RGCs). These cells form complex circuits with additional interneurons tuning the channels to distinct
sets of visual features. The RGCs relay the output of each channel to the brain—understanding how the visual scenery is encoded by the outputs of the approximately 20 RGC types will thus yield a complete picture of the representation of the visual scene available to the brain.
To identify a functional fingerprint for each RGC type in the mouse retina, we use 2P imaging to measure Ca++ activity in RGCs evoked by a set of stimuli, including frequency/contrast modulated full-field and white noise stimuli. So far our database contains recordings of over
10,000 cells from the RGC layer. In addition, we obtained recordings from transgenic PV1 mice, in which 8 morphologically distinct RGC types are fluorescently labeled and can be identified based on their anatomy. Moreover, we performed single-cell recordings from a few dozen RGCs to relate their spiking responses to the somatic calcium signals and to compare their morphologies with published RGC catalogues.
We implemented a probabilistic clustering framework for separating RGCs into functional types based on features extracted from their responses to the different visual stimuli using PCA. We used a semi-supervised mixture of Gaussians Clustering algorithm, which allowed us to
incorporate the uncertain label information provided by the recordings from the PV1 mice into the clustering. For our data, we obtain 25–29 functional clusters, which separate into 17–21 RGC clusters and 8 displaced amacrine cell (dAC) clusters, as verified using glutamate decarboxylase (GAD) immunostaining. These numbers match well the number of RGC and dAC types expected in mouse retina. The RGC types include many known cell types (off and on alpha, W3, on-off direction-selective), as verified using our single cell data (e.g., alpha RGCs) and additional information available (e.g., soma size/shape and retinal tiling). In addition, they include new functional RGC types, such as a contrast-suppressed type, not readily matched to
previously described ones. Our results suggest that a functional fingerprint for each RGC in the mouse retina is within reach.},
web_url = {http://areadne.org/2014/home.html},
event_name = {AREADNE 2014: Research in Encoding and Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Berens P{berens}{Research Group Computational Vision and Neuroscience}; Baden T; Franke K; Rezac M; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Euler T}
}
@Poster{ GatysETB2013,
title = {Information Coding in the Variance of Neural Activity},
year = {2013},
month = {9},
pages = {44},
abstract = {Neural activity in the cortex appears to be notoriously noisy. A widely accepted explanation for this finding is that excitatory and inhibitory inputs to downstream neurons are balanced in a way that the upstream population activity does not affect the mean but only the variance of the input current. This can be thought of as a multiplicative noise channel. However, the capacity limits imposed by this information channel are not known. Here we develop a general understanding of the encoding process in terms of scale mixture processes and derive information-theoretic bounds on their performance. Our results show that signal transmission via instantaneous changes in the variance can behave quite differently from the common additive noise channel. We perform systematic numerical analyses to maximize the information across the variance channel and thus obtain tight lower bounds to its capacity. Furthermore, we found that additional noise, resembling the unreliable synaptic transmission of spikes, can surprisingly enhance the coding performance of the channel.},
web_url = {https://portal.g-node.org/abstracts/bc13/#/doi/nncn.bc2013.0020},
event_name = {Bernstein Conference 2013},
event_place = {Tübingen, Germany},
state = {published},
DOI = {10.12751/nncn.bc2013.0020},
author = {Gatys L; Ecker A{aecker}; Tchumatchenko T; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ FarzamiTB2013,
title = {Neural Adaptation as Bayesian Inference},
year = {2013},
month = {9},
pages = {44},
abstract = {Capturing stimulus-response relationships is one of the key problems in sensory neuroscience. Due to the stochasticity inherent in neural responses, probabilistic models provide a natural framework for approaching this problem. Generalized linear models (GLMs) are a family of probabilistic models frequently used for characterizing neural spike responses. Popular special cases include the linear nonlinear Poisson model (LNP) and, history dependent LNP models. We applied both types of models to data recorded from whisker-sensitive neurons in the right trigeminal ganglion cells of adult Sprague-Dawley rats stimulated with white noise. We found that the LNP model falls short of explaining the experimental data. Since most of these types of cells are highly adaptive, a likely explanation of the observed shortcoming of LNP models is their inability to represent adaptation effects. Here, we explore the idea that adaptation can be understood as a form of Bayesian inference. We use a dynamical latent variable model to infer parameters of the stimulus. Using the inferred parameters, we adjusted the history dependent LNP models. This not only allows us to improve the spike prediction performance of these models, but also to study the assumptions about the stimulus encoded in the cells, as well as their rate of adaptation.},
web_url = {https://portal.g-node.org/abstracts/bc13/#/doi/nncn.bc2013.0019},
event_name = {Bernstein Conference 2013},
event_place = {Tübingen, Germany},
state = {published},
DOI = {10.12751/nncn.bc2013.0019},
author = {Farzami T; Theis L; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ BadenBFRB2013,
title = {What the mouse eye tells the mouse brain: Recording the entire visual representation along the vertical pathway in the retina},
year = {2013},
month = {9},
number = {W61},
abstract = {Right at the first synapse, the stream of incoming visual information is split into multiple parallel channels, represented in the retina by different kinds of photoreceptors (PRs), bipolar cells (BCs) and ganglion cells (RGCs). Complex circuits and, in particular, synaptic interactions in the retina’s two synaptic layers tune these channels to distinct sets of visual features. Cracking the “retinal code”, that is understanding how the visual scenery is encoded by the outputs of the ~20 RGC types, is a major aim of vision research. Here, we study the signal at different processing stages of the retinal signal channels by recording from the majority of cells in the vertical cone photoreceptor pathway, including PR, BC[1] and RGC types[2]. We use 2P imaging in the mouse retina to measure Ca2+ activity evoked by a comprehensive set of stimuli, including frequency/contrast modulated full-field and white noise stimuli. So far our database contains recordings of ~100 BCs and >7,000 RGCs. In addition, we started with electrical single-cell RGC measurements, which provide us with ground truth data about spiking activity underlying Ca2+ signals and anatomical descriptions that can be compared with published RGC catalogues. We have implemented a probabilistic framework for clustering RGCs into functional types based on their responses to different visual stimuli. Clustering is refined and verified by employing reference data (e.g. soma size/shape and retinal tiling). A similar approach allowed us to cluster BC responses into 8 morpho-functional clusters[1]. For RGCs (and displaced amacrine cells), ~25-29 functional clusters can be distinguished, some of which were already verified using our single cell data (e.g. alpha RGCs). Our results suggest that this dataset allows us to study the computations performed along the retina’s vertical pathway and to obtain a complete sample of the information the mouse eye sends to the mouse brain.},
web_url = {https://portal.g-node.org/abstracts/bc13/#/doi/nncn.bc2013.0064},
event_name = {Bernstein Conference 2013},
event_place = {Tübingen, Germany},
state = {published},
DOI = {10.12751/nncn.bc2013.0064},
author = {Baden T; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Franke K; Rezac M; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ BethgeLDT2013,
title = {A generative model of natural images as patchworks of textures},
year = {2013},
month = {3},
number = {I-9},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_13},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2013)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Luedtke N; Das D{ddas}{Research Group Computational Vision and Neuroscience}; Theis L{lucas}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ TheisAMSB2013,
title = {Beyond GLMs: a generative mixture modeling approach to neural system identification},
year = {2013},
month = {3},
number = {II-92},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_13},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2013)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Arnstein D{darnstein}{Research Group Computational Vision and Neuroscience}; Maia Chagas A; Schwarz C; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ FroudarakisBCESBT2013,
title = {Encoding of natural scene statistics in the primary visual cortex of the mouse},
year = {2013},
month = {3},
number = {II-76},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_13},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2013)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Froudarakis E; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Cotton RJ; Ecker AS{aecker}; Saggau P; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Tolias A{atolias}}
}
@Poster{ BerensBBE2013,
title = {Recording the entire visual representation along the vertical pathway in the mammalian retina},
year = {2013},
month = {3},
number = {II-77},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_13},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2013)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Berens P{berens}{Research Group Computational Vision and Neuroscience}; Baden T; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Euler T}
}
@Poster{ TheisACSB2012,
title = {Beyond GLMs: a generative mixture modeling approach to
neural system identification},
journal = {Frontiers in Computational Neuroscience},
year = {2012},
month = {9},
day = {14},
volume = {Conference Abstract: Bernstein Conference 2012},
pages = {165},
abstract = {One of the principle goals of sensory systems neuroscience is to characterize the relationship between external stimuli and neuronal responses. A popular choice for modeling the responses of neurons is the generalized linear model (GLM). However, due to its inherent linearity, choosing a set of nonlinear features is often crucial but can be difficult in practice if the stimulus dimensionality is high or if the stimulus-response dependencies are complex. Here, we derive a more flexible neuron model which is able to automatically extract highly nonlinear stimulus-response relationships from the data. We start out by representing intuitive and well understood distributions such as the spike-triggered and inter-spike interval
distributions using nonparametric models. For instance, we use mixtures of Gaussians to represent spike-triggered distributions which allows for complex stimulus dependencies such as those of cells with multiple preferred stimuli. A simple application of Bayes’ rule allows us to
turn these distributions into a model of the neuron’s response, which we dub spike-triggered mixture model (STM).
We demonstrate the superior representational power of the STM by fitting it to data generated by a trained GLM and vice versa. While the STM is able to reproduce the behavior of the GLM, the opposite is not the case. We also apply our model to single-cell recordings of primary afferents of the rat’s whisker system and find quantitatively and qualitatively that it is able to better reproduce the cells’ behavior than the GLM. In particular, we obtain much higher estimates of the cells’ mutual information rates.},
web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2012.55.00080/event_abstract},
event_name = {Bernstein Conference 2012},
event_place = {München, Germany},
state = {published},
DOI = {10.3389/conf.fncom.2012.55.00080},
author = {Theis LM{lucas}{Research Group Computational Vision and Neuroscience}; Arnstein D{darnstein}{Research Group Computational Vision and Neuroscience}; Chagas AM; Schwarz C; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ GerhardWB2012_2,
title = {How Sensitive Is the Human Visual System to the Local Statistics of Natural Images?},
journal = {Frontiers in Computational Neuroscience},
year = {2012},
month = {9},
day = {14},
volume = {Conference Abstract: Bernstein Conference 2012},
pages = {175},
abstract = {A key hypothesis in sensory system neuroscience is that sensory representations are adapted to the statistical regularities in sensory signals and thereby incorporate knowledge about the outside world. Supporting this hypothesis, several probabilistic models of local natural image regularities have been proposed that reproduce neural response properties. Although many such physiological links have been made, these models have not been linked directly to visual sensitivity. Previous psychophysical studies focus on global perception of large images, so little is known about sensitivity to local regularities. We present a new paradigm for controlled psychophysical studies of local natural image regularities and use it to compare how well such models capture perceptually relevant image content. To produce image stimuli with precise statistics, we start with a set of patches cut from natural images and alter their content to generate a matched set of patches whose statistics are equally likely under a model’s assumptions. Observers have the task of discriminating natural patches from model patches in a forced choice experiment. The results show that human observers are remarkably sensitive to local correlations in natural images and that no current model is perfect for patches as small as 5 by 5 pixels or larger. Furthermore, discrimination performance was accurately predicted by model likelihood, an information theoretic measure of model efficacy, which altogether suggests that the visual system possesses a surprisingly large knowledge of natural image higher-order correlations, much more so than current image models. We also perform three cue identification experiments where we measure visual sensitivity to selected natural image features. The results reveal several prominent features of local natural image regularities including contrast fluctuations and shape statistics.},
web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2012.55.00053/event_abstract},
event_name = {Bernstein Conference 2012},
event_place = {München, Germany},
state = {published},
DOI = {10.3389/conf.fncom.2012.55.00053},
author = {Gerhard HE{hgerhard}{Research Group Computational Vision and Neuroscience}; Wichmann FA{felix}{Department Empirical Inference}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ TheisHB2012,
title = {Mixtures of conditional Gaussian scale mixtures: the best
model for natural images},
journal = {Frontiers in Computational Neuroscience},
year = {2012},
month = {9},
day = {14},
volume = {Conference Abstract: Bernstein Conference 2012},
pages = {247},
abstract = {Modeling the statistics of natural images is a common problem in computer vision and computational neuroscience. In computational neuroscience, natural image models are used as a means to understand the input to the visual system as well as the visual system’s internal representations of the visual input.
Here we present a new probabilistic model for images of arbitrary size. Our model is a directed graphical model based on mixtures of Gaussian scale mixtures. Gaussian scale mixtures have been repeatedly shown to be suitable building blocks for capturing the statistics of natural images, but have not been applied in a directed modeling context. Perhaps surprisingly—given the much larger popularity of the undirected Markov random field approach—our directed model yields unprecedented performance when applied to natural images while also being easier to train, sample and evaluate.
Samples from the model look much more natural than samples of other models and capture many long-range higher-order correlations. When trained on dead leave images or textures, the model is able to reproduce many properties of these as well—showing the flexibility of our model. By extending the model to multiscale representations, it is able to reproduce even longer-range correlations.
An important measure to quantify the amount of correlations captured by a model is the average log-likelihood. We evaluate our model as well as several other patch-based and whole-image models and show that it yields the best performance reported to date when measured in bits per pixel. A problem closely related to image modeling is image compression. We show that our model can compete even with some of the best image compression algorithms.},
web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2012.55.00079/event_abstract},
event_name = {Bernstein Conference 2012},
event_place = {München, Germany},
state = {published},
DOI = {10.3389/conf.fncom.2012.55.00079},
author = {Theis LM{lucas}{Research Group Computational Vision and Neuroscience}; Hosseini R{hosseini}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ SinzB2012,
title = {Temporal adaptation enhances efficient contrast gain control on natural images},
journal = {Frontiers in Computational Neuroscience},
year = {2012},
month = {9},
day = {13},
volume = {Conference Abstract: Bernstein Conference 2012},
pages = {67–68},
abstract = {The redundancy reduction hypothesis postulates that neural representations adapt to sensory input statistics such that their responses become as statistically independent as possible. Based on this hypothesis, many properties of early visual neurons-like orientation selectivity or divisive normalization-have been linked to natural image statistics. Divisive normalization, in particular, models a widely observed neural response property: The divisive inhibition of a single neuron by a pool of others. This mechanism has been shown to reduce the redundancy among neural responses to typical contrast dependencies in natural images. Using recent advances in natural image modeling, we show that the previously studied static model of divisive normalization achieves substantially less redundancy reduction than a theoretically optimal redundancy reduction mechanism called radial factorization. This optimal mechanism, however, is inconsistent with the existing neurophysiological observations. We suggest a new physiologically plausible modification of the standard model which accounts for the dynamics of the visual input by adapting to local contrasts during fixations. In this way the dynamic version of the standard model achieves almost optimal redundancy reduction performance. Our results imply that the dynamics of natural viewing conditions are critical for testing the role of divisive normalization for redundancy reduction.},
web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2012.55.00048/event_abstract?sname=Bernstein_Conference_2012},
event_name = {Bernstein Conference 2012},
event_place = {München, Germany},
state = {published},
DOI = {10.3389/conf.fncom.2012.55.00048},
author = {Sinz F{fabee}; Bethge M{mbethge}}
}
@Poster{ EckerBTB2012_2,
title = {The correlation structure induced by fluctuations in attention},
year = {2012},
month = {6},
pages = {56},
abstract = {How attention shapes the structure of population activity has attracted substantial interest over the past decades. Attention has traditionally been associated with an increase in firing rates, reflecting a change in the gain of the population. More recent studies also report a
change in noise correlations, which is thought to reflect changes in functional connectivity.
However, since the degree of attention can vary substantially from trial to trial even within one experimental condition, the measured correlations could actually reflect fluctuations in the attention-related feedback signal (gain) rather than feed-forward noise, as often assumed.
To gain insights into this issue we analytically analyzed the standard model of spatial attention, where directing attention to the receptive field of a neuron increases its response gain. We assumed conditionally independent neurons (no noise correlations) and asked how uncontrolled
fluctuations in attention affect the correlation structure.
First, we found that this simple model of spatial attention explains the empirically measured correlation structure quite well. In addition to a positive average level of correlations, it predicts both an increase in correlations with firing rates, as observed in many studies, and a
decrease in correlations with the difference of two neurons’ tuning functions — a structure generally referred to as limited range correlations.
Second, we asked how fluctuations in attention would affect the accuracy of a population code, if treated as noise
by a downstream readout. Based on previous theoretical results, it would be expected that they negatively affect
readout accuracy because of the limited range correlations they induce. Surprisingly, we found that this is not
the case: correlations due to random gain fluctuations do not affect readout accuracy because their major axis is
orthogonal to changes in the stimulus orientation.
Our results can be readily generalized to include feature-based attention. The model has very few free parameters and can potentially account for a large fraction of the experimentally observed spike count (co-)variance.},
web_url = {http://areadne.org/2012/home.html},
event_name = {AREADNE 2012: Research in Encoding and Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ GerhardB2012,
title = {Perceptual relevance of neurally-inspired natural image models evaluated via contour discrimination},
year = {2012},
month = {2},
volume = {9},
pages = {200},
abstract = {statistical regularities in sensory signals and thus acquire knowledge about the outside world (Barlow, 1997). In
vision, several probabilistic models of local natural image regularities have been proposed which intriguingly
replicate neural response properties (Atick&Redlich 1992, Bell&Sejnowski 1997, Schwartz&Simoncelli 2001,
Karklin&Lewicki 2009). To evaluate how such models relate to functional vision, we previously measured their
perceptual relevance using a discrimination task pitting model image patches against true natural image patches
(Gerhard, Wichmann, Bethge, 2011). Observers were remarkably sensitive to the regularities of grayscale
patches, even for patches as small as 3x3 pixels. Performance relied greatly on how well the models captured
luminance features like contrast fluctuation. Here we focus on how well the models capture local contour information
in natural images. In a two-alternative forced choice task, observers viewed two tightly-tiled textures of
binary image patches, one comprised of natural image samples, the other of model patches. The task was to
select the natural image samples. We measured discrimination performance at patch sizes from 3x3 to 8x8 pixels for 8 models spanning the range from low likelihood to one among the current best in terms of likelihood. We
compared human performance to an ideal observer with perfect knowledge of the natural distribution for patch
sizes at which we could empirically estimate the distribution and tested potential texture cues with a classification analysis. While human performance suggested suboptimal strategies were used to discriminate contour statistics relative to grayscale statistics, observers were well above chance with binary 4x4 pixel patches and larger, meaning that neuronally-inspired models do not yet capture enough of the contour regularities in natural images that
functional human vision can detect, even in very small natural image patches.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_12},
event_name = {9th Annual Computational and Systems Neuroscience Meeting (Cosyne 2012)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Gerhard H{hgerhard}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ EckerBTB2012,
title = {The correlation structure induced by fluctuations in attention},
year = {2012},
month = {2},
volume = {9},
pages = {180},
abstract = {Attention has traditionally been associated with an increase in firing rates, reflecting a change in the gain of the
population. More recent studies also report a change in noise correlations, which is thought to reflect changes
in functional connectivity. However, since the degree of attention can vary substantially from trial to trial even
within one experimental condition, the measured correlations could actually reflect fluctuations in the attention-related feedback signal (gain) rather than feed-forward noise, as often assumed. To gain insights into this issue we analytically analyzed the standard model of spatial attention, where directing attention to the receptive field of a neuron increases its response gain. We assumed conditionally independent neurons (no noise correlations) and asked how uncontrolled fluctuations in attention affect the correlation structure. First, we found that this simple model of spatial attention explains the empirically measured correlation structure quite well. In addition to a positive average level of correlations, it predicts both an increase in correlations with firing rates, as observed in many studies, and a decrease in correlations with the difference of two neurons’ tuning functions—a structure generally referred to as limited range correlations. Second, we asked how fluctuations in attention would affect the accuracy of a population code, if treated as noise by a downstream readout. Based on previous theoretical results, it would be expected that they negatively affect readout accuracy because of the limited range correlations they induce. Surprisingly, we found that this is not the case: correlations due to random gain fluctuations do not affect readout accuracy because their major axis is orthogonal to changes in the stimulus orientation. Our results can be readily generalized to include feature-based attention. The model has very few free parameters and can potentially account for a large fraction of the observed spike count (co-)variance.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_12},
event_name = {9th Annual Computational and Systems Neuroscience Meeting (Cosyne 2012)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Ecker A{aecker}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Tolias A{atolias}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ HaefnerGMB2011,
title = {Relationship between decoding strategy, choice
probabilities and neural correlations in perceptual decision-making task},
year = {2011},
month = {11},
volume = {41},
number = {17.09},
abstract = {When monkeys make a perceptual decision about ambiguous visual stimuli, individual sensory neurons in MT and other areas have been shown to covary with the decision. This observation suggests that the response variability in those very neurons causes the animal to choose one over the other option. However, the fact that sensory neurons are correlated has greatly complicated attempts to link those covariances (and the associated choice probabilities) to a direct involvement of any particular neuron in a decision-making task.
Here we report on an analytical treatment of choice probabilities in a population of correlated sensory neurons read out by a linear decoder. We present a closed-form solution that links choice probabilities, noise correlations and decoding weights for the case of fixed integration time. This allowed us to analytically prove and generalize a prior numerical finding about the choice probabilities being only due to the difference between the correlations within and between decision pools (Nienborg & Cumming 2010) and derive simplified expressions for a range of interesting cases. We investigated the implications for plausible correlation structures like pool-based and limited-range correlations.
We found that the relationship between choice probabilities and decoding weights is in general non-monotonic and highly sensitive to the underlying correlation structure. In fact, given empirical measures of the interneuronal correlations and CPs, our formulas allow to infer the individual neuronal decoding weights. We confirmed the feasibility of this approach using synthetic data. We then applied our analytical results to a published dataset of empirical noise correlations and choice probabilities (Cohen & Newsome 2008 and 2009) recorded during a classic motion discriminating task (Britten et al 1992). We found that the data are compatible with an optimal read-out scheme in which the responses of neurons with the correct direction preference are summed and those with perpendicular preference, but positively correlated noise, are subtracted. While the correlation data of Cohen & Newsome (being based on individual extracellular electrode recordings) do not give access to the full covariance structure of a neural population, our analytical formulas will make it possible to accurately infer individual read-out weights from simultaneous population recordings.},
web_url = {http://www.sfn.org/am2011/},
event_name = {41st Annual Meeting of the Society for Neuroscience (Neuroscience 2011)},
event_place = {Washington, DC, USA},
state = {published},
author = {Haefner RM{rhaefner}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ TheisHB2011,
title = {A multiscale model of natural images},
year = {2011},
month = {10},
volume = {12},
pages = {43},
abstract = {We present a probabilistic model for natural images which is based on Gaussian scale mixtures
and a simple multiscale representation. In contrast to the dominant approach to modeling
whole images focusing on Markov random fields, we formulate our model in terms of a directed
graphical model. We show that it is able to generate images with interesting higher-order
correlations when trained on natural images or samples from an occlusion based model. More
importantly, the directed model enables us to perform a principled evaluation. While it is
easy to generate visually appealing images, we demonstrate that our model also yields the
best performance reported to date when evaluated with respect to the cross-entropy rate, a
measure tightly linked to the average log-likelihood.},
event_name = {12th Conference of Junior Neuroscientists of Tübingen (NeNA 2011)},
event_place = {Heiligkreuztal, Germany},
state = {published},
author = {Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Hosseini R{hosseini}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ ArnsteinTCBS2011,
title = {LNP Analysis of Primary Whisker Afferents},
year = {2011},
month = {10},
volume = {12},
pages = {21},
abstract = {Little is known about what information is encoded by primary whisker afferents. Using extracellular single-unit recordings from the trigeminal ganglion during white noise stimulation of the innervated whisker, we attempted to characterize neurons’ response profiles using the
linear-nonlinear-Poisson (LNP) model.},
event_name = {12th Conference of Junior Neuroscientists of Tübingen (NeNA 2011)},
event_place = {Heiligkreuztal, Germany},
state = {published},
author = {Arnstein D{darnstein}{Research Group Computational Vision and Neuroscience}; Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Chagas AM; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Schwarz C}
}
@Poster{ LiesHB2011,
title = {Slow Subspace Analysis: a New Algorithm for Invariance Learning},
year = {2011},
month = {10},
volume = {12},
pages = {34},
abstract = {The appearance of objects in an image can change dramatically depending on their pose,
distance, and illumination. Learning representations that are invariant against such appearance
changes can be viewed as an important preprocessing step which removes distracting
variance from a data set, so that downstream classifiers or regression estimators perform
better. Complex cells in primary visual cortex are commonly seen as building blocks for such
invariant image representations (e.g. Riesenhuber & Poggio 2000). While complex cells, like
simple cells, respond to edges of particular orientation they are less sensitive to the precise
location of the edge. A variety of neural algorithms have been proposed that aim at
explaining the response properties of complex cells as components of an invariant representation
that is optimized for the spatio-temporal statistics of the visual input. For certain
classes of transformations (e.g. translations, scalings, and rotations), it is possible to analytically
derive features that are invariant under these transformations, and the design of such
invariant features has been studied extensively in computer vision. The range of naturally
occurring transformations, however, is much more variable and not precisely known. Thus,
an analytical design of invariant features does not seem feasible. Instead one can seek to
find features that may not be perfectly invariant anymore but which on average change as
slowly as possible under the transformations occurring in the data (Földiák 1991). The best
known instantiation of this approach is slow feature analysis (SFA) which has been proposed
to underlie the formation of complex cell receptive fields (Berkes & Wiskott 2005). From a
machine learning perspective, SFA can be seen as a special case of oriented principal component
analysis that greedily searches for filters that maximize the signal-to-noise ratio if the
variations generated by the transformational changes are considered noise. For the learning of
complex cells the algorithm has been applied in the quadratic feature space. Here we present
a new algorithm called slow subspace analysis (SSA). SSA combines the slowness objective
of SFA with the energy model known from steerable filter theory such that it yields perfectly
invariant steerable filters in the ideal analytically tractable cases. There are two important
differences between SFA and SSA: First, while SSA uses the same slowness criterion as SFA
for each individual feature, it replaces the greedy search strategy by optimizing all filters
simultaneously for the best average slowness, and second, the optimization in SSA is done
only over the (n2 + n)/2 dimensional parameter space of orthogonal transforms on the original
n-dimensional signal space while for complex cell learning with SFA the optimization
is carried out over the entire quadratic feature space for which the number of parameters is
much larger, i.e. (n4+2n3−n2−2n)/8. These differences make SSA an interesting alternative
to SFA. In particular, the theoretical grounding of SSA in steerable filter theory is attractive
as it allows one to carry out meaningful model comparisons between different algorithms.
Accordingly, we show that our new algorithm exhibits larger slowness than SFA for various
important examples such as translations, rotations and scalings as well as natural movies.},
event_name = {12th Conference of Junior Neuroscientists of Tübingen (NeNA 2011)},
event_place = {Heiligkreuztal, Germany},
state = {published},
author = {Lies P; Haefner RM{rhaefner}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ GerhardWWB2011,
title = {Perceptual Sensitivity to Statistical Regularities in Natural Images},
year = {2011},
month = {3},
pages = {745},
abstract = {A long standing hypothesis is that neural representations are adapted to environmental statistical regularities
(Attneave 1954, Barlow 1959), yet the relation between the primate visual system’s functional properties and the
statistical structure of natural images is still unknown. The central problem is that the high-dimensional space of
natural images is difficult to model. While many statistical models of small image patches that have been
suggested share certain neural response properties with the visual system (Atick 1990, Olshausen&Field 1996,
Schwarz&Simoncelli 2001), it is unclear how informative they are about the functional properties of visual
perception. Previously, we quantitatively evaluated how different models capture natural image statistics using
average log-loss (e.g. Eichhorn et al, 2009). Here we assess human sensitivity to natural image structure by
measuring how discriminable images synthesized by statistical models are from natural images. Our goal is to
improve the quantitative description of human sensitivity to natural image regularities and evaluate various
models’ relative efficacy in capturing perceptually relevant image structure.
Methods
We measured human perceptual thresholds to detect statistical deviations from natural images. The task was two
alternative forced choice with feedback. On a trial, two textures were presented side-by-side for 3 seconds: one a
tiling of image patches from the van Hateren photograph database, the other of model-synthesized images (Figure
1A). The task was to select the natural image texture.
We measured sensitivity at 3 patch sizes (3x3, 4x4, & 5x5 pixels) for 7 models. Five were natural image models: a
random filter model capturing only 2nd order pixel correlations (RND), the independent component analysis model
(ICA), a spherically symmetric model (L2S), the Lp-spherical model (LpS), and the mixture of elliptically
contoured distributions (MEC) with cluster number varied at 4 levels (k = 2, 4, 8, & 16). For MEC, we also used
patch size 8x8. We also tested perceptual sensitivity to independent phase scrambling in the Fourier basis (IPS)
and to global phase scrambling (GPS) which preserves all correlations between the phases and between the
amplitudes but destroys statistical dependences between phases and amplitudes. For each type, we presented 30
different textures to 15 naïve subjects (1020 trials/subject).
Results
Figure 1B shows performance by patch size for each model. Low values indicate better model performance as the
synthesized texture was harder to discriminate from natural. Surprisingly, subjects were significantly above chance
in all cases except at patch size 3x3 for MEC. This shows that human observers are highly sensitive to local
higher-order correlations as the models insufficiently reproduced natural image statistics for the visual system.
Further, the psychometric functions’ ordering parallels nicely the models’ average log-loss ordering, beautifully so
within MEC depending on cluster number, suggesting that the human visual system may have near perfect
knowledge of natural image statistical regularities and that average log-loss is a useful model comparison measure
in terms of perceptual relevance. Next, we will determine the features human observers use to discriminate the
textures’ naturalness which can help improve statistical modeling of perceptually relevant natural image structure.},
web_url = {https://www.nwg-goettingen.de/2011/default.asp?scientific_program},
event_name = {9th Göttingen Meeting of the German Neuroscience Society, 33rd Göttingen Neurobiology Conference},
event_place = {Göttingen, Germany},
state = {published},
author = {Gerhard HE{hgerhard}{Research Group Computational Vision and Neuroscience}; Wiecki T{wiecki}{Research Group Computational Vision and Neuroscience}; Wichmann F{felix}{Department Empirical Inference}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ BerensEGTB2011_2,
title = {Optimal Population Coding, Revisited},
year = {2011},
month = {2},
number = {III-67},
abstract = {Cortical circuits perform computations within few dozens of milliseconds with each neuron emitting only a few spikes. In this regime conclusions based on Fisher information, which is commonly used to assess the quality of population codes, are not always valid. Here we revisit the effect of tuning function width and correlation structure on neural population codes for angular variables using ideal observer analysis in both reconstruction and classification tasks employing Monte-Carlo simulations and analytical derivations. We show that the optimal tuning width of individual neurons and the optimal correlation structure of the population depend on the signal-to-noise ratio for both the reconstruction and the classification task. Strikingly, both ideal observers lead to very similar conclusions at low signal-to-noise ratio. In contrast, Fisher information favors severely suboptimal coding schemes in this regime. To further investigate the coding properties of Fisher-optimal codes, we compute the full neurometric functions of an ideal observer in the stimulus discrimination task, which allows us to evaluate population codes separately for fine and coarse discrimination. We find that codes with Fisher-optimal tuning width show strikingly bad performance for simple coarse discrimination tasks with a ‘pedestal error’, which is independent of population size. We show analytically that this is a necessary consequence of the fact that in such codes only few neurons are activated by each stimulus, irrespective of the population size. Further we show that the initial region of the neurometric function goes to zero with increasing population size. As a consequence, the overall error achieved by Fisher-optimal ensembles saturates for large populations.
In summary, based on exact ideal observer analysis for both stimulus reconstruction and discrimination tasks we obtained (1) an accurate assessment of neural population codes at all signal-to-noise ratios and (2) analytical insights into the suboptimal behavior of Fisher-optimal population codes.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_11_posters3},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2011)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Berens P{berens}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ MackeOB2011_2,
title = {The effect of common input on higher-order correlations and
entropy in neural populations},
year = {2011},
month = {2},
number = {III-68},
abstract = {Finding models for capturing the statistical structure of multi-neuron firing patterns is a major challenge in sensory neuroscience. Recently, Maximum Entropy (MaxEnt) models have become popular tools for studying neural population recordings [4, 3]. These studies have found that small populations in retinal, but not in local cortical circuits, are well described by models based on pairwise correlations. It has also been found that entropy in small populations grows sublinearly [4], that sparsity in the population code is related to correlations [3], and it has been conjectured that neural populations might be at a ‘critical point’. While there have been many empirical studies using MaxEnt models, there has arguably been a lack of analytical studies that might explain the diversity of their findings. In particular, theoretical models would be of great importance for investigating their implications for large populations. Here, we study these questions in a simple, tractable population model of neurons receiving Gaussian inputs [1, 2]. Although the Gaussian input has maximal entropy, the spiking-nonlinearities yield non-trivial higher-order correlations (‘hocs’). We find that the magnitude of hocs is strongly modulated by pairwise correlations, in a manner which is consistent with neural recordings. In addition, we show that the entropy in this model grows sublinearly for small, but linearly for large populations. We characterize how the magnitude of hocs grows with population size. Finally, we find that the hocs in this model lead to a diverging specific heat, and therefore, that any such model appears to be at a critical point. We conclude that common input might provide a mechanistic explanation for a wide range of recent empirical observations. [1] SI Amari, H Nakahara, S Wu, Y Sakai. Neural Comput, 2003. [2] JH Macke, M Opper, M Bethge. ArXiv, 2010. [3] IE Ohiorhenuan, et al. Nature, 2010. [4] E Schneidman, MJ Berry, R Segev, W Bialek.
Nature, 2006.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_11_posters3},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2011)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Macke JH{jakob}; Opper M; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 7055,
title = {Decorrelated neuronal firing in cortical microcircuits},
year = {2010},
month = {11},
volume = {40},
number = {73.20},
abstract = {Correlated trial-to-trial variability in the activity of cortical neurons is thought to reflect the functional connectivity of the circuit. Many cortical areas are organized into functional columns, in which neurons are believed to be densely connected and share common input. Numerous studies report a high degree of correlated variability between nearby cells. We developed chronically implanted multi-tetrode arrays offering unprecedented recording quality to re-examine this question in primary visual cortex of awake macaques. We found that even nearby neurons with similar orientation tuning show virtually no correlated variability.
In a total of 46 recording sessions from two monkeys, we presented either static or drifting sine-wave gratings at eight different orientations. We recorded from 407 well isolated, visually responsive and orientation-tuned neurons, resulting in 1907 simultaneously recorded pairs of neurons. In 406 of these pairs both neurons were recorded by the same tetrode.
Despite being physically close to each other and having highly overlapping receptive fields, neurons recorded from the same tetrode had exceedingly low spike count correlations (rsc = 0.005 ± 0.004; mean ± SEM). Even cells with similar preferred orientations (rsignal > 0.5) had very weak correlations (rsc = 0.028 ± 0.010). This was also true if pairs were strongly driven by gratings with orientations close to the cells’ preferred orientations.
Correlations between neurons recorded by different tetrodes showed a similar pattern. They were low on average (rsc = 0.010 ± 0.002) with a weak relation between tuning similarity and spike count correlations (two-sample t test, rsignal < 0.5 versus rsignal > 0.5: P = 0.003, n = 1907).
To investigate whether low correlations also occur under more naturalistic stimulus conditions, we presented natural images to one of the monkeys. The average rsc was close to zero (rsc = 0.001 ± 0.005, n = 329) with no relation between receptive field overlap and spike count correlations. We obtained a similar result during stimulation with moving bars in a third monkey (rsc = 0.014 ± 0.011, n = 56).
Our findings suggest a refinement of current models of cortical microcircuit architecture and function: either adjacent neurons share only a few percent of their inputs or, alternatively, their activity is actively decorrelated.},
web_url = {http://www.sfn.org/am2010/index.aspx?pagename=abstracts_main},
event_name = {40th Annual Meeting of the Society for Neuroscience (Neuroscience 2010)},
event_place = {San Diego, CA, USA},
state = {published},
author = {Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Keliris GA{george}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}}
}
@Poster{ 7074,
title = {Estimating cortical maps with Gaussian process models},
year = {2010},
month = {11},
volume = {40},
number = {483.18},
abstract = {A striking feature of cortical organization is that the encoding of many stimulus features, such as orientation preference, is arranged into topographic maps. The structure of these maps has been extensively studied using functional imaging methods, for example optical imaging of intrinsic signals, voltage sensitive dye imaging or functional magnetic resonance imaging. As functional imaging measurements are usually noisy, statistical processing of the data is necessary to extract maps from the imaging data. We here present a probabilistic model of functional imaging data based on Gaussian processes. In comparison to conventional approaches, our model yields superior estimates of cortical maps from smaller amounts of data. In addition, we obtain quantitative uncertainty estimates, i.e. error bars on properties of the estimated map. We use our probabilistic model to study the coding properties of the map and the role of noise correlations by decoding the stimulus from single trials of an imaging experiment. In addition, we show how our method can be used to reconstruct maps from sparse measurements, for example multi-electrode recordings. We demonstrate our model both on simulated data and on intrinsic signaling data from ferret visual cortex.},
web_url = {http://www.sfn.org/am2010/index.aspx?pagename=abstracts_main},
event_name = {40th Annual Meeting of the Society for Neuroscience (Neuroscience 2010)},
event_place = {San Diego, CA, USA},
state = {published},
author = {Macke JH{jakob}; Gerwinn S{sgerwinn}; White LE; Kaschube M; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 6704,
title = {Likelihood Estimation in Deep Belief Networks},
journal = {Frontiers in Computational Neuroscience},
year = {2010},
month = {10},
volume = {2010},
number = {Conference Abstract: Bernstein Conference on Computational Neuroscience},
abstract = {Many models have been proposed to capture the statistical regularities in natural images patches.
The average log-likelihood on unseen data offers a canonical way to quantify and compare the performance of statistical models. A class of models that has recently gained increasing popularity for the task of modeling complexly structured data is formed by deep belief networks. Analyses of these models, however, have been typically based on samples from the model due to the computationally intractable nature of the model likelihood.
In this study, we investigate whether the apparent ability of a particular deep belief network to capture higher-order statistical regularities in natural images is also reflected in the likelihood. Specifically, we derive a consistent estimator for the likelihood of deep belief networks that is conceptually simpler and more readily applicable than the previously published method [1]. Using this estimator, we evaluate a three-layer deep belief network and compare its density estimation performance with the performance of other models trained on small patches of natural images. In contrast to an earlier analysis based solely on samples, we provide evidence that the deep belief network under study is not a good model for natural images by showing that it is outperformed even by very simple models. Further, we confirm existing results indicating that adding more layers to the network has only little effect on the likelihood if each layer of the model is trained well enough.
Finally, we offer a possible explanation for both the observed performance and the small effect of additional layers by analyzing a best case scenario of the greedy learning algorithm commonly used for training this class of models.},
web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2010.51.00116/event_abstract},
event_name = {Bernstein Conference on Computational Neuroscience (BCCN 2010)},
event_place = {Berlin, Germany},
state = {published},
DOI = {10.3389/conf.fncom.2010.51.00116},
author = {Theis L{lucas}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Sinz F{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 6703,
title = {New Estimate for the Redundancy of Natural Images},
journal = {Frontiers in Computational Neuroscience},
year = {2010},
month = {10},
volume = {2010},
number = {Conference Abstract: Bernstein Conference on Computational Neuroscience},
abstract = {The light intensities of natural images exhibit a high degree of redundancy. Knowing the exact amount of their statistical dependencies is important for biological vision as well as compression and coding applications but estimating the total amount of redundancy, the multi-information, is intrinsically hard. The conventional approach for estimating the redundancy per pixel is to estimate the multi-information for patches of increasing sizes and divide by the number of pixels. Here, we show that the limiting value of this sequence---the multi-information rate---can be better estimated by another limiting process based on measuring the mutual information between a pixel and a causal neighborhood of increasing size around it. We explain the theoretical relationship of the two methods and compare their performance on natural images. While both methods provide a lower bound on the multi-information rate, the mutual information based sequence converges much faster to the multi-information rate than the conventional method does. In this way we can provide improved estimates of the multi-information rate of natural images and a better understanding its underlying spatial structure. In addition, we will present work in progress on hierarchical model architectures that has led to further improvements of this lower bound.},
web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2010.51.00006/event_abstract},
event_name = {Bernstein Conference on Computational Neuroscience (BCCN 2010)},
event_place = {Berlin, Germany},
state = {published},
DOI = {10.3389/conf.fncom.2010.51.00006},
author = {Hosseini R{hosseini}{Research Group Computational Vision and Neuroscience}; Sinz F{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 6808,
title = {What is the Goal of Complex Cell Coding in V1?},
journal = {Frontiers in Computational Neuroscience},
year = {2010},
month = {10},
volume = {2010},
number = {Conference Abstract: Bernstein Conference on Computational Neuroscience},
abstract = {A long standing question of biological vision research is to identify the computational goal underlying the response properties of sensory neurons in the early visual system. Some response properties of visual neurons such as bandpass filtering and contrast gain control have been shown to exhibit a clear advantage in terms of redundancy reduction. The situation is less clear in the case of complex cells whose defining property is that of phase invariance. While it has been shown that complex cells can be learned based on the redundancy reduction principle by means of subspace ICA [Hyvärinen & Hoyer 2000], the resulting gain in redundancy reduction is very small [Sinz, Simoncelli, Bethge 2010]. Slow feature analysis (SFA, [Wiskott & Sejnowski 2002]) advocates an alternative objective function which does not seek to fit a density model but constitutes a special case of oriented PCA by maximizing the signal to noise ratio when treating temporal changes as noise. Here we set out to evaluate SFA with respect to two important empirical properties of complex cells RFs: 1) locality (i.e. finite, non-zero RF bandwidth) and 2) the relationship between RF bandwidth and RF spatial frequency (wavelet scaling). To this end we use an approach similar to that employed by [Field 1987] for sparse coding. Instead of single Gabor functions, however, we use the energy model of complex cells which is built with a (quadrature) pair of even and odd symmetric Gabor filters. We evaluate the objective function of SFA on the energy model responses to motion sequences of natural images for different spatial frequencies and envelope sizes with patch sizes ranging from 16x16 to 512x512. We find that the objective function of SFA grows without bound for increasing envelope size and is only limited by a finite patch size (see Figure, solid line). 
Consequently, SFA by itself cannot explain spatially localized RFs but would need to evoke other mechanisms such as anatomical wiring constraints to limit the RF bandwidth. It is unlikely, however, that such anatomical constraints are able to reproduce the relationship between bandwidth and spatial frequency. In contrast to SFA, the objective function of subspace ICA yields a clear optimum for finite, non-zero bandwidth, regardless of assumed patch size (see Figure, dashed line). In particular, the optimum bandwidth is proportional to spatial frequency - just as observed for physiologically measured RFs in primary visual cortex of cat [Field & Tolhurst 1986] and monkey ([Ringach 2002], histogram see Figure). We conclude that SFA fails to reproduce important features of complex cells. In contrast, the RF bandwidth predicted by subspace ICA lies well within the range of physiologically measured receptive field bandwidths. As a consequence, if we interpret complex cell coding as a step towards building an invariant representation, the underlying algorithm is more likely to resemble a sparse coding strategy as employed by subspace ICA than the covariance based learning rule employed by SFA.},
web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2010.51.00047/event_abstract},
event_name = {Bernstein Conference on Computational Neuroscience (BCCN 2010)},
event_place = {Berlin, Germany},
state = {published},
DOI = {10.3389/conf.fncom.2010.51.00047},
author = {Lies J-P{plies}{Research Group Computational Vision and Neuroscience}; H\"afner RM{rhaefner}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 6810,
title = {Decorrelated Firing in Cortical Microcircuits},
year = {2010},
month = {6},
volume = {2010},
pages = {58},
abstract = {Correlated trial-to-trial variability in the activity of cortical neurons is thought to reflect the
functional connectivity of the circuit. Many cortical areas are organized into functional columns,
in which neurons are believed to be densely connected and share common input. Numerous
studies report a high degree of correlated variability between nearby cells. We developed
chronically implanted multi-tetrode arrays offering unprecedented recording quality
to re-examine this question in primary visual cortex of awake macaques. We found that
even nearby neurons with similar orientation tuning show virtually no correlated variability.
In a total of 46 recording sessions from two monkeys, we presented either static or drifting
sine-wave gratings at eight different orientations. We recorded from 407 well isolated, visually
responsive and orientation-tuned neurons, resulting in 1907 simultaneously recorded
pairs of neurons. In 406 of these pairs both neurons were recorded by the same tetrode.
Despite being physically close to each other and having highly overlapping receptive fields,
neurons recorded from the same tetrode had exceedingly low spike count correlations (rsc =
0.005 ± 0.004; mean ± SEM). Even cells with similar preferred orientations (rsignal > 0.5) had
very weak correlations (rsc = 0.028 ± 0.010). This was also true if pairs were strongly driven
by gratings with orientations close to the cells’ preferred orientations.
Correlations between neurons recorded by different tetrodes showed a similar pattern. They
were low on average (rsc = 0.010 ± 0.002) with a weak relation between tuning similarity
and spike count correlations (two-sample t test, rsignal < 0.5 versus rsignal > 0.5: P = 0.003, n =
1907).
To investigate whether low correlations also occur under more naturalistic stimulus conditions,
we presented natural images to one of the monkeys. The average rsc was close to zero
(rsc = 0.001 ± 0.005, n = 329) with no relation between receptive field overlap and spike
count correlations. We obtained a similar result during stimulation with moving bars in a
third monkey (rsc = 0.014 ± 0.011, n = 56).
Our findings suggest a refinement of current models of cortical microcircuit architecture and
function: either adjacent neurons share only a few percent of their inputs or, alternatively,
their activity is actively decorrelated.},
web_url = {http://www.areadne.org/2010/home.html},
editor = {Hatsopoulos, N. G., S. Pezaris},
event_name = {AREADNE 2010: Research in Encoding And Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Keliris GA{george}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}}
}
@Poster{ 6809,
title = {What is the Goal of Complex Cell Coding in V1?},
year = {2010},
month = {6},
volume = {2010},
pages = {72},
abstract = {A long standing question of biological vision research is to identify the computational goal
underlying the response properties of sensory neurons in the early visual system. Some response
properties of visual neurons such as bandpass filtering and contrast gain control have
been shown to exhibit a clear advantage in terms of redundancy reduction. The situation is less
clear in the case of complex cells whose defining property is that of phase invariance. While
it has been shown that complex cells can be learned based on the redundancy reduction principle
by means of subspace ICA [Hyvarinen & Hoyer 2000], the resulting gain in redundancy
reduction is very small [Sinz, Simoncelli, Bethge 2010]. Slow feature analysis (SFA, [Wiskott
& Sejnowski 2002]) advocates an alternative objective function which does not seek to fit a
density model but constitutes a special case of oriented PCA by maximizing the signal to noise
ratio when treating temporal changes as noise.
Here we set out to evaluate SFA with respect to two important empirical properties of complex
cells RFs: (1) locality (i.e. finite RF size) and (2) an inverse relationship between RF size and
RF spatial frequency. To this end we use an approach similar to that employed by [Field 1987]
for sparse coding. Instead of single Gabor functions, however, we use the energy model of
complex cells which is built with a (quadrature) pair of even and odd symmetric Gabor filters.
We evaluate the objective function of SFA on the energy model responses to motion sequences
of natural images for different spatial frequencies and envelope sizes, with patch sizes ranging
from 64x64 to 512x512.
We find that the objective function of SFA grows without bound for increasing envelope size
(see Figure, blue line). Consequently, SFA by itself cannot explain spatially localized RFs but
would need to evoke other mechanisms such as anatomical wiring constraints to limit the size
of the RF. It is unlikely, however, that such anatomical constraints are able to reproduce the
inverse relationship between RF size and spatial frequency.
64x64 256x256 512x512
0
1
2
3
4
5
6
Patch size in pixels
optimal envelope width/wavelength
ICA
SFA
Range of physiological
data [Ringach 2002]
In contrast to SFA, the objective function of subspace ICA yields
a clear optimum for finite envelope sizes, regardless of assumed
patch size (see Figure, red line). In particular, the optimum envelope
size is inversely proportional to spatial frequency — just
as observed for physiologically measured RFs in primary visual
cortex of cat [Field & Tolhurst 1986] and monkey ([Ringach 2002],
histogram see Figure).
We conclude that SFA fails to reproduce important features of
complex cells. In contrast, the envelope size predicted by subspace
ICA lies well within the range of physiologically measured
receptive field sizes. As a consequence, if we interpret complex cell coding as a step towards
building an invariant representation, the underlying algorithm is more likely to resemble a
sparse coding strategy as employed by subspace ICA than the covariance based learning rule
employed by SFA.},
web_url = {http://www.areadne.org/2010/home.html},
editor = {Hatsopoulos, N. G., S. Pezaris},
event_name = {AREADNE 2010: Research in Encoding And Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Lies J-P{plies}{Research Group Computational Vision and Neuroscience}; H\"afner RM{rhaefner}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ HafnerGMB2009,
title = {Neuronal decision-making with realistic spiking models},
journal = {Frontiers in Computational Neuroscience},
year = {2009},
month = {10},
day = {1},
volume = {2009},
number = {Conference Abstract: Bernstein Conference on Computational Neuroscience},
pages = {132-133},
abstract = {The neuronal processes underlying perceptual decision-making have been the focus of numerous studies over the past two decades. In the current standard model [1][2][3] the output of noisy sensory neurons is pooled and integrated by decision neurons. Once the activity of the decision neurons reaches a threshold, the corresponding choice is made. This bottom-up model was recently challenged based on the empirical finding that the time courses of psychophysical kernel (PK) and choice probability (CP) qualitatively differ from each other [4]. It was concluded that the decision-related activity in sensory neurons, at least in part, reflects the decision through a top-down signal, rather than contribute to the decision causally. However, the prediction of the standard bottom-up model about the relationship between the time courses of PKs and CPs crucially depends on the underlying noise model. Our study explores the impact of the time course and correlation structure of neuronal noise on PK and CP for several decision models. For the case of non-leaky integration over the entire stimulus duration, we derive analytical expressions for Gaussian additive noise with arbitrary correlation structure. For comparison, we also investigate biophysically generated responses with a Fano factor that increases with the counting window [5], and alternative decision models (leaky, integration to bound) using numerical simulations.
In the case of non-leaky integration over the entire stimulus duration we find that the amplitude of the PK only depends on the overall level of noise, but not its temporal changes. Consequently the PK remains constant regardless of the temporal evolution or correlation structure in the noise. In conjunction with the observed decrease in the amplitude of the PK (e.g. [4]) this supports the conclusion that decreasing PKs are evidence for an integration to a bound model [1][3]. However, we find that the temporal evolution of the CP depends strongly on both the time course of the noise variance and the temporal correlations within the pool of sensory neurons. For instance, a noise variance that increases over time also leads to an increasing CP. The bottom-up account that appears to agree best with the data in [4] combines an increasing variance of the correlated noise (the noise that cannot be eliminated by averaging over many neurons) with an integration-to-bound decision model. This leads to a decreasing PK, as well as a CP that first increases slowly before leveling off and persisting until the end. We do not find qualitatively different results when using biophysically generated or Poisson distributed responses instead of additive Gaussian noise.
In summary, we advance the analytical framework for a quantitative comparison of choice probabilities and psychophysical kernels and find that recent data that was taken to be evidence of a top-down component in choice probabilities, may alternatively be accounted for by a bottom-up model when allowing for time-varying correlated noise.},
web_url = {http://www.frontiersin.org/10.3389/conf.neuro.10.2009.14.004/event_abstract?sname=Bernstein_Conference_on_Computational_Neuroscience},
event_name = {Bernstein Conference on Computational Neuroscience (BCCN 2009)},
event_place = {Frankfurt a.M., Germany},
state = {published},
DOI = {10.3389/conf.neuro.10.2009.14.004},
author = {H\"afner R{rhaefner}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 5966,
title = {A new class of distributions for natural images generalizing independent subspace analysis},
journal = {Frontiers in Computational Neuroscience},
year = {2009},
month = {9},
day = {30},
volume = {2009},
number = {Conference Abstract: Bernstein Conference on Computational Neuroscience},
pages = {114-115},
abstract = {The Redundancy Reduction Hypothesis by Barlow and Attneave suggests a link between the statistics of natural images and the physiologically observed structure and function in the early visual system. In particular, algorithms and probabilistic models like Independent Component Analysis, Independent Subspace Analysis and Radial Factorization, which allow for redundancy reduction mechanism, have been used successfully to generate several features of the early visual system such as bandpass filtering, contrast gain control, and orientation selective filtering when applied to natural images.
Here, we propose a new family of probability distributions, called Lp-nested symmetric distributions, that comprises all of the above algorithms for natural images. This general class of distributions allows us to quantitatively assess (i) how well the assumptions made by all of the redundancy reducing models are justified for natural images, (ii) how large the contribution of each of these mechanisms (shape of filters, non-linear contrast gain control, subdivision into subspaces) to redundancy reduction is. For ISA, we find that partitioning the space into different subspaces only yields a competitive model when applied after contrast gain control. In this case, however, we find that the single filter responses are already almost independent. Therefore, we conclude that a partitioning into subspaces does not considerably improve the model which makes band-pass filtering (whitening) and contrast gain control (divisive normalization) the two most important mechanisms.},
web_url = {http://www.frontiersin.org/10.3389/conf.neuro.10.2009.14.127/event_abstract},
event_name = {Bernstein Conference on Computational Neuroscience (BCCN 2009)},
event_place = {Frankfurt a.M., Germany},
state = {published},
DOI = {10.3389/conf.neuro.10.2009.14.127},
author = {Sinz F{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ HosseiniB2009,
title = {Hierarchical models of natural images},
journal = {Frontiers in Computational Neuroscience},
year = {2009},
month = {9},
day = {30},
volume = {2009},
number = {Conference Abstract: Bernstein Conference on Computational Neuroscience},
pages = {112},
abstract = {Here, we study two different approaches to estimate the multi-information of natural images. In both cases, we begin with a whitening step. Then, in the first approach, we use a hierarchical multi-layer ICA model [1] which is an efficient variant of projection pursuit density estimation. Projection pursuit [2] is a nonparametric density estimation technique with universal approximation properties. That is, it can be proven to converge to the true distribution in the limit of infinite amount of data and layers. For the second approach, we suggest a new model which consists of two layers only and has much less degrees of freedom than the multi-layer ICA model. In the first layer we apply symmetric whitening followed by radial Gaussianization [3,4] which transforms the norm of the image patches such that the distribution over the norm of the image patches matches the radial distribution of a multivariate Gaussian. In the next step, we apply ICA. The first step can be considered as a contrast gain control mechanism and the second one yields edge filters similar to those in primary visual cortex. By evaluating quantitatively the redundancy reduction achieved with the two approaches, we find that the second procedure fits the distribution significantly better than the first one. On
the van Hateren data set (400,000 image patches of size 12x12) with log-intensity scale, the redundancy reduction in the multi-layer ICA model yields 0.162,0.081,0.034,0.021,0.013,0.009,0.006,0.004,0.003,0.002 bits/pixel after the first, second, third, fourth, …, tenth layer, respectively. (For the training set size used, the
performance decreases after the tenth layer). In contrast, we find a redundancy reduction of 0.342 bits/pixel after the first layer and 0.073 bits/pixel after the second layer for the second approach.
In conclusion, the universal approximation property of the deep hierarchical architecture in the first approach does not pay off for the task of density estimation in case of natural images.},
web_url = {https://bccn2009.org/de/program/},
event_name = {Bernstein Conference on Computational Neuroscience (BCCN 2009)},
event_place = {Frankfurt a.M., Germany},
state = {published},
DOI = {10.3389/conf.neuro.10.2009.14.123},
author = {Hosseini R{hosseini}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 6130,
title = {Unsupervised learning of disparity maps from stereo images},
journal = {Frontiers in Computational Neuroscience},
year = {2009},
month = {9},
day = {30},
volume = {2009},
number = {Conference Abstract: Bernstein Conference on Computational Neuroscience},
pages = {113},
abstract = {The visual perception of depth is a striking ability of the human visual system and an active part of research in fields like neurobiology, psychology, robotics, or computer vision. In real world scenarios, many different cues, such as shading, occlusion, or disparity are combined to perceive depth. As can be shown using random dot stereograms, however, disparity alone is sufficient for the generation of depth perception [1]. To compute the disparity map of an image, matching image regions in both images have to be found, i.e. the correspondence problem has to be solved. After this, it is possible to infer the depth of the scene. Specifically, we address the correspondence problem by inferring the transformations between image patches of the left and the right image. The transformations are modeled as Lie groups which can be learned efficiently [3]. First, we start from the assumption that horizontal disparity is caused by a horizontal shift only. In that case, the transformation matrix can be constructed analytically according to the Fourier shift theorem. The correspondence problem is then solved locally by finding the best matching shift for a complete image patch. The infinitesimal generators of a Lie group allow us to determine shifts smoothly down to subpixel resolution. In a second step, we use the general Lie group framework to allow for more general transformations. In this way, we infer a number of transform coefficients per image patch. We finally obtain the disparity map by combining the coefficients of (overlapping) image patches to a global disparity map. The stereo images were created using our 3D natural stereo image rendering system [2]. The advantage of these images is that we have ground truth information of the depth maps and full control over the camera parameters for the given scene. Finally, we explore how the obtained disparity maps can be used to compute accurate depth maps.},
web_url = {http://www.frontiersin.org/10.3389/conf.neuro.10.2009.14.126/event_abstract},
event_name = {Bernstein Conference on Computational Neuroscience (BCCN 2009)},
event_place = {Frankfurt a.M., Germany},
state = {published},
DOI = {10.3389/conf.neuro.10.2009.14.126},
author = {Lies J-P{plies}{Research Group Computational Vision and Neuroscience}; Wang J; Sohl-Dickstein J; Olshausen BA; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 5845,
title = {Bayesian estimation of orientation preference maps},
journal = {Frontiers in Systems Neuroscience},
year = {2009},
month = {3},
volume = {2009},
number = {Conference Abstracts: Computational and Systems Neuroscience},
abstract = {Neurons in the early visual cortex of mammals exhibit a striking organization with respect to their functional properties. A prominent example is the layout of orientation preferences in primary visual cortex, the orientation preference map (OPM). Functional imaging techniques, such as optical imaging of intrinsic signals have been used extensively for the measurement of OPMs. As the signal-to-noise ratio in individual pixels is often low, the signals are usually spatially smoothed with a fixed linear filter to obtain an estimate of the functional map.
Here, we consider the estimation of the map from noisy measurements as a Bayesian inference problem. By combining prior knowledge about the structure of OPMs with experimental measurements, we want to obtain better estimates of the OPM with smaller trial numbers. In addition, the use of an explicit, probabilistic model for the data provides a principled framework for setting parameters and smoothing.
We model the underlying map as a bivariate Gaussian process (GP, a.k.a. Gaussian random field), with a prior covariance function that reflects known properties of OPMs. The posterior mean of the map can be interpreted as an optimally smoothed map. Hyper-parameters of the model can be chosen by optimization of the marginal likelihood. In addition, the GP also returns a predicted map for any location, and can therefore be used for extending the map to pixels at which no, or only unreliable data was obtained.
We also obtain a posterior distribution over maps, from which we can estimate the posterior uncertainty of statistical properties of the maps, such as the pinwheel density. Finally, our probabilistic model of both the signal and the noise can be used for decoding, and for estimating the informational content of the map.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_09},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2009)},
event_place = {Salt Lake City, UT, USA},
state = {published},
DOI = {10.3389/conf.neuro.06.2009.03.310},
author = {Macke J{jakob}; Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; White L; Kaschube M; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 5843,
title = {Bayesian Population Decoding of Spiking Neurons},
journal = {Frontiers in Systems Neuroscience},
year = {2009},
month = {3},
volume = {2009},
number = {Conference Abstracts: Computational and Systems Neuroscience},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_09},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2009)},
event_place = {Salt Lake City, UT, USA},
state = {published},
DOI = {10.3389/conf.neuro.06.2009.03.026},
author = {Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Macke J{jakob}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 5844,
title = {Sensory input statistics and network mechanisms in primate primary visual cortex},
journal = {Frontiers in Systems Neuroscience},
year = {2009},
month = {3},
volume = {2009},
number = {Conference Abstracts: Computational and Systems Neuroscience},
abstract = {Understanding the structure of multi-neuronal firing patterns in ensembles of cortical neurons is a major challenge for systems neuroscience. The dependence of network properties on the statistics of the sensory input can provide important insights into the computations performed by neural ensembles. Here, we study the functional properties of neural populations in the primary visual cortex of awake, behaving macaques by varying visual input statistics in a controlled way. Using arrays of chronically implanted tetrodes, we record simultaneously from up to thirty well-isolated neurons while presenting sets of images with three different correlation structures: spatially uncorrelated white noise (whn), images matching the second-order correlations of natural images (phs) and natural images including higher-order correlations (nat).
We find that groups of six nearby cortical neurons show little redundancy in their firing patterns (represented as binary vectors, 10ms bins) but rather act almost independently (mean multi-information 0.85 bits/s, range 0.16 - 1.90 bits/s, mean fraction of marginal entropy 0.34 %, N=46). Although network correlations are weak, they are statistically significant. While relatively few groups showed significant redundancies under stimulation with white noise (67.4 ± 3.2%; mean fraction of groups ± S.E.M.), many more did so in the other two conditions (phs: 95.7 ± 0.6%; nat: 89.1 ± 1.4%). Additional higher-order correlations in natural images compared to phase scrambled images did not increase but rather decrease the redundancy in the cortical representation: Network correlations are significantly higher in phs than in nat, as is the number of significantly correlated groups.
Multi-information measures the reduction in entropy due to any form of correlation. By using second order maximum entropy modeling, we find that a large fraction of multi-information is accounted for by pairwise correlations (whn: 75.0 ± 3.3%; phs: 82.8 ± 2.1%; nat: 80.8 ± 2.4%; groups with significant redundancy). Importantly, stimulation with natural images containing higher-order correlations only lead to a slight increase in the fraction of redundancy due to higher-order correlations in the cortical representation (mean difference 2.26 %, p=0.054, Sign test).
While our results suggest that population activity in V1 may be modeled well using pairwise correlations only, they leave roughly 20-25 % of the multi-information unexplained. Therefore, choosing a particular form of higher-order interactions may improve model quality. Thus, in addition to the independent model, we evaluated the quality of three different models: (a) The second-order maximum entropy model, which minimizes higher-order correlations, (b) a model which assumes that correlations are a product of common inputs (Dichotomized Gaussian) and (c) a mixture model in which correlations are induced by a discrete number of latent states. We find that an independent model is sufficient for the white noise condition but neither for phs or nat. In contrast, all of the correlation models (a-c) perform similarly well for the conditions with correlated stimuli.
Our results suggest that under natural stimulation redundancies in cortical neurons are relatively weak. Higher-order correlations in natural images do not increase but rather decrease the redundancies in the cortical representation.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_09},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2009)},
event_place = {Salt Lake City, UT, USA},
state = {published},
DOI = {10.3389/conf.neuro.06.2009.03.298},
author = {Berens P{berens}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Ecker AS{aecker}; Cotton RJ; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}}
}
@Poster{ KostenGBK2008,
title = {Going to temporal superresolution for AP detection in two-photon calcium imaging in vivo by using an explicit data model},
year = {2008},
month = {10},
volume = {9},
number = {12},
abstract = {Two-photon calcium imaging in vivo allows for the simultaneous imaging of activity in populations of cortical neurons. This approach has been shown to achieve both single
action-potential (AP) and single-cell resolution, an important requirement when measuring neural activity. However, there still remains room for improvement in both data acquisition and data analysis. Imaging calcium transients across time allows the inference of electrical spiking activity, but since the calcium signals are an order of magnitude slower than the spiking activity which produces them, temporal accuracy can be lost. Here we
describe a possible approach to increase the temporal resolution of such data. We present an approach that explicitly models signal and noise in the data, and complements the output of a previous spike detection algorithm. Instead of averaging the signal over 96 ms
(a full frame), we employ higher resolution that averages over 1.5 ms periods, corresponding to the individual laser scan lines that compose a single image frame. The difference
between theoretical and observed fluorescence measurements is modeled as a multivariate Gaussian distribution with zero mean, yielding a likelihood value for each possible spike time over a two frame window. Taking into account the prior distribution of timing errors in the output of our AP detection algorithm, we estimate the detected spike's most likely position. This approach improves temporal resolution significantly compared to previous methods. We discuss the future development of this approach, its limitations, and the crucial role of an accurate estimation of baseline
fluorescence.},
event_name = {9th Conference of the Junior Neuroscientists of Tübingen (NeNa 2008)},
event_place = {Ellwangen, Germany},
state = {published},
author = {Kosten J{jkosten}{Department High-Field Magnetic Resonance}; Greenberg D{david}{Research Group Neural Population Imaging}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Kerr J{jkerr}{Research Group Neural Population Imaging}}
}
@Poster{ MackeOB2008_2,
title = {How pairwise correlations affect the redundancy in large populations of neurons},
journal = {Frontiers in Computational Neuroscience},
year = {2008},
month = {10},
volume = {2008},
number = {Conference Abstract: Bernstein Symposium 2008},
abstract = {Simultaneously recorded neurons often exhibit correlations in their spiking activity. These correlations shape the statistical structure of the population activity, and can lead to substantial redundancy across neurons. Knowing the amount of redundancy in neural responses is critical for our understanding of the neural code. Here, we study the effect of pairwise correlations on the statistical structure of population activity. We model correlated activity as arising from common Gaussian inputs into simple threshold neurons. In population models with exchangeable correlation structure, one can analytically calculate the distribution of synchronous events across the whole population, and the joint entropy (and thus the redundancy) of the neural responses. We investigate the scaling of the redundancy as the population size is increased, and characterize its phase transitions for increasing correlation strengths. We compare the asymptotic redundancy in our models to the corresponding maximum- and minimum entropy models. Although this model must exhibit more redundancy than the maximum entropy model, we find that its joint entropy increases linearly with population size.},
web_url = {http://www.frontiersin.org/10.3389/conf.neuro.10.2008.01.086/event_abstract?sname=Bernstein_Symposium_2008},
event_name = {Bernstein Symposium 2008},
event_place = {München, Germany},
state = {published},
DOI = {10.3389/conf.neuro.10.2008.01.086},
author = {Macke J{jakob}; Opper M; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 5532,
title = {Image library for unsupervised learning of depth from stereo},
journal = {Frontiers in Computational Neuroscience},
year = {2008},
month = {10},
volume = {2008},
number = {Conference Abstract: Bernstein Symposium 2008},
abstract = {The visual system is able to extract depth information from the disparity of the two images on the retinae. Every system that makes use of disparity information must identify corresponding points in the two images. This correspondence problem constitutes a principal difficulty in depth from stereo and many questions are left open about how the visual system solves it. In this work, we seek to understand how depth inference can emerge from unsupervised learning of statistical regularities in binocular images. In a first step we acquire a database of training data by using virtual 3D sceneries which are rendered into stereo images from two eye-like positioned cameras. This provides us with an extensive repository of stereo images along with precise depth and disparity maps. In the future we will use this data as ground truth for a quantitative analysis and comparison of different models for depth inference.},
web_url = {http://www.frontiersin.org/10.3389/conf.neuro.10.2008.01.083/event_abstract},
event_name = {Bernstein Symposium 2008},
event_place = {München, Germany},
state = {published},
DOI = {10.3389/conf.neuro.10.2008.01.083},
author = {Lies J-P{plies}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 5536,
title = {The Conjoint Effect of Divisive Normalization and Orientation Selectivity on Redundancy Reduction in Natural Images},
journal = {Frontiers in Computational Neuroscience},
year = {2008},
month = {10},
volume = {2008},
number = {Conference Abstract: Bernstein Symposium 2008},
abstract = {Bandpass filtering, orientation selectivity, and contrast gain control are prominent features of sensory coding at the level of V1 simple cells. While the effect of bandpass filtering and orientation selectivity can be assessed within a linear model, contrast gain control is an inherently nonlinear computation. Here we employ the class of elliptically contoured distributions to investigate the extent to which the two features---orientation selectivity and contrast gain control---are suited to model the statistics of natural images. Within this framework we find that contrast gain control can play a significant role for the removal of redundancies in natural images. Orientation selectivity, in contrast, has only a very limited potential for redundancy reduction.},
web_url = {http://www.frontiersin.org/10.3389/conf.neuro.10.2008.01.116/event_abstract},
event_name = {Bernstein Symposium 2008},
event_place = {München, Germany},
state = {published},
DOI = {10.3389/conf.neuro.10.2008.01.116},
author = {Sinz FH{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 5359,
title = {Towards the neural basis of the flash-lag effect},
year = {2008},
month = {9},
event_name = {International Workshop: Aspects of Adaptive Cortex Dynamics},
event_place = {Delmenhorst, Germany},
state = {published},
author = {Ecker AS{aecker}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Hoenselaar A{hoenselaar}; Subramaniyan M; Tolias AS{atolias}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 5194,
title = {Redundancy Reduction in Natural Images: Quantifying the Effect of Orientation Selectivity and Contrast Gain Control},
year = {2008},
month = {7},
day = {29},
abstract = {The two most prominent features of early visual processing are orientation selective filtering and contrast gain control. While the effect of orientation selectivity can be assessed within a linear model, contrast gain control is an inherently nonlinear computation. Here we employ the class of $L_p$ elliptically contoured distributions to investigate the extent to which the two features, orientation selectivity and contrast gain control, are suited to model the statistics of natural images. Within this model we find that contrast gain control can play a significant role for the removal of redundancies in natural images.
Orientation selectivity, in contrast, has only a very limited potential for linear redundancy reduction.},
web_url = {http://www.grc.org/programs.aspx?year=2008&program=senscod},
event_name = {Gordon Research Conference: Sensory Coding & The Natural Environment 2008},
event_place = {Lucca, Italy},
state = {published},
author = {Sinz F{fabee}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ MackeBEOTB2008,
title = {Modeling populations of spiking neurons with the Dichotomized Gaussian distribution},
year = {2008},
month = {7},
web_url = {http://www.theswartzfoundation.org/summer-meeting-2008.asp},
event_name = {Annual Meeting 2008 of Sloan-Swartz Centers for Theoretical Neurobiology},
event_place = {Princeton, NJ, USA},
state = {published},
author = {Macke JH{jakob}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Opper M; Tolias AS{atolias}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 5101,
title = {Flexible Models for Population Spike Trains},
year = {2008},
month = {6},
pages = {48},
abstract = {In order to understand how neural systems perform computations and process sensory
information, we need to understand the structure of firing patterns in large populations of
neurons. Spike trains recorded from populations of neurons can exhibit substantial pairwise
correlations between neurons and rich temporal structure. Thus, efficient methods for
generating artificial spike trains with specified correlation structure are essential for the
realistic simulation and analysis of neural systems.
Here we show how correlated binary spike trains can be modeled by means of a latent
multivariate Gaussian model. Sampling from our model is computationally very efficient, and
in particular, feasible even for large populations of neurons. We show empirically that the
spike trains generated with this method have entropy close to the theoretical maximum. They
are therefore consistent with specified pair-wise correlations without exhibiting systematic
higher-order correlations. We compare our model to alternative approaches and discuss its
limitations and advantages. In addition, we demonstrate its use for modeling temporal
correlations in a neuron recorded in macaque primary visual cortex.
Neural activity is often summarized by discarding the exact timing of spikes, and only
counting the total number of spikes that a neuron (or population) fires in a given time window.
In modeling studies, these spike counts have often been assumed to be Poisson distributed
and neurons to be independent. However, correlations between spike counts have been
reported in various visual areas. We show how both temporal and inter-neuron correlations
shape the structure of spike counts, and how our model can be used to generate spike counts
with arbitrary marginal distributions and correlation structure. We demonstrate its capabilities
by modeling a population of simultaneously recorded neurons from the primary visual cortex
of a macaque, and we show how a model with correlations accounts for the data far better
than a model that assumes independence.},
web_url = {http://www.areadne.org/2008/home.html},
event_name = {AREADNE 2008: Research in Encoding and Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}; Tolias AS{atolias}}
}
@Poster{ 5100,
title = {Pairwise Correlations and Multineuronal Firing Patterns in the Primary Visual Cortex of the Awake, Behaving Macaque},
year = {2008},
month = {6},
pages = {46},
abstract = {Understanding the structure of multi-neuronal firing patterns has been a central quest and major challenge for systems neuroscience. In particular, how do pairwise interactions between neurons shape the firing patterns of neuronal ensembles in the cortex? To study this question, we recorded simultaneously from multiple single neurons in the primary visual cortex of an awake, behaving macaque using an array of chronically implanted tetrodes1. High
contrast flashed and moving bars were used for stimulation, while the monkey was required to maintain fixation. In a similar vein to recent studies of in vitro preparations 2,3,5, we applied maximum entropy analysis for the first time to the binary spiking patterns of populations of cortical neurons recorded in vivo from the awake macaque. We employed the Dichotomized Gaussian distribution, which can be seen as a close approximation to the pairwise maximum-entropy model for binary data4. Surprisingly, we find that even pairs of neurons with nearby receptive
fields (receptive field center distance < 0.15°) have only weak correlations between their binary responses computed in bins of 10 ms (median absolute correlation coefficient: 0.014, 0.010-0.019, 95% confidence intervals, N=95 pairs; positive correlations: 0.015, N=59; negative correlations: -0.013, N=36). Accordingly, the distribution of spiking patterns of groups of 10 neurons is described well with a model that assumes independence between individual neurons (Jensen-Shannon-Divergence: 1.06×10^-2 independent model, 0.96×10^-2 approximate second-order maximum-entropy model4; H/H1=0.992). These results suggest that the distribution of firing patterns of small cortical networks in the awake animal is predominantly determined by the mean activity of the participating cells, not by their interactions.
Meaningful computations, however, are performed by neuronal populations much larger than 10 neurons. Therefore, we investigated how weak pairwise correlations affect the firing patterns of artificial populations4 of up to 1000 cells with the same correlation structure as experimentally measured. We find that in neuronal ensembles of this size firing patterns with many active or silent neurons occur considerably more often than expected from a fully
independent population (e.g. 130 or more out of 1000 neurons are active simultaneously roughly every 300 ms in the correlated model and only once every 3-4 seconds in the
independent model). These results suggest that the firing patterns of cortical networks comparable in size to several minicolumns exhibit a rich structure, even if most pairs appear relatively independent when studying small subgroups thereof.},
web_url = {http://www.areadne.org/2008/home.html},
event_name = {AREADNE 2008: Research in Encoding and Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Berens P{berens}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}; Subramaniyan M; Macke JH{jakob}; Hauck P; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Tolias AS{atolias}}
}
@Poster{ 4730,
title = {Near-Maximum Entropy Models for Binary Neural Representations of Natural Images},
year = {2007},
month = {9},
pages = {19-20},
abstract = {Maximum entropy analysis of binary variables provides an elegant way for studying the role of pairwise correlations in neural populations. Unfortunately, these approaches suffer from their poor scalability to high dimensions. In sensory coding, however, high-dimensional data is ubiquitous. Here, we introduce a new approach using a near-maximum entropy model, that makes this type of analysis feasible for very high-dimensional data---the model parameters can be derived in closed form and sampling is easy. We demonstrate its usefulness by studying a simple neural representation model of natural images. For the first time, we are able to directly compare predictions from a pairwise maximum entropy model not only in small groups of neurons, but also in larger populations of more than thousand units. Our results indicate that in such larger networks interactions exist that are not predicted by pairwise correlations, despite the fact that pairwise correlations explain the lower-dimensional marginal statistics
extremely well up to the limit of dimensionality where estimation of the full joint distribution is feasible.},
web_url = {http://www.gatsby.ucl.ac.uk/nccd/nccd07/abstract_book.pdf},
event_name = {Neural Coding, Computation and Dynamics (NCCD 07)},
event_place = {Hossegor, France},
state = {published},
author = {Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 4731,
title = {Studying the effects of noise correlations on population coding using a sampling method},
year = {2007},
month = {9},
pages = {21-22},
abstract = {Responses of single neurons to a fixed stimulus are usually both variable and highly ambiguous. Therefore,
it is widely assumed that stimulus parameters are encoded by populations of neurons. An important
aspect in population coding that has received much interest in the past is the effect of correlated noise
on the accuracy of the neural code.
Theoretical studies have investigated the effects of different correlation structures on the amount of
information that can be encoded by a population of neurons based on Fisher Information. Unfortunately,
to be analytically tractable, these studies usually have to make certain simplifying assumptions such as
high firing rates and Gaussian noise. Therefore, it remains open if these results also hold in the more realistic scenario of low firing rates and discrete, Poisson-distributed spike counts.
In order to address this question we have developed a straightforward and efficient method to draw samples
from a multivariate near-maximum entropy Poisson distribution with arbitrary mean and covariance
matrix based on the dichotomized Gaussian distribution [1]. The ability to extensively sample data from
this class of distributions enables us to study the effects of different types of correlation structures and
tuning functions on the information encoded by populations of neurons under more realistic assumptions
than analytically tractable methods.
Specifically, we studied how limited range correlations (neurons with similar tuning functions and low
spatial distance are more correlated than others) affect the accuracy of a downstream decoder compared
to uniform correlations (correlations between neurons are independent of their properties and locations).
Using a set of neurons with equally spaced orientation tuning functions, we computed the error of an
optimal linear estimator (OLE) reconstructing stimulus orientation from the neurons firing rates. We
find---supporting previous theoretical results---that irrespective of tuning width and the number of neurons in
the network, limited range correlations decrease decoding accuracy while uniform correlations facilitate
accurate decoding. The optimal tuning width, however, did not change as a function of either the
correlation structure or the number of neurons in the network. These results are particularly interesting
since a number of experimental studies report limited range correlation structures (starting at around
0.1 to 0.2 for similar neurons) while experiments carried out in our own lab suggest that correlations are
generally low (on the order of 0.01) and uniform.},
web_url = {http://www.gatsby.ucl.ac.uk/nccd/nccd07/abstract_book.pdf},
event_name = {Neural Coding, Computation and Dynamics (NCCD 07)},
event_place = {Hossegor, France},
state = {published},
author = {Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Logothetis NK{nikos}{Department Physiology of Cognitive Processes}; Tolias AS{atolias}{Department Physiology of Cognitive Processes}}
}
@Poster{ 4346,
title = {Bayesian Neural System identification: error bars, receptive fields and neural couplings},
journal = {Neuroforum},
year = {2007},
month = {4},
volume = {13},
number = {Supplement},
pages = {360},
abstract = {The task of system identification lies at the heart of neural data analysis. Bayesian system identification
methods provide a powerful toolbox which allows one to make inferences over stimulus-neuron and
neuron-neuron dependencies in a principled way. Rather than reporting only the most likely parameters, the
posterior distribution obtained in the Bayesian approach informs us about the range of parameter values that
are consistent with the observed data and the assumptions made. In other words, Bayesian receptive fields
always come with error bars. Since the amount of data from neural recordings is limited, the error bars are as
important as the receptive field itself.
Here we apply a recently developed approximation of Bayesian inference to a multi-cell response model
consisting of a set of coupled units, each of which being a Linear-Nonlinear-Poisson (LNP) cascade neuron
model. The instantaneous firing rate of each unit depends multiplicatively on both the spike train history of
the units and the stimulus. Parameter fitting in this model has been shown to be a convex optimization
problem (Paninski 2004) that can be solved efficiently, scaling linearly in the number of events, neurons and
history-size. By doing inference in such a model one can estimate excitatory and inhibitory interactions
between the neurons and the dependence of the stimulus. In addition, the Bayesian framework allows one not
only to put error bars on the inferred parameter values but also to quantify the predictive power of the model
in terms of the marginal likelihood.
As a sanity check of the new technique, and also to explore its limitations, we first verify for artificially
generated data that we are able to infer the true underlying model. Then we apply the method to recordings
from retinal ganglion cells (RGC) responding to white noise (m-sequence) stimulation. The figure shows both
the inferred receptive fields (lower) as well as the confidence range of the sorted pixel values (upper) when
using a different fraction of the data (0,10,50, and 100 %). We also compare the results with the receptive
fields derived with classical linear correlation analysis and maximum likelihood estimation.},
file_url = {/fileadmin/user_upload/files/publications/gerwinn_abstract_[0].pdf},
web_url = {http://nwg.glia.mdc-berlin.de/media/pdf/conference/Proceedings-Goettingen2007.pdf},
event_name = {7th Meeting of the German Neuroscience Society, 31st Göttingen Neurobiology Conference},
event_place = {Göttingen, Germany},
state = {published},
author = {Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Seeger M{seeger}{Department Empirical Inference}; Zeck G; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 4345,
title = {Identifying temporal population codes in the retina using canonical correlation analysis},
journal = {Neuroforum},
year = {2007},
month = {4},
volume = {13},
number = {Supplement},
pages = {359},
abstract = {Right from the first synapse in the retina, the visual information gets distributed across several parallel
channels with different temporal filtering properties (Wässle, 2004). Yet, the prevalent system identification
tool for characterizing neural responses, the spike-triggered average, only allows one to investigate the
individual neural responses independently of each other. Here, we present a novel data analysis tool for the
identification of temporal population codes based on canonical correlation analysis (Hotelling, 1936).
Canonical correlation analysis allows one to find `population receptive fields' (PRF) which are maximally
correlated with the temporal response of the entire neural population. The method is a convex optimization
technique which essentially solves an eigenvalue problem and is not prone to local minima.
We apply the method to simultaneous recordings from rabbit retinal ganglion cells in a whole mount
preparation (Zeck et al, 2005). The cells respond to a 16 by 16 pixel m-sequence stimulus presented at a frame
rate of 1/(20 msec). The response of 27 ganglion cells is correlated with each input frame in an interval
between zero and 200 msec relative to the stimulus. The 200 msec response period is binned into 14
equal-sized bins. As shown in the figure, we obtain six predictive population receptive fields (left column),
each of which gives rise to a different population response (right column). The x-axis of the color-coded
images used to describe the population response kernels (right column) corresponds to the index of the 27
different neurons, while the y-axis indicates time relative to the stimulus from 0 (top) to 200 msec (bottom).
The six population receptive fields do not only provide a more concise description of the population response
but can also be estimated much more reliably than the receptive fields of individual neurons.
In conclusion, we suggest to characterize retinal ganglion cell responses in terms of population receptive
fields, rather than discussing stimulus-neuron and neuron-neuron dependencies separately.},
file_url = {/fileadmin/user_upload/files/publications/TS24-2C_4345[0].pdf},
web_url = {http://nwg.glia.mdc-berlin.de/media/pdf/conference/Proceedings-Goettingen2007.pdf},
event_name = {7th Meeting of the German Neuroscience Society, 31st Göttingen Neurobiology Conference},
event_place = {Göttingen, Germany},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Zeck G{gzeck}}
}
@Poster{ GerwinnSZB2007,
title = {Bayesian Receptive Fields and Neural Couplings with Sparsity
Prior and Error Bars},
year = {2007},
month = {2},
pages = {47},
abstract = {Here we apply Bayesian system identification methods to infer stimulus-neuron and neuron-neuron dependencies.
Rather than reporting only the most likely parameters, the posterior distribution obtained in the Bayesian approach informs us about the range of parameter values that are consistent with the observed data and the assumptions made. In other words, Bayesian receptive fields always come with error bars. In fact, we obtain the full posterior covariance, indicating conditional (in-)dependence between the weights of both, receptive fields and neural couplings. Since the amount of data from neural recordings is limited, such uncertainty information is as important as the usual point estimate of the receptive field itself.
We employ expectation propagation, a recently developed approximation of Bayesian inference, to a multicell
response model consisting of a set of coupled units, each of which is a Linear-Nonlinear-Poisson (LNP) cascade neuron model. The instantaneous firing rate of each unit depends on both the spike train history of the units and the stimulus. Parameter fitting in this model has been shown to be a convex optimization problem [1], which can be solved efficiently. By doing inference in this model we can determine excitatory and inhibitory interactions between the neurons and the dependence of the stimulus on the firing rate. In addition to the uncertainty information (error bars) obtained within the Bayesian framework one can impose a sparsity-inducing prior on the parameter values. This forces weights actively to zero, if they are not
relevant for explaining the data, leading to a more robust estimate of receptive fields and neural couplings,
where only significant parameters are nonzero.
The approximative Bayesian inference technique is applied to both artificially generated data and to recordings
from retinal ganglion cells (RGC) responding to white noise (m-sequence) stimulation. We compare the different results obtained with a Laplacian (sparsity) prior and a Gaussian (no sparsity) prior via Bayes factors and test set validation. For completeness, the receptive fields based on classical linear correlation analysis and maximum likelihood estimation are included into the comparison.},
web_url = {http://www.cosyne.org/c/index.php?title=Cosyne_07},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2007)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Seeger M{seeger}{Department Empirical Inference}; Zeck G; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 4668,
title = {Estimating Population Receptive Fields in Space and Time},
year = {2007},
month = {2},
pages = {44},
abstract = {Right from the first synapse in the retina, visual information gets distributed
across several parallel channels with different temporal filtering properties.
Yet, commonly used system identification tools for characterizing
neural responses, such as the spike-triggered average, only allow one to
investigate the individual neural responses independently of each other.
Conversely, many population coding models of neurons and correlations
between neurons concentrate on the encoding of a single-variate stimulus.
We seek to identify the features of the visual stimulus that are encoded in
the temporal response of an ensemble of neurons, and the corresponding
spike-patterns that indicate the presence of these features.
We present a novel data analysis tool for the identification of such temporal
population codes based on canonical correlation analysis (Hotelling,
1936). The “population receptive fields” (PRFs) are defined to be those
dimensions of the stimulus-space that are maximally correlated with the
temporal response of the entire neural population, irrespective of whether
the stimulus features are encoded by the responses of single neurons or by
patterns of spikes across neurons or time. These dimensions are identified
by canonical correlation analysis, a convex optimization technique which essentially solves an eigenvalue
problem and is not prone to local minima.
Each receptive field can be represented by the weighted sum of a small number of functions that are separable
in space-time. Therefore, non-separable receptive fields can be estimated more efficiently than with spike-triggered
techniques, which makes our method advantageous even for the estimation of single-cell receptive
fields.
The method is demonstrated by applying it to data from multi-electrode recordings from rabbit retinal ganglion
cells in a whole mount preparation (Zeck et al, 2005). The figure displays the first 6 PRFs of a population
of 27 cells from one such experiment. The recovered stimulus-features look qualitatively different
to the receptive fields of single retinal ganglion cells. In addition, we show how the model can be extended
to capture nonlinear stimulus-response relationships and to test different coding-mechanisms by the
use of kernel-canonical correlation analysis. In conclusion, we suggest to characterize responses of ensembles
of neurons in terms of PRFs, rather than discussing stimulus-neuron and neuron-neuron dependencies
separately.},
file_url = {/fileadmin/user_upload/files/publications/Cosyne-2007-I-37_[0].pdf},
web_url = {http://www.cosyne.org/wiki/Cosyne_07_Program},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2007)},
event_place = {Salt Lake City, UT, USA},
state = {published},
author = {Macke JH{jakob}; Zeck G{gzeck}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Poster{ 4833,
title = {Factorial Coding of Natural Images: How Effective are Linear Models in Removing Higher-Order Dependencies?},
year = {2006},
month = {3},
volume = {9},
pages = {90},
abstract = {The performance of unsupervised learning models for natural images is evaluated quantitatively by means of information theory. We estimate the gain in statistical independence (the multi-information reduction) achieved with independent component analysis (ICA), principal component analysis (PCA), zero-phase whitening, and predictive coding. Predictive coding is translated into the transform coding framework, where it can be characterized by the constraint of a triangular filter matrix. A randomly sampled whitening basis and the Haar wavelet are included into the comparison as well. The comparison of all these methods is carried out for different patch sizes, ranging from 2x2 to 16x16 pixels. In spite of large differences in the shape of the basis functions, we find only small differences in the multi-information between all decorrelation transforms (5% or less) for all patch sizes. Among the second-order methods, PCA is optimal for small patch sizes and predictive coding performs best for large patch sizes. The extra gain achieved with ICA is always less than 2%. In conclusion, the `edge filters' found with ICA lead only to a surprisingly small improvement in terms of its actual objective.},
event_name = {9th Tübingen Perception Conference (TWK 2006)},
event_place = {Tübingen, Germany},
state = {published},
author = {Bethge M{mbethge}}
}
@Miscellaneous{ 5192,
title = {Der kollektiven Signalverarbeitung von Nervenzellen auf der Spur - Forschungsbericht 2008 Max-Planck-Institut für biologische Kybernetik},
journal = {Jahrbuch der Max-Planck-Gesellschaft},
year = {2008},
volume = {2008},
pages = {1-6},
abstract = {Knowledge about neural signaling originates from experiments where the activity of a neuron is correlated with stimuli. Meanwhile, simultaneous recording from many neurons is possible. With new mathematical methods such experiments can be used to determine the response properties of neural networks rather than of individual cells. This allows us to analyze how retinal images are processed collectively by neural networks in the visual pathway.},
file_url = {/fileadmin/user_upload/files/publications/Jahrbuch2008-Bethge_5192[0].pdf},
web_url = {https://www.mpg.de/412648/forschungsSchwerpunkt},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Miscellaneous{ 5193,
title = {Geheimsprache der Neuronen},
journal = {Gehirn & Geist},
year = {2002},
month = {4},
volume = {2002},
number = {2},
pages = {80-87},
abstract = {Wie gelingt es Nervenzellen, die Reize der Außenwelt sinngerecht in elektrische Impulse umzuwandeln? Jetzt lernen Hirnforscher die rätselhafte Sprache des Gehirns zu verstehen.},
web_url = {http://www.spektrum.de/magazin/geheimsprache-der-neuronen/839294},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Pawelzik K}
}
@Conference{ WallisFEGWB2016,
title = {Towards matching the peripheral visual appearance of arbitrary scenes using deep convolutional neural networks},
year = {2016},
month = {8},
day = {30},
web_url = {http://www.ub.edu/ecvp/talk-sessions},
event_name = {39th European Conference on Visual Perception (ECVP 2016)},
event_place = {Barcelona, Spain},
state = {published},
author = {Wallis TS; Funke CM; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Gatys LA; Wichmann FA{felix}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ Bethge2016,
title = {Understanding Complex Neural Network Computations},
year = {2016},
month = {6},
pages = {26},
abstract = {The recent breakthrough in deep learning has led to a rapid explosion in the evolution of artificial neural networks that successfully perform complex computations such as object recognition or semantic image segmentation. Unlike in the past, the complexity of these networks seems essential for their success and cannot easily be replaced by much simpler architectures. In trying to understand how deep neural networks achieve robust perceptual interpretations of sensory stimuli, we face similar questions as we do in neuroscience even though their full connectome is known and it is easy to obtain the responses of all its neurons to arbitrary stimuli. How can we obtain precise descriptions of neural responses without relying on the specifics of implementation? Can we characterize the knowledge that such networks have acquired about the world and how it is represented? I will present recent results from my lab on assessing the meaning of neural representations in high-performing convolutional neural networks. More generally, I will argue that the rise of deep neural networks offers a particular chance for computational neuroscience to advance its concepts and tools for understanding complex computational neural systems, and I am hoping to spark stimulating discussions on how we could use this opportunity.},
web_url = {http://areadne.org/2016/pezaris-hatsopoulos-2016-areadne.pdf},
event_name = {AREADNE 2016: Research in Encoding And Decoding of Neural Ensembles},
event_place = {Santorini, Greece},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ WallisBW2016,
title = {Testing models of peripheral encoding using metamerism in an oddity paradigm},
year = {2016},
month = {3},
day = {21},
pages = {364-365},
web_url = {http://www.teap2016.de/543/Hund/doc/teap2016_abstracts_online.pdf},
event_name = {58th Conference of Experimental Psychologists (TeaP 2016)},
event_place = {Heidelberg, Germany},
state = {published},
author = {Wallis T; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Wichmann F{felix}}
}
@Conference{ Bethge2015_3,
title = {Let's compete: Benchmarking models in neuroscience},
year = {2015},
month = {12},
day = {11},
web_url = {https://users.soe.ucsc.edu/~afletcher/neuralsysnips.html},
event_name = {NIPS 2015 Workshop on Statistical Methods for Understanding Neural Systems},
event_place = {Montréal, Canada},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ Bethge2015_2,
title = {Perceiving Neural Networks},
year = {2015},
month = {11},
day = {30},
abstract = {Let’s compete—benchmarking models in neuroscience: Computational modeling has become increasingly popular in neuroscience but it often lacks a common strategy for model comparison. Following the benchmarking approach ubiquitous in machine learning I will present three problems in neuroscience for which model comparison plays an important role: (1) Predicting where people look, (2) predicting when neurons spike, and (3) generative modeling of natural images. I will conclude with a discussion on the growing importance of Machine Learning in neuroscience and how the increasing proficiency of artificial neural networks in solving perceptual tasks opens exciting new opportunities for interaction between the two fields.},
web_url = {http://learning-systems.org/events/max-planck-eth-center-for-learning-systems-inauguration},
event_name = {Max Planck ETH Center for Learning Systems Inauguration},
event_place = {Tübingen, Germany},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ NonnenmacherBBBM2015,
title = {Correlations and signatures of criticality in neural population models},
year = {2015},
month = {9},
day = {16},
pages = {27-28},
abstract = {Large-scale recording methods make it possible to measure the statistics of neural population activity, and thereby to gain insights into the principles that govern the
collective activity of neural ensembles. One hypothesis that has emerged from this approach is that neural populations are poised at a thermodynamic critical point [1], and that this may have important functional consequences. Support for this hypothesis has come from studies [2,3] that identified signatures of criticality (such as a divergence of the specific heat with population size) in the statistics of neural activity recorded from populations of retinal ganglion cells. What mechanisms can explain these observations? Do they require the neural system to be fine-tuned to be poised at the critical point, or do they robustly emerge in generic circuits [4,5,6]?
We show that indicators for thermodynamic criticality arise in a simple simulation of retinal population activity, and without the need for fine-tuning or adaptation. Using simple statistical models [7], we demonstrate that peak specific heat grows with population size whenever the (average) correlation is independent of the number of
neurons. The latter is always true when uniformly subsampling a large, correlated population. For weakly correlated populations, the rate of divergence of the specific heat is proportional to the correlation strength. This predicts that neural populations would be strongly correlated if they were optimized to maximize specific heat, which is in contrast with theories of efficient coding that make the opposite prediction. Our findings suggest that indicators for thermodynamic criticality might not require an optimized coding strategy, but rather arise as consequence of subsampling a stimulus-driven neural population.},
web_url = {http://www.nncn.de/de/bernstein-conference/2015/program},
event_name = {Bernstein Conference 2015},
event_place = {Heidelberg, Germany},
state = {published},
DOI = {10.12751/nncn.bc2015.0013},
author = {Nonnenmacher M{mnonnenmacher}; Behrens C; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Macke J{jakob}}
}
@Conference{ WallisBW2015,
title = {Metamers of the ventral stream revisited},
journal = {Journal of Vision},
year = {2015},
month = {9},
volume = {15},
number = {12},
pages = {554},
abstract = {Peripheral vision has been characterised as a lossy representation: information present in the periphery is discarded to a greater degree than in the fovea. What information is lost and what is retained? Freeman and Simoncelli (2011) recently revived the concept of metamers (physically different stimuli that look the same) as a way to test this question. Metamerism is a useful criterion, but several details must be refined. First, their paper assessed metamerism using a task with a significant working memory component (ABX). We use a purely spatial discrimination task to probe perceptual encoding. Second, a strong test of any hypothesised representation is to what extent it is metameric for a real scene. Several subsequent studies have misunderstood this to be the result of the paper. Freeman and Simoncelli instead only compared synthetic stimuli to each other. Pairs of stimuli were synthesised from natural images such that they were physically different but equal under the model representation. The experiment then assessed the scaling factor (spatial pooling region as a function of retinal eccentricity) required to make these two synthesised images indiscriminable from one another, finding that these scaling factors approximated V2 receptive field sizes. We find that a smaller scale factor than V2 neurons is required to make the synthesised images metameric for natural scenes (which are also equal under the model). We further show that this varies over images and is modified by including the spatial context of the target patches. While this particular model therefore fails to capture some perceptually relevant information, we believe that testing specific models against the criteria that they should discard as much information as possible while remaining metameric is a useful way to understand perceptual representations psychophysically.},
web_url = {http://jov.arvojournals.org/article.aspx?articleid=2433662},
event_name = {15th Annual Meeting of the Vision Sciences Society (VSS 2015)},
event_place = {St. Pete Beach, FL, USA},
state = {published},
DOI = {10.1167/15.12.554},
author = {Wallis T; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Wichmann F{felix}}
}
@Conference{ Bethge2015,
title = {Understanding biological and artificial neural networks},
year = {2015},
month = {6},
day = {11},
web_url = {http://www.nncn.de/en/news/events/bernstein-sparks-workshop-decision-making},
event_name = {5th Bernstein Sparks Workshop: Neural models of decision making in natural inference tasks - from theory to experiment},
event_place = {Tübingen, Germany},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ GatysETB2015,
title = {Synaptic unreliability facilitates information transmission in balanced cortical populations},
year = {2015},
month = {3},
day = {16},
abstract = {Synaptic unreliability is one of the major sources of biophysical noise in the brain. In the context of neural information processing, it is a central question how neural systems can afford this unreliability. Here we examined how synaptic noise affects signal transmission in cortical circuits, where excitation and inhibition are thought to be tightly balanced. Surprisingly, we found that in this balanced state synaptic response variability actually facilitates information transmission, rather than impairing it. In particular, the transmission of fast-varying signals benefits from synaptic noise, as it instantaneously increases the amount of information shared between presynaptic signal and postsynaptic current. This finding provides a parsimonious explanation why cortex can afford to operate with noisy synapses.},
web_url = {http://www.dpg-verhandlungen.de/year/2015/conference/berlin/part/bp/session/8/contribution/5},
event_name = {79. Jahrestagung der Deutschen Physikalischen Gesellschaft und DPG-Frühjahrstagung},
event_place = {Berlin, Germany},
state = {published},
author = {Gatys LA; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Tchumatchenko T; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ FrankeBBRBE2014,
title = {What the mouse eye tells the mouse brain: Fingerprinting the retinal ganglion cell types of the mouse retina},
year = {2014},
month = {10},
day = {15},
volume = {15},
pages = {10},
abstract = {The retinal ganglion cells (RGCs) relay the output of each parallel feature detecting channel established through complex interactions in the retina's two synaptic layers to higher visual centres. Understanding how the visual scenery is encoded by the outputs of the 20 RGC types will thus yield a complete picture of the representation of the visual scene available to the brain. To reliably record from each RGC type in the mouse retina, we bulk-electroporated the tissue with a synthetic calcium indicator (OGB-1) and used two-photon calcium imaging
to record light stimulus-evoked activity at the level of the ganglion cell layer (GCL) (Briggman & Euler, J Neurophysiol 2011). So far, our database contains recordings of >10,000 cells from the GCL. In addition, we obtained recordings from transgenic PV and PCP2 mice, in
which 13 morphologically distinct RGC types are fluorescently labelled and can be identified based on their anatomy (Farrow et al., Neuron 2013; Ivanova et al., J Comp Neurol 2013).
Moreover, we performed electrical single-cell recordings from RGCs to relate their spiking responses to the somatic Ca2+ signals and to compare their morphologies with published RGC catalogues (e.g., Völgyi et al., JCN 2009). We implemented a probabilistic clustering framework for separating RGCs into functional types based on features extracted from their responses to the different visual stimuli using PCA. We employed an automated mixture of
Gaussians Clustering algorithm to cluster the cells based on their physiological properties. Subsequently, clusters were grouped according to genetic labels and morphological criteria (e.g. soma size). For our data, we obtain 35 functional groups, which separate into 25 RGC groups and 10 displaced amacrine cell (dAC) groups, as verified using glutamate decarboxylase (GAD) immunostaining. These numbers match well the number of RGC and dAC types expected in mouse retina. The RGC types include many known cell types (OFF and ON alpha, W3, ON-OFF direction-selective), as verified using our genetic label and single cell data (e.g. alpha RGCs) and additional information available (e.g. soma size/shape and retinal tiling). In addition, they include new functional RGC types, such as (1) an OFF orientation selective RGC, (2) an ON transient DS RGC with single cardinal direction and, (3) a contrast-suppressed type. Our results suggest that a functional fingerprint for each RGC
in the mouse retina is within reach.},
web_url = {http://www.neuroschool-tuebingen-nena.de/fileadmin/user_upload/Dokumente/neuroscience/Abstractbook_NeNa2014_final.pdf},
event_name = {15th Conference of Junior Neuroscientists of Tübingen (NeNa 2014)},
event_place = {Schramberg, Germany},
state = {published},
author = {Franke K; Baden T; Berens P{berens}{Research Group Computational Vision and Neuroscience}; Rezac M; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Euler T}
}
@Conference{ GatysETB2014_2,
title = {Synaptic unreliability facilitates information transmission in balanced cortical populations},
year = {2014},
month = {10},
day = {13},
volume = {15},
pages = {11},
abstract = {Cortical neurons fire in a highly irregular manner, suggesting that their input is tightly balanced and changes in presynaptic firing rate are encoded primarily in the variance of the postsynaptic currents. Here we show that such balance has a surprising effect on information
transmission: Synaptic unreliability – which is ubiquitous in cortex and usually thought to impair neural communication – actually increases the information rate. We show that the
beneficial effect of noise is based on a very general mechanism which contrary to stochastic resonance does not rely on a threshold nonlinearity.},
web_url = {http://www.neuroschool-tuebingen-nena.de/fileadmin/user_upload/Dokumente/neuroscience/Abstractbook_NeNa2014_final.pdf},
event_name = {15th Conference of Junior Neuroscientists of Tübingen (NeNa 2014)},
event_place = {Schramberg, Germany},
state = {published},
author = {Gatys L; Ecker A{aecker}{Department Physiology of Cognitive Processes}; Tchumatchenko T; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ GatysETB2014,
title = {Synaptic unreliability facilitates information transmission in
balanced cortical populations},
year = {2014},
month = {9},
day = {4},
pages = {21},
abstract = {Cortical neurons fire in a highly irregular manner, suggesting that their input is tightly balanced and changes in presynaptic firing rate are encoded primarily in the variance of the postsynaptic currents. Here we show that such balance has a surprising effect on information transmission: Synaptic unreliability – which is ubiquitous in cortex and usually thought to impair neural communication – actually increases the information rate. We show that the beneficial effect of noise is based on a very general mechanism which contrary to stochastic resonance does not rely on a threshold nonlinearity.},
web_url = {http://abstracts.g-node.org/abstracts/65d2bbbf-5b2d-4570-8200-f994f190e9ca},
event_name = {Bernstein Conference 2014},
event_place = {Göttingen, Germany},
state = {published},
DOI = {10.12751/nncn.bc2014.0017},
author = {Gatys LA; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Tchumatchenko T; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ Bethge2014_2,
title = {Natural image statistics & neural representation learning},
year = {2014},
month = {9},
abstract = {An important motivation for studying the statistics of natural images is the search for image representations which facilitate visual inference tasks. Representations optimized directly for a given task are at risk of overfitting, that is, the representations might work well for that particular task but might not generalize well to others. However, the striking ability of our visual system to perform well in a variety of different situations and to recognize objects even when they have been seen only once suggests that it exploits general structural regularities of natural images. In this lecture, I will give an overview on natural image statistics and how different types of representations have been derived by modeling different statistical properties of natural images.},
web_url = {http://www.mis.mpg.de/calendar/conferences/2014/al/abstracts.html},
event_name = {Autonomous Learning: Summer School 2014},
event_place = {Leipzig, Germany},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ Bethge2013,
title = {Beyond GLMs: A generative mixture modeling approach to neural system identification},
year = {2013},
month = {10},
day = {16},
web_url = {http://grossmancenter.columbia.edu/workshop-2013.html},
event_name = {Columbia University: Workshop on Quantifying Structure in Large Neural Datasets},
event_place = {New York, NY, USA},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ Bethge2012,
title = {The unsolved mystery of neural information processing: taming the curse of dimensionality},
year = {2012},
month = {6},
web_url = {http://2012.occam-os.de/videos.html},
event_name = {Osnabrück Computational Cognition Alliance Meeting on "The Brain as an Information Processing System" (OCCAM 2012)},
event_place = {Osnabrück, Germany},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ GerhardWB2012,
title = {Sensitivity to local higher-order correlations in natural images},
journal = {Perception},
year = {2011},
month = {9},
volume = {40},
number = {ECVP Abstract Supplement},
pages = {18},
abstract = {We measured perceptual sensitivity to higher-order correlational structure of natural images using a new paradigm, with which we also evaluated the efficacy of several successful natural image models that reproduce neural response properties of the visual system. To measure sensitivity to local correlations in natural images, stimuli were square textures of tightly tiled small image patches originating from either: (i) natural scene photographs or (ii) a model. In a trial, observers viewed both texture types and had to select the one made of natural image patches. In a series of experiments with twenty-two subjects, we tested 7 models, varying patch size from 3×3 to 8×8 pixels. Results indicate high sensitivity to local higher-order correlations in natural images: no current model fools the human eye for patches 5×5 pixels or larger, and only the model with the highest likelihood brings performance near chance when patches are 4×4 pixels or smaller. Remarkably, the ordering of the psychophysical results matched the models' ordering in likelihood of capturing natural image regularities. The subjects' performance on binarized textures approached ideal observer efficiency, where the ideal observer has perfect knowledge of the natural image distribution. In four control experiments, we determined the knowledge observers use to detect higher-order correlations.},
web_url = {http://pec.sagepub.com/content/40/1_suppl.toc},
event_name = {34th European Conference on Visual Perception},
event_place = {Toulouse, France},
state = {published},
DOI = {10.1177/03010066110400S102},
author = {Gerhard H{hgerhard}{Research Group Computational Vision and Neuroscience}; Wichmann F{felix}{Department Empirical Inference}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ HafnerB2014,
title = {Evaluating neuronal codes for inference using Fisher information},
journal = {Frontiers in Computational Neuroscience},
year = {2010},
month = {10},
volume = {2010},
number = {Conference Abstract: Bernstein Conference on Computational Neuroscience},
abstract = {Many studies have explored the impact of response variability on the quality of sensory codes. The source of this variability is almost always assumed to be intrinsic to the brain. However, when inferring a particular stimulus property, variability associated with other stimulus attributes also effectively acts as noise. Here we explore the impact of such stimulus-induced response variability for two model cases: a) binocular disparity inference and b) orientation discrimination. We characterize the response distribution for the energy model in response to random dot stereograms and to displays of oriented random dots.For the case of bincular disparity processing we find the response distribution to be very different from the Poisson-like noise usually assumed. We compute the Fisher information with respect to binocular disparity, present in the monocular inputs to the standard model of early binocular processing, and thereby obtain an upper bound on how much information a model could theoretically extract from them. Then we analyze the information loss incurred by the different ways of combining those inputs to produce a scalar single-neuron response. We find that in the case of depth inference, monocular stimulus variability places a greater limit on the extractable information than intrinsic neuronal noise for typical spike counts. Furthermore, the largest loss of information is incurred by the standard model for position disparity neurons (tuned-excitatory), that are the most ubiquitous in monkey primary visual cortex, while more information from the inputs is preserved in phase-disparity neurons (tuned-near or tuned-far) primarily found in higher cortical regions.},
web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2010.51.00069/event_abstract?sname=Bernstein_Conference_on_Computational_Neuroscience_1},
event_name = {Bernstein Conference on Computational Neuroscience (BCCN 2010)},
event_place = {Berlin, Germany},
state = {published},
DOI = {10.3389/conf.fncom.2010.51.00069},
author = {Haefner R{rhaefner}{Research Group Computational Vision and Neuroscience}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ GerwinnMB2010,
title = {Toolbox for inference in generalized linear models of spiking neurons},
journal = {Frontiers in Computational Neuroscience},
year = {2010},
month = {10},
volume = {2010},
number = {Conference Abstract: Bernstein Conference on Computational Neuroscience},
abstract = {Generalized linear models are increasingly used for analyzing neural data, and to characterize the stimulus dependence and functional connectivity of both single neurons and neural populations. One possibility to extend the computational complexity of these models is to expand the stimulus, and possibly the representation of the spiking history into high dimensional feature spaces.
When the dimension of the parameter space is large, strong regularization has to be used in order to fit GLMs to datasets of realistic size without overfitting. By imposing properly chosen priors over parameters, Bayesian inference provides an effective and principled approach for achieving regularization.
In this work, we present a MATLAB toolbox which provides efficient inference methods for parameter fitting. This includes standard maximum a posteriori estimation for Gaussian and Laplacian prior, which is also sometimes referred to as L1- and L2-reguralization. Furthermore, it implements approximate inference techniques for both prior distributions based on the expectation propagation algorithm [1].
In order to model the refractory property and functional couplings between neurons, the spiking history within a population is often represented as responses to a set of predefined basis functions. Most of the basis function sets used so far, are non-orthogonal. Commonly priors are specified without taking the properties of the basis functions into account (uncorrelated Gauss, independent Laplace). However, if basis functions overlap, the coefficients are correlated. As an example application of this toolbox, we analyze the effect of independent prior distributions, if the set of basis functions are non-orthogonal and compare the performance to the orthogonal setting.},
web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2010.51.00091/event_abstract?sname=Bernstein_Conference_on_Computational_Neuroscience_1},
event_name = {Bernstein Conference on Computational Neuroscience (BCCN 2010)},
event_place = {Berlin, Germany},
state = {published},
DOI = {10.3389/conf.fncom.2010.51.00091},
author = {Gerwinn S{sgerwinn}{Research Group Computational Vision and Neuroscience}; Macke JH{jakob}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ HafnerGMB2010,
title = {Implications of correlated neuronal noise in decision making circuits for physiology and behavior},
journal = {Frontiers in Neuroscience},
year = {2010},
month = {2},
volume = {Conference Abstract: Computational and Systems Neuroscience 2010},
abstract = {Understanding how the activity of sensory neurons contribute to perceptual decision making is one of the major questions in neuroscience. In the current standard model, the output of opposing pools of noisy, correlated sensory neurons is integrated by downstream neurons whose activity elicits a decision-dependent behavior [1][2]. The predictions of the standard model for empirical measurements like choice probability (CP), psychophysical kernel (PK) and reaction time distribution crucially depend on the spatial and temporal correlations within the pools of sensory neurons. This dependency has so far only been investigated numerically and for time-invariant correlations and variances. However, it has recently been shown that the noise variance undergoes significant changes over the course of the stimulus presentation [3]. The same is true for inter-neuronal correlations that have been shown to change with task and attentional state [4][5]. In the first part of our work we compute analytically the time course of CPs and PKs in the presence of arbitrary noise correlations and variances for the case of non-leaky integration and Gaussian noise. This allows general insights and is especially needed in the light of the experimental transition from single-cell to multi-cell recordings. Then we simulate the implications of realistic noise in several variants of the standard model (leaky and non-leaky integration, integration over the entire stimulus presentation or until a bound, with and without urgency signal) and compare them to physiological data. We find that in the case of non-leaky integration over the entire stimulus duration, the PK only depends on the overall level of noise variance, not its time course. That means that the PK remains constant regardless of the temporal changes in the noise. 
This finding supports an earlier conclusion that an observed decreasing PK suggests that the brain is not integrating over the entire stimulus duration but only until it has accumulated sufficient evidence, even in the case of no urgency [6]. The time course of the CP, on the other hand, strongly depends on the time course of the noise variances and on the temporal and interneuronal correlations. If noise variance or interneuronal correlation increases, CPs increase as well. This dissociation of PK and CP allows an alternative solution to the puzzle recently posed by [7] in a bottom-up framework by combining integration to a bound with an increase in noise variance/correlation. In addition, we derive how the distribution of reaction times depends on noise variance and correlation, further constraining the model using empirical observations.},
web_url = {http://www.frontiersin.org/10.3389/conf.fnins.2010.03.00023/event_abstract},
event_name = {Computational and Systems Neuroscience Meeting (COSYNE 2010)},
event_place = {Salt Lake City, UT, USA},
state = {published},
DOI = {10.3389/conf.fnins.2010.03.00023},
author = {Haefner R{rhaefner}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Macke J{jakob}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ BerensGEB2009,
title = {Neurometric function analysis of short-term population codes},
journal = {Frontiers in Computational Neuroscience},
year = {2009},
month = {10},
day = {1},
volume = {2009},
number = {Conference Abstract: Bernstein Conference on Computational Neuroscience},
pages = {24-25},
abstract = {The relative merits of different population coding schemes have mostly been studied in the framework of stimulus reconstruction using Fisher Information, minimum mean square error or mutual information. Here, we analyze neural population codes using the minimal discrimination error (MDE) and the Jensen-Shannon information in a two alternatives forced choice (2AFC) task. In a certain sense, this approach is more informative than the previous ones as it defines an error that is specific to any pair of possible stimuli - in particular, it includes Fisher Information as a special case. We demonstrate several advantages of the minimal discrimination error: (1) it is very intuitive and easier to compare to experimental data, (2) it is easier to compute than mutual information or minimum mean square error, (3) it allows studying assumption about prior distributions, and (4) it provides a more reliable assessment of coding accuracy than Fisher information.
First, we introduce the Jensen-Shannon information and explain how it can be used to bound the MDE. In particular, we derive a new lower bound on the minimal discrimination error that is tighter than previous ones. Also, we explain how Fisher information can be derived from the Jensen-Shannon information and conversely to what extent Fisher information can be used to predict the minimal discrimination error for arbitrary pairs of stimuli depending on the properties of the tuning functions.
Second, we use the minimal discrimination error to study population codes of angular variables. In particular, we assess the impact of different noise correlations structures on coding accuracy in long versus short decoding time windows. That is, for long time window we use the common Gaussian noise approximation while we analyze the Ising model with identical noise correlation structure to address the case of short time windows. As an important result, we find that the beneficial effect of stimulus dependent correlations in the absence of 'limited-range' correlations holds only true for long-term population codes while they provide no advantage in case of short decoding time windows.
In this way, we provide for a new rigorous framework for assessing the functional consequences of correlation structures for the representational accuracy of neural population codes in short time scales.},
web_url = {http://www.frontiersin.org/10.3389/conf.neuro.10.2009.14.093/event_abstract},
event_name = {Bernstein Conference on Computational Neuroscience (BCCN 2009)},
event_place = {Frankfurt a.M., Germany},
state = {published},
DOI = {10.3389/conf.neuro.10.2009.14.093},
author = {Berens P{berens}{Research Group Computational Vision and Neuroscience}; Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Ecker AS{aecker}{Department Physiology of Cognitive Processes}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ Bethge2008_3,
title = {How Much More Does V1 Know About the Statistics of Natural
Images Than the Retina?},
year = {2008},
month = {11},
day = {28},
web_url = {https://fias.uni-frankfurt.de/historical/MLInVision_WorkshopProgram.pdf},
event_name = {Workshop Machine Learning Approaches to Representational Learning and Recognition in Vision},
event_place = {Frankfurt a.M., Germany},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ Bethge2008_4,
title = {How much more does V1 know about the statistics of natural images than the retina?},
year = {2008},
month = {11},
day = {8},
abstract = {It has long been assumed that sensory neurons are adapted, through both evolutionary and developmental processes, to the statistical properties of the signals to which they are exposed. In particular, Attneave (1954) and Barlow (1961) proposed that redundancy reduction could provide a link between environmental statistics and neural responses in a similar spirit to projection pursuit density estimation. A striking result related to this view is that three key features of V1 simple cell receptive fields – localization, bandpass filtering, and orientation selectivity – emerge if one maximizes statistical independence (i.e. minimizes the redundancy) of linear filters in response to natural images with an algorithm known as Independent Component Analysis (ICA). In addition, also the nonlinear filter property of divisive normalization has been interpreted as a mechanism for redundancy reduction. In order to quantitatively assess the potential of these prominent neural response properties for modeling the statistics of natural images we evaluate their relative contribution to the total amount of redundancy reduction. We find that bandpass filtering has the largest potential for redundancy reduction, followed by divisive normalization. Localization and orientation selectivity turn out to have only a surprisingly small potential for redundancy reduction. We conclude that the common model for V1 simple cells is not flexible enough to implement significantly more knowledge about the statistics of natural images than what can already be modeled at the level of the retina.},
web_url = {https://www.bccn-berlin.de/Calendar/Talks/talk/?contentId=1510},
event_name = {Bernstein Center for Computational Neuroscience Workshop: Models of Early Visual Processing},
event_place = {Berlin, Germany},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ MackeOB2008,
title = {How pairwise correlations affect the redundancy in large
populations of neurons},
journal = {Frontiers in Computational Neuroscience},
year = {2008},
month = {10},
volume = {2008},
number = {Conference Abstract: Bernstein Symposium 2008},
web_url = {http://www.frontiersin.org/community/AbstractDetails.aspx?ABS_DOI=10.3389/conf.neuro.10.2008.01.086&eid=108&sname=Bernstein_Symposium_2008},
event_name = {Bernstein Symposium 2008},
event_place = {München, Germany},
state = {published},
DOI = {10.3389/conf.neuro.10.2008.01.086},
author = {Macke JH{jakob}; Opper M; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ Bethge2008_2,
title = {How much can bandpass filtering, orientation selectivity,
and divisive normalization contribute to the reduction of
redundancy in natural images?},
year = {2008},
month = {9},
day = {4},
web_url = {http://ikw.uni-osnabrueck.de/~NBP/PDFs_Publications/Delmenhorst_Programm_040908.pdf},
event_name = {International Workshop: Aspects of Adaptive Cortex Dynamics},
event_place = {Delmenhorst, Germany},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ Bethge2008,
title = {Sensory coding of natural images: bandpass filtering, orientation selectivity and contrast gain control},
year = {2008},
month = {7},
day = {30},
web_url = {http://www.grc.org/programs.aspx?year=2008&program=senscod},
event_name = {Gordon Research Conference: Sensory Coding & The Natural Environment 2008},
event_place = {Lucca, Italy},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ 5408,
title = {Estimating receptive fields without spike-triggering},
year = {2007},
month = {11},
volume = {37},
number = {768.1},
abstract = {The prevalent means of characterizing stimulus selectivity in sensory neurons is to estimate their receptive field properties such as orientation selectivity. Receptive fields are usually derived from the mean (or covariance) of the spike-triggered stimulus ensemble.
This approach treats each spike as an independent message but ignores the possibility that information might be conveyed through patterns of neural activity that are distributed across space or time.
In the retina for example, visual stimuli are analyzed by several parallel channels with different spatiotemporal filtering properties. How can we define the receptive field of a whole population of neurons, not just a single neuron?
Imaging methods (such as voltage-sensitive dye imaging) yield measurements of neural activity that do not contain spiking events at all. How can receptive fields be derived from this kind of data?
Even for single neurons, there is evidence that multiple features of the neural response, for example spike patterns or latencies, can carry information. How can these features be taken into account in the estimation process?
Here, we address the question of how receptive fields can be calculated from such distributed representations. We seek to identify those stimulus features and the corresponding patterns of neural activity that are most reliably coupled, as measured by the mutual information between the two signals. As an efficient implementation of this strategy, we use an extension of reverse-correlation methods based on canonical correlation analysis [1]. We evaluate our approach using both simulated data and multi-electrode recordings from rabbit retinal ganglion cells [2]. In addition, we show how the model can be extended to capture nonlinear stimulus-response relationships and to test different coding mechanisms using kernel canonical correlation analysis [3].},
web_url = {http://www.sfn.org/am2007/},
event_name = {37th Annual Meeting of the Society for Neuroscience (Neuroscience 2007)},
event_place = {San Diego, CA, USA},
state = {published},
author = {Macke JH{jakob}; Zeck G{gzeck}; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ BethgeE2007,
title = {Linking V1 receptive field properties to optimal coding principles},
year = {2007},
month = {11},
volume = {37},
number = {768.6},
abstract = {Redundancy reduction has been proposed as a principle underlying the self-organization of neural representations at the early stages of sensory processing [1]. In particular, principal component analysis (PCA), symmetric whitening (SWH) and independent component analysis (ICA) have been studied as parsimonious redundancy reduction models. When applied to data sets of natural image patches second-order decorrelation methods such as PCA and SWH do not yield localized, oriented, and bandpass filter shapes. These striking properties of V1 simple cell receptive fields, however, can be derived with ICA because of its additional minimization of higher-order correlations. While this finding is intriguing, the structure of the higher-order correlations encountered in ICA is not well understood and their use for sensory coding remains elusive.
Previous studies [2-4] have tried to quantify the difference in coding efficiency between the orientation selective ICA filters and those derived with second-order decorrelation methods. Due to the different methods used, these studies yielded differing results for the coding gain of ICA. In a comprehensive study we included all the previous approaches by measuring the expected log-likelihood, the multi-information, as well as rate-distortion curves for both gray-level and color images. Without exception, we find that the advantage of ICA is very small. We further corroborate and explain this finding by showing that a spherical symmetric distribution can fit the data even better than the ICA model. For this model all filter shapes are equally well suited since the distribution is invariant under arbitrary orthogonal transforms. In conclusion, more sophisticated models are necessary to explain V1 receptive field properties in terms of optimal coding principles.},
web_url = {http://www.sfn.org/am2007/},
event_name = {37th Annual Meeting of the Society for Neuroscience (Neuroscience 2007)},
event_place = {San Diego, CA, USA},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Eichhorn J{je}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ 4669,
title = {Do We Know What the Early Visual System Computes?},
journal = {Neuroforum},
year = {2007},
month = {4},
volume = {13},
number = {Supplement},
pages = {352},
abstract = {Decades of research provided much data and insights into the mechanisms of the early visual system.
Currently, however, there is great controversy on whether these findings can provide us with a thorough
functional understanding of what the early visual system does, or formulated differently, of what it computes.
At the Society for Neuroscience meeting 2005 in Washington, a symposium was held on the question "Do we
know what the early visual system does", which was accompanied by a widely regarded publication in the
Journal of Neuroscience. Yet, that discussion was rather specialized as it predominantly addressed the
question of how well neural responses in retina, LGN, and cortex can be predicted from noise stimuli, but did
not emphasize the question of whether we understand what the function of these early visual areas is. Here we
will concentrate on this neuro-computational aspect of vision. Experts from neurobiology, psychophysics and
computational neuroscience will present studies which approach this question from different viewpoints and promote a critical discussion of whether we actually understand what early areas contribute to the processing and perception of visual information.},
web_url = {http://nwg.glia.mdc-berlin.de/media/pdf/conference/Proceedings-Goettingen2007.pdf},
event_name = {7th Meeting of the German Neuroscience Society, 31st Göttingen Neurobiology Conference},
event_place = {Göttingen, Germany},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Kayser C{kayser}{Department Physiology of Cognitive Processes}}
}
@Conference{ GerwinnSZB2006,
title = {Bayesian Neural System identification: error bars,
receptive fields and neural couplings},
year = {2006},
month = {11},
volume = {7},
pages = {9},
abstract = {The task of system identification lies at the heart of neural data analysis. Bayesian system identification methods provide a powerful toolbox which allows one to make inferences over stimulus-neuron and neuron-neuron dependencies in a principled way. Rather than reporting only
the most likely parameters, the posterior distribution obtained in the Bayesian approach informs us about the range of parameter values that are consistent with the observed data and the assumptions made. In other words, Bayesian receptive fields always come with error bars. Since the amount of data from neural recordings is limited, the error bars are as important as the receptive field itself.
Here we apply a recently developed approximation of Bayesian inference to a multi-cell response model consisting of a set of coupled units, each of which being a Linear-Nonlinear-Poisson (LNP) cascade neuron model. The instantaneous firing rate of each unit depends multiplicatively on both the spike train history of the units and the stimulus. Parameter fitting in this model has been shown to be a convex optimization problem (Paninski 2004) that can be solved efficiently, scaling linearly in the number of events, neurons and history-size. By doing inference in such a model one can estimate excitatory and inhibitory interactions between the neurons and the dependence of the stimulus. In addition, the Bayesian framework allows one not only to put error bars on the inferred parameter values but also to quantify the predictive power of the model in terms of the marginal likelihood.
As a sanity check of the new technique, and also to explore its limitations, we first verify for artificially generated data that we are able to infer the true underlying model. Then we apply the method to recordings from retinal ganglion cells (RGC) responding to white noise (m-sequence) stimulation. The figure shows both the inferred receptive fields (lower) as well as the confidence range of the sorted pixel values (upper) when using a different fraction of the data (0,10,50, and 100 %). We also compare the results with the receptive fields derived with classical linear correlation analysis and maximum likelihood estimation.},
event_name = {7th Conference of the Junior Neuroscientists of Tübingen (NeNa 2006)},
event_place = {Oberjoch, Germany},
state = {published},
author = {Gerwinn S{sgerwinn}{Department Empirical Inference}{Research Group Computational Vision and Neuroscience}; Seeger M{seeger}{Department Empirical Inference}; Zeck G; Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ Bethge2006,
title = {Factorial coding of natural images: How effective are linear filters in removing higher-order dependencies?},
year = {2006},
month = {11},
volume = {7},
pages = {17},
abstract = {Our today's understanding of how neurons in the early visual system respond to the light intensity patterns on the retina can be described basically in terms of firing rates and linear filtering (plus pointwise nonlinearities). It has been suggested that the purpose of this filtering is to represent the retinal image by the activation pattern of statistically less dependent features (i. e. redundancy reduction). In particular, the filters found with independent component analysis (ICA) for natural images resemble important properties of simple cells in striate cortex: they are localized, oriented, and bandpass.
In contrast to the many possible second-order decorrelation transforms, ICA returns a unique answer by additionally optimizing for higher-order correlations. However, it has never been tested quantitatively how large the additional gain of ICA is compared with second-order methods. Here, we estimate the gain in statistical independence (the multi-information reduction) achieved with ICA, principal component analysis (PCA), zero-phase whitening, and predictive coding. A randomly sampled whitening basis and the Haar wavelet are included into the comparison as well. The comparison of all these methods is carried out for different patch sizes, ranging from 2x2 to 16x16 pixels. In spite of large differences in the shape of the basis functions, we find only small differences in the multi-information between all decorrelation transforms (5\% or less) for all patch sizes. Among the second-order methods, PCA is optimal for small patch sizes and predictive coding performs best for large patch sizes. In summary, the `edge filters' found with ICA lead only to a surprisingly small improvement in terms of its actual objective, and we conclude that a restriction to linear filtering does not line up well with the idea of higher-order decorrelation. In addition, psychophysical data is presented which further corroborates this conclusion.},
event_name = {7th Conference of the Junior Neuroscientists of Tübingen (NeNa 2006)},
event_place = {Oberjoch, Germany},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Conference{ Bethge2006_2,
title = {What is the goal of early visual processing?},
year = {2006},
month = {10},
day = {25},
abstract = {Translating the information carried by the retinal image into a more useful representation is generally thought of as being an important goal of early processing in biological vision. During the last decade many models have been proposed which aim at deriving filters whose shapes resemble prominent properties of receptive fields in the early visual pathways. The filters in these models are
determined by optimizing a certain objective function. While the use of optimality principles seems to imply a superior performance of these filters for scene analysis
and object recognition, it lacks thorough verification of this supposition. Minimization of the statistical higher-order dependencies between the filter outputs
(ICA) has been used to derive localized, oriented, and bandpass filters, resembling the receptive fields of simple cells in V1. A quantitative analysis of the reduction of
statistical dependencies achieved with this model, however, reveals only a small improvement in comparison with arbitrary second-order decorrelation filters. In addition, I will present psychophysical results, showing that, perceptually, the independent components of natural images exhibit even more dependencies than the non-localized basis functions of the discrete cosine transform used in image
compression. Finally, I will present a slightly different objective which similarly leads to localized, oriented, and bandpass image filters but rather seeks to divide the sensory information into different clusters of similar image content.},
web_url = {http://cogsci.uni-osnabrueck.de/~NBP/News.html},
event_name = {Universität Osnabrück: Institute of Cognitive Science Colloquium},
event_place = {Osnabrück, Germany},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}}
}
@Patent{ 5492,
title = {Method and Device for Image Compression},
year = {2009},
month = {12},
number = {WO/2009/146933},
abstract = {A method for compressing a digital image comprises the steps of:selecting an image patch of the digital image; assigning the selected image patch to a specific class (z); transforming the image patch, with a pre-determined class-specific transformation function; and quantizing the transformed image patch.},
web_url = {http://www.wipo.int/pctdb/en/wo.jsp?WO=2009146933},
state = {published},
author = {Bethge M{mbethge}{Research Group Computational Vision and Neuroscience}; Hosseini R{hosseini}{Research Group Computational Vision and Neuroscience}}
}